git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 4 Nov 2015 03:34:22 +0000 (19:34 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 4 Nov 2015 03:34:22 +0000 (19:34 -0800)
Pull x86 cleanups from Ingo Molnar:
 "An early_printk cleanup plus deinlining enhancements"

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/early_printk: Set __iomem address space for IO
  x86/signal: Deinline get_sigframe, save 240 bytes
  x86: Deinline early_console_register, save 403 bytes
  x86/e820: Deinline e820_type_to_string, save 126 bytes

1765 files changed:
.mailmap
Documentation/Changes
Documentation/IRQ-domain.txt
Documentation/RCU/stallwarn.txt
Documentation/RCU/torture.txt
Documentation/RCU/trace.txt
Documentation/RCU/whatisRCU.txt
Documentation/arm/OMAP/README [new file with mode: 0644]
Documentation/arm/SA1100/Victor [deleted file]
Documentation/arm/memory.txt
Documentation/arm/uefi.txt
Documentation/arm64/booting.txt
Documentation/atomic_ops.txt
Documentation/device-mapper/snapshot.txt
Documentation/devicetree/bindings/arm/gic.txt
Documentation/devicetree/bindings/arm/twd.txt
Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
Documentation/devicetree/bindings/gpio/gpio-msm.txt [deleted file]
Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
Documentation/devicetree/bindings/gpio/gpio-zynq.txt
Documentation/devicetree/bindings/gpio/gpio.txt
Documentation/devicetree/bindings/gpio/netxbig-gpio-ext.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/cypress,cyapa.txt
Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
Documentation/devicetree/bindings/leds/leds-aat1290.txt
Documentation/devicetree/bindings/leds/leds-bcm6328.txt
Documentation/devicetree/bindings/leds/leds-netxbig.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
Documentation/devicetree/bindings/mmc/mmc.txt
Documentation/devicetree/bindings/mmc/mtk-sd.txt
Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/devicetree/bindings/net/smsc-lan87xx.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pci/pci-msi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt
Documentation/devicetree/bindings/pinctrl/brcm,cygnus-gpio.txt
Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
Documentation/devicetree/bindings/spi/sh-msiof.txt
Documentation/devicetree/bindings/usb/renesas_usbhs.txt
Documentation/edac.txt
Documentation/features/vm/THP/arch-support.txt
Documentation/features/vm/pte_special/arch-support.txt
Documentation/filesystems/proc.txt
Documentation/gpio/driver.txt
Documentation/hwmon/lm75
Documentation/hwmon/max31790 [new file with mode: 0644]
Documentation/input/multi-touch-protocol.txt
Documentation/kernel-parameters.txt
Documentation/locking/lockstat.txt
Documentation/locking/locktorture.txt
Documentation/memory-barriers.txt
Documentation/mmc/mmc-dev-attrs.txt
Documentation/power/pci.txt
Documentation/ptp/testptp.c
MAINTAINERS
Makefile
arch/alpha/include/asm/atomic.h
arch/alpha/include/asm/word-at-a-time.h
arch/arc/Kconfig
arch/arc/boot/dts/axc001.dtsi
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/boot/dts/nsim_hs.dts
arch/arc/boot/dts/skeleton.dtsi
arch/arc/boot/dts/vdk_axc003.dtsi
arch/arc/boot/dts/vdk_axc003_idu.dtsi
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/include/asm/Kbuild
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/atomic.h
arch/arc/include/asm/cache.h
arch/arc/include/asm/cacheflush.h
arch/arc/include/asm/entry-compact.h
arch/arc/include/asm/highmem.h [new file with mode: 0644]
arch/arc/include/asm/hugepage.h [new file with mode: 0644]
arch/arc/include/asm/irq.h
arch/arc/include/asm/irqflags-compact.h
arch/arc/include/asm/kmap_types.h [new file with mode: 0644]
arch/arc/include/asm/mach_desc.h
arch/arc/include/asm/mcip.h
arch/arc/include/asm/mmu.h
arch/arc/include/asm/page.h
arch/arc/include/asm/pgalloc.h
arch/arc/include/asm/pgtable.h
arch/arc/include/asm/processor.h
arch/arc/include/asm/setup.h
arch/arc/include/asm/smp.h
arch/arc/include/asm/tlbflush.h
arch/arc/include/uapi/asm/page.h
arch/arc/kernel/entry-arcv2.S
arch/arc/kernel/entry-compact.S
arch/arc/kernel/head.S
arch/arc/kernel/intc-compact.c
arch/arc/kernel/irq.c
arch/arc/kernel/mcip.c
arch/arc/kernel/setup.c
arch/arc/kernel/smp.c
arch/arc/kernel/time.c
arch/arc/kernel/vmlinux.lds.S
arch/arc/mm/Makefile
arch/arc/mm/cache.c
arch/arc/mm/fault.c
arch/arc/mm/highmem.c [new file with mode: 0644]
arch/arc/mm/init.c
arch/arc/mm/tlb.c
arch/arc/mm/tlbex.S
arch/arc/plat-axs10x/axs10x.c
arch/arc/plat-sim/platform.c
arch/arm/Kconfig
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am57xx-beagle-x15.dts
arch/arm/boot/dts/armada-385-db-ap.dts
arch/arm/boot/dts/berlin2q.dtsi
arch/arm/boot/dts/emev2-kzm9d.dts
arch/arm/boot/dts/exynos4412.dtsi
arch/arm/boot/dts/exynos5250-smdk5250.dts
arch/arm/boot/dts/exynos5420-peach-pit.dts
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
arch/arm/boot/dts/exynos5800-peach-pi.dts
arch/arm/boot/dts/imx53-qsrb.dts
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/imx6qdl-rex.dtsi
arch/arm/boot/dts/imx7d.dtsi
arch/arm/boot/dts/kirkwood-net5big.dts
arch/arm/boot/dts/kirkwood-netxbig.dtsi
arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
arch/arm/boot/dts/meson.dtsi
arch/arm/boot/dts/omap3-evm-37xx.dts
arch/arm/boot/dts/r8a7790.dtsi
arch/arm/boot/dts/r8a7791.dtsi
arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi
arch/arm/boot/dts/rk3288-veyron.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/sama5d2.dtsi
arch/arm/boot/dts/ste-hrefv60plus.dtsi
arch/arm/boot/dts/ste-snowball.dts
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/tegra114.dtsi
arch/arm/boot/dts/tegra124.dtsi
arch/arm/boot/dts/tegra20.dtsi
arch/arm/boot/dts/tegra30.dtsi
arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts
arch/arm/configs/exynos_defconfig
arch/arm/configs/hisi_defconfig
arch/arm/configs/lpc18xx_defconfig
arch/arm/include/asm/arch_gicv3.h [new file with mode: 0644]
arch/arm/include/asm/atomic.h
arch/arm/include/asm/cmpxchg.h
arch/arm/include/asm/irqflags.h
arch/arm/include/asm/mach/arch.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/smp.h
arch/arm/include/asm/unistd.h
arch/arm/kernel/devtree.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/kgdb.c
arch/arm/kernel/smp.c
arch/arm/kernel/smp_twd.c
arch/arm/kernel/traps.c
arch/arm/kvm/Kconfig
arch/arm/kvm/arm.c
arch/arm/lib/clear_user.S
arch/arm/mach-exynos/mcpm-exynos.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-exynos/regs-pmu.h
arch/arm/mach-exynos/suspend.c
arch/arm/mach-imx/gpc.c
arch/arm/mach-mvebu/Kconfig
arch/arm/mach-mvebu/Makefile
arch/arm/mach-mvebu/board.h [deleted file]
arch/arm/mach-mvebu/kirkwood.c
arch/arm/mach-mvebu/netxbig.c [deleted file]
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/omap-wakeupgen.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mach-pxa/pxa3xx.c
arch/arm/mm/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm/mm/fault.c
arch/arm/mm/fault.h
arch/arm/mm/mmu.c
arch/arm/net/bpf_jit_32.c
arch/arm/plat-orion/common.c
arch/arm/vdso/vdsomunge.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/apm/apm-storm.dtsi
arch/arm64/boot/dts/arm/juno-motherboard.dtsi
arch/arm64/include/asm/acpi.h
arch/arm64/include/asm/arch_gicv3.h [new file with mode: 0644]
arch/arm64/include/asm/atomic.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/include/uapi/asm/signal.h
arch/arm64/kernel/acpi.c
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/efi-stub.c
arch/arm64/kernel/efi.c
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kernel/head.S
arch/arm64/kernel/insn.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/suspend.c
arch/arm64/kvm/Kconfig
arch/arm64/mm/fault.c
arch/arm64/mm/proc.S
arch/avr32/boards/atngw100/mrmt.c
arch/avr32/include/asm/Kbuild
arch/avr32/include/asm/atomic.h
arch/blackfin/include/asm/Kbuild
arch/c6x/include/asm/Kbuild
arch/c6x/platforms/megamod-pic.c
arch/cris/Kconfig
arch/cris/arch-v10/kernel/head.S
arch/cris/arch-v10/kernel/kgdb.c
arch/cris/arch-v10/mm/init.c
arch/cris/arch-v32/Kconfig
arch/cris/arch-v32/drivers/Kconfig
arch/cris/arch-v32/drivers/Makefile
arch/cris/arch-v32/drivers/axisflashmap.c
arch/cris/arch-v32/drivers/i2c.c [deleted file]
arch/cris/arch-v32/drivers/i2c.h [deleted file]
arch/cris/arch-v32/drivers/mach-a3/Makefile
arch/cris/arch-v32/drivers/mach-a3/gpio.c [deleted file]
arch/cris/arch-v32/drivers/mach-fs/Makefile
arch/cris/arch-v32/drivers/mach-fs/gpio.c [deleted file]
arch/cris/arch-v32/kernel/crisksyms.c
arch/cris/arch-v32/kernel/debugport.c
arch/cris/arch-v32/kernel/head.S
arch/cris/arch-v32/kernel/irq.c
arch/cris/arch-v32/kernel/kgdb.c
arch/cris/arch-v32/kernel/setup.c
arch/cris/arch-v32/mach-a3/Makefile
arch/cris/arch-v32/mach-a3/io.c [deleted file]
arch/cris/arch-v32/mach-fs/Kconfig
arch/cris/arch-v32/mach-fs/Makefile
arch/cris/arch-v32/mach-fs/io.c [deleted file]
arch/cris/boot/dts/artpec3.dtsi [new file with mode: 0644]
arch/cris/boot/dts/dev88.dts
arch/cris/boot/dts/etraxfs.dtsi
arch/cris/boot/dts/include/dt-bindings [new symlink]
arch/cris/boot/dts/p1343.dts [new file with mode: 0644]
arch/cris/boot/rescue/head_v10.S
arch/cris/include/arch-v32/arch/io.h [deleted file]
arch/cris/include/arch-v32/arch/irq.h
arch/cris/include/asm/Kbuild
arch/cris/include/asm/eshlibld.h
arch/cris/include/asm/io.h
arch/cris/include/uapi/asm/etraxgpio.h
arch/cris/kernel/crisksyms.c
arch/cris/kernel/time.c
arch/frv/include/asm/Kbuild
arch/frv/include/asm/atomic.h
arch/h8300/include/asm/Kbuild
arch/h8300/include/asm/atomic.h
arch/hexagon/include/asm/Kbuild
arch/hexagon/include/asm/atomic.h
arch/ia64/include/asm/Kbuild
arch/ia64/include/asm/atomic.h
arch/ia64/include/asm/unistd.h
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/kernel/entry.S
arch/m32r/include/asm/Kbuild
arch/m32r/include/asm/atomic.h
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/atomic.h
arch/m68k/include/asm/linkage.h
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/m68k/sun3/idprom.c
arch/metag/include/asm/Kbuild
arch/metag/include/asm/atomic_lnkget.h
arch/metag/include/asm/atomic_lock1.h
arch/microblaze/include/asm/Kbuild
arch/mips/ath79/irq.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/cavium-octeon/setup.c
arch/mips/configs/pistachio_defconfig
arch/mips/include/asm/Kbuild
arch/mips/include/asm/atomic.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/io.h
arch/mips/include/asm/maar.h
arch/mips/include/asm/mips-cm.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/uapi/asm/swab.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/jz4740/board-qi_lb60.c
arch/mips/jz4740/gpio.c
arch/mips/kernel/cps-vec.S
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/octeon_switch.S
arch/mips/kernel/r2300_switch.S
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/setup.c
arch/mips/kernel/smp.c
arch/mips/loongson64/common/env.c
arch/mips/mm/dma-default.c
arch/mips/mm/init.c
arch/mips/mti-sead3/Makefile
arch/mips/net/bpf_jit_asm.S
arch/mn10300/include/asm/Kbuild
arch/mn10300/include/asm/atomic.h
arch/nios2/include/asm/Kbuild
arch/parisc/include/asm/atomic.h
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/include/asm/cache.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/machdep.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/word-at-a-time.h
arch/powerpc/kernel/dma.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/mm/hash_native_64.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/hv-24x7.c
arch/powerpc/perf/power8-pmu.c
arch/powerpc/platforms/cell/axon_msi.c
arch/powerpc/platforms/cell/spider-pic.c
arch/powerpc/platforms/pasemi/msi.c
arch/powerpc/platforms/powernv/opal-irqchip.c
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/ps3/os-area.c
arch/powerpc/sysdev/ehv_pic.c
arch/powerpc/sysdev/fsl_msi.c
arch/powerpc/sysdev/i8259.c
arch/powerpc/sysdev/ipic.c
arch/powerpc/sysdev/mpic.c
arch/powerpc/sysdev/mpic_msi.c
arch/powerpc/sysdev/qe_lib/qe_ic.c
arch/s390/boot/compressed/Makefile
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/include/asm/Kbuild
arch/s390/include/asm/numa.h
arch/s390/include/asm/topology.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/entry.S
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/vtime.c
arch/s390/numa/mode_emu.c
arch/s390/numa/numa.c
arch/score/include/asm/Kbuild
arch/sh/include/asm/atomic.h
arch/sh/include/asm/page.h
arch/sparc/crypto/aes_glue.c
arch/sparc/crypto/camellia_glue.c
arch/sparc/crypto/des_glue.c
arch/sparc/include/asm/atomic_64.h
arch/sparc/kernel/perf_event.c
arch/tile/gxio/mpipe.c
arch/tile/include/asm/atomic.h
arch/tile/include/asm/atomic_64.h
arch/tile/include/asm/word-at-a-time.h
arch/tile/kernel/usb.c
arch/um/Makefile
arch/um/include/asm/Kbuild
arch/um/kernel/trap.c
arch/um/os-Linux/helper.c
arch/unicore32/include/asm/Kbuild
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/boot/header.S
arch/x86/crypto/camellia_aesni_avx_glue.c
arch/x86/entry/common.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/entry/syscall_32.c
arch/x86/entry/syscall_64.c
arch/x86/entry/syscalls/syscall_32.tbl
arch/x86/entry/vdso/Makefile
arch/x86/entry/vdso/vdso2c.c
arch/x86/entry/vdso/vdso32-setup.c
arch/x86/entry/vdso/vdso32/int80.S [deleted file]
arch/x86/entry/vdso/vdso32/syscall.S [deleted file]
arch/x86/entry/vdso/vdso32/sysenter.S [deleted file]
arch/x86/entry/vdso/vdso32/system_call.S [new file with mode: 0644]
arch/x86/entry/vdso/vma.c
arch/x86/entry/vsyscall/vsyscall_64.c
arch/x86/ia32/ia32_signal.c
arch/x86/include/asm/acpi.h
arch/x86/include/asm/amd_nb.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_64.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/dwarf2.h [new file with mode: 0644]
arch/x86/include/asm/efi.h
arch/x86/include/asm/elf.h
arch/x86/include/asm/hpet.h
arch/x86/include/asm/kdebug.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/microcode.h
arch/x86/include/asm/microcode_amd.h
arch/x86/include/asm/microcode_intel.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/numachip/numachip.h
arch/x86/include/asm/numachip/numachip_csr.h
arch/x86/include/asm/preempt.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/pvclock-abi.h
arch/x86/include/asm/string_64.h
arch/x86/include/asm/switch_to.h
arch/x86/include/asm/syscall.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/vdso.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/uapi/asm/bitsperlong.h
arch/x86/include/uapi/asm/mce.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/apic_numachip.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/microcode/Makefile
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/amd_early.c [deleted file]
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/core_early.c [deleted file]
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/microcode/intel_early.c [deleted file]
arch/x86/kernel/cpu/microcode/intel_lib.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_bts.c
arch/x86/kernel/cpu/perf_event_intel_cstate.c [new file with mode: 0644]
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_lbr.c
arch/x86/kernel/cpu/perf_event_intel_pt.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/cpu/perf_event_intel_uncore.h
arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
arch/x86/kernel/cpu/perf_event_msr.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/crash.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/head_32.S
arch/x86/kernel/hpet.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/quirks.c
arch/x86/kernel/setup.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tsc.c
arch/x86/kvm/emulate.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/x86-opcode-map.txt
arch/x86/mm/init.c
arch/x86/mm/init_64.c
arch/x86/mm/pageattr.c
arch/x86/platform/efi/efi-bgrt.c
arch/x86/platform/efi/efi.c
arch/x86/ras/Kconfig
arch/x86/ras/mce_amd_inj.c
arch/x86/um/asm/syscall.h
arch/x86/um/ldt.c
arch/x86/um/sys_call_table_32.c
arch/x86/um/sys_call_table_64.c
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/xtensa/include/asm/Kbuild
arch/xtensa/include/asm/atomic.h
block/blk-core.c
block/blk-lib.c
block/blk-mq-cpumap.c
block/blk-mq-sysfs.c
block/blk-mq-tag.c
block/blk-mq-tag.h
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
crypto/ablkcipher.c
crypto/ahash.c
crypto/algapi.c
crypto/api.c
crypto/asymmetric_keys/x509_public_key.c
crypto/crypto_user.c
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/actables.h
drivers/acpi/acpica/evxfevnt.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/apei/ghes.c
drivers/acpi/ec.c
drivers/acpi/gsi.c
drivers/acpi/pci_irq.c
drivers/acpi/pci_link.c
drivers/base/dd.c
drivers/base/dma-contiguous.c
drivers/base/pinctrl.c
drivers/base/platform-msi.c
drivers/base/power/domain_governor.c
drivers/base/power/opp.c
drivers/base/regmap/internal.h
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap-irq.c
drivers/base/regmap/regmap.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/block/rbd.c
drivers/block/virtio_blk.c
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/bus/Kconfig
drivers/bus/arm-ccn.c
drivers/clk/clkdev.c
drivers/clk/mvebu/clk-cpu.c
drivers/clk/rockchip/clk-mmc-phase.c
drivers/clk/samsung/clk-cpu.c
drivers/clk/ti/clk-3xxx.c
drivers/clk/ti/clk-7xx.c
drivers/clk/ti/clkt_dflt.c
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/arm_global_timer.c
drivers/clocksource/em_sti.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/fsl_ftm_timer.c
drivers/clocksource/h8300_timer16.c
drivers/clocksource/h8300_timer8.c
drivers/clocksource/h8300_tpu.c
drivers/clocksource/mtk_timer.c
drivers/clocksource/numachip.c [new file with mode: 0644]
drivers/clocksource/rockchip_timer.c
drivers/clocksource/samsung_pwm_timer.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/sh_mtu2.c
drivers/clocksource/tango_xtal.c [new file with mode: 0644]
drivers/clocksource/time-armada-370-xp.c
drivers/clocksource/time-pistachio.c
drivers/clocksource/timer-digicolor.c
drivers/clocksource/timer-imx-gpt.c
drivers/clocksource/timer-keystone.c
drivers/clocksource/timer-prima2.c
drivers/clocksource/vf_pit_timer.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/devfreq/devfreq.c
drivers/dma/at_xdmac.c
drivers/dma/dmaengine.c
drivers/dma/dw/core.c
drivers/dma/idma64.c
drivers/dma/pxa_dma.c
drivers/dma/sun4i-dma.c
drivers/dma/xgene-dma.c
drivers/dma/zx296702_dma.c
drivers/edac/Makefile
drivers/edac/altera_edac.c
drivers/edac/altera_edac.h
drivers/edac/amd64_edac.c
drivers/edac/amd64_edac.h
drivers/edac/debugfs.c [new file with mode: 0644]
drivers/edac/edac_core.h
drivers/edac/edac_mc.c
drivers/edac/edac_mc_sysfs.c
drivers/edac/edac_module.h
drivers/edac/ghes_edac.c
drivers/edac/i5100_edac.c
drivers/edac/ppc4xx_edac.c
drivers/edac/sb_edac.c
drivers/edac/xgene_edac.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/Makefile
drivers/firmware/efi/efi-pstore.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/esrt.c
drivers/firmware/efi/fake_mem.c [new file with mode: 0644]
drivers/firmware/efi/libstub/arm-stub.c
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/gpio-104-idio-16.c [new file with mode: 0644]
drivers/gpio/gpio-altera.c
drivers/gpio/gpio-amdpt.c [new file with mode: 0644]
drivers/gpio/gpio-arizona.c
drivers/gpio/gpio-ath79.c
drivers/gpio/gpio-etraxfs.c
drivers/gpio/gpio-generic.c
drivers/gpio/gpio-it87.c [new file with mode: 0644]
drivers/gpio/gpio-it8761e.c [deleted file]
drivers/gpio/gpio-lpc18xx.c
drivers/gpio/gpio-max730x.c
drivers/gpio/gpio-moxart.c
drivers/gpio/gpio-msm-v2.c [deleted file]
drivers/gpio/gpio-mvebu.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-pl061.c
drivers/gpio/gpio-sodaville.c
drivers/gpio/gpio-sx150x.c
drivers/gpio/gpio-tb10x.c
drivers/gpio/gpio-tz1090-pdc.c
drivers/gpio/gpio-vf610.c
drivers/gpio/gpio-xlp.c
drivers/gpio/gpio-zx.c
drivers/gpio/gpio-zynq.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-legacy.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpio/gpiolib.h
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/include/cgs_linux.h
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_dp_core.c
drivers/gpu/drm/exynos/exynos_drm_core.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_fb.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_dp_mst.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/virtio/virtgpu_debugfs.c
drivers/gpu/drm/virtio/virtgpu_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/abx500.c
drivers/hwmon/coretemp.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/gpio-fan.c
drivers/hwmon/ibmpowernv.c
drivers/hwmon/ina2xx.c
drivers/hwmon/lm75.c
drivers/hwmon/max31790.c [new file with mode: 0644]
drivers/hwmon/nct6775.c
drivers/hwmon/pwm-fan.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-pnx.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/i2c-core.c
drivers/idle/intel_idle.c
drivers/iio/accel/st_accel_core.c
drivers/iio/adc/twl4030-madc.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/roce_gid_mgmt.c
drivers/infiniband/core/ucma.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/usnic/usnic.h
drivers/infiniband/hw/usnic/usnic_abi.h
drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
drivers/infiniband/hw/usnic/usnic_common_util.h
drivers/infiniband/hw/usnic/usnic_debugfs.c
drivers/infiniband/hw/usnic/usnic_debugfs.h
drivers/infiniband/hw/usnic/usnic_fwd.c
drivers/infiniband/hw/usnic/usnic_fwd.h
drivers/infiniband/hw/usnic/usnic_ib.h
drivers/infiniband/hw/usnic/usnic_ib_main.c
drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
drivers/infiniband/hw/usnic/usnic_ib_verbs.h
drivers/infiniband/hw/usnic/usnic_log.h
drivers/infiniband/hw/usnic/usnic_transport.c
drivers/infiniband/hw/usnic/usnic_transport.h
drivers/infiniband/hw/usnic/usnic_uiom.c
drivers/infiniband/hw/usnic/usnic_uiom.h
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
drivers/infiniband/hw/usnic/usnic_vnic.c
drivers/infiniband/hw/usnic/usnic_vnic.h
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_memory.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/input/joystick/Kconfig
drivers/input/joystick/walkera0701.c
drivers/input/keyboard/omap4-keypad.c
drivers/input/misc/pm8941-pwrkey.c
drivers/input/misc/uinput.c
drivers/input/mouse/alps.c
drivers/input/mouse/cyapa_gen6.c
drivers/input/mouse/elan_i2c.h
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/elan_i2c_i2c.c
drivers/input/mouse/elan_i2c_smbus.c
drivers/input/mouse/synaptics.c
drivers/input/serio/libps2.c
drivers/input/serio/parkbd.c
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/ads7846.c
drivers/input/touchscreen/imx6ul_tsc.c
drivers/input/touchscreen/lpc32xx_ts.c
drivers/input/touchscreen/mms114.c
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/amd_iommu_v2.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/intel-iommu.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/iova.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/alphascale_asm9260-icoll.h [new file with mode: 0644]
drivers/irqchip/exynos-combiner.c
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-atmel-aic-common.c
drivers/irqchip/irq-atmel-aic5.c
drivers/irqchip/irq-crossbar.c
drivers/irqchip/irq-gic-common.c
drivers/irqchip/irq-gic-common.h
drivers/irqchip/irq-gic-v2m.c
drivers/irqchip/irq-gic-v3-its-pci-msi.c
drivers/irqchip/irq-gic-v3-its-platform-msi.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-hip04.c
drivers/irqchip/irq-i8259.c
drivers/irqchip/irq-imx-gpcv2.c
drivers/irqchip/irq-mips-gic.c
drivers/irqchip/irq-mtk-sysirq.c
drivers/irqchip/irq-mxs.c
drivers/irqchip/irq-nvic.c
drivers/irqchip/irq-renesas-intc-irqpin.c
drivers/irqchip/irq-renesas-irqc.c
drivers/irqchip/irq-s3c24xx.c
drivers/irqchip/irq-sunxi-nmi.c
drivers/irqchip/irq-tegra.c
drivers/irqchip/irq-vf610-mscm-ir.c
drivers/isdn/hisax/isdnl2.c
drivers/isdn/mISDN/layer2.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/led-class.c
drivers/leds/led-core.c
drivers/leds/leds-88pm860x.c
drivers/leds/leds-bcm6328.c
drivers/leds/leds-bcm6358.c
drivers/leds/leds-cobalt-qube.c
drivers/leds/leds-gpio.c
drivers/leds/leds-hp6xx.c
drivers/leds/leds-ipaq-micro.c
drivers/leds/leds-locomo.c
drivers/leds/leds-menf21bmc.c
drivers/leds/leds-net48xx.c
drivers/leds/leds-netxbig.c
drivers/leds/leds-ot200.c
drivers/leds/leds-powernv.c
drivers/leds/leds-sead3.c [moved from arch/mips/mti-sead3/leds-sead3.c with 99% similarity]
drivers/leds/leds-wrap.c
drivers/leds/leds.h
drivers/leds/trigger/ledtrig-heartbeat.c
drivers/mcb/mcb-pci.c
drivers/md/bitmap.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-policy-cleaner.c
drivers/md/dm-exception-store.c
drivers/md/dm-exception-store.h
drivers/md/dm-raid.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-snap-transient.c
drivers/md/dm-snap.c
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/persistent-data/dm-btree-remove.c
drivers/md/persistent-data/dm-btree.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb-frontends/horus3a.h
drivers/media/dvb-frontends/lnbh25.h
drivers/media/dvb-frontends/m88ds3103.c
drivers/media/dvb-frontends/si2168.c
drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
drivers/media/rc/ir-hix5hd2.c
drivers/media/tuners/si2157.c
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/media/usb/dvb-usb-v2/rtl28xxu.h
drivers/media/v4l2-core/Kconfig
drivers/memory/Kconfig
drivers/memory/omap-gpmc.c
drivers/mfd/intel-lpss.h
drivers/mfd/max77843.c
drivers/misc/cxl/api.c
drivers/misc/cxl/context.c
drivers/misc/cxl/cxl.h
drivers/misc/cxl/file.c
drivers/misc/cxl/irq.c
drivers/misc/cxl/native.c
drivers/misc/cxl/pci.c
drivers/misc/mei/hbm.c
drivers/mmc/card/block.c
drivers/mmc/card/mmc_test.c
drivers/mmc/core/Kconfig
drivers/mmc/core/core.c
drivers/mmc/core/core.h
drivers/mmc/core/debugfs.c
drivers/mmc/core/host.c
drivers/mmc/core/mmc.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/core/mmc_ops.h
drivers/mmc/core/pwrseq_emmc.c
drivers/mmc/core/pwrseq_simple.c
drivers/mmc/core/quirks.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/core/sdio_irq.c
drivers/mmc/core/sdio_ops.h
drivers/mmc/host/Kconfig
drivers/mmc/host/Makefile
drivers/mmc/host/dw_mmc-exynos.c
drivers/mmc/host/dw_mmc-pltfm.c
drivers/mmc/host/dw_mmc-rockchip.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/dw_mmc.h
drivers/mmc/host/mmc_spi.c
drivers/mmc/host/moxart-mmc.c
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/omap.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-bcm-kona.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-esdhc.h
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sdhci-pci-core.c [moved from drivers/mmc/host/sdhci-pci.c with 96% similarity]
drivers/mmc/host/sdhci-pci-o2micro.c
drivers/mmc/host/sdhci-pci-o2micro.h
drivers/mmc/host/sdhci-pci.h
drivers/mmc/host/sdhci-pltfm.c
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci-sirf.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mmc/host/sunxi-mmc.c
drivers/mmc/host/vub300.c
drivers/mmc/host/wbsd.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/sunxi_nand.c
drivers/mtd/ubi/io.c
drivers/mtd/ubi/vtbl.c
drivers/mtd/ubi/wl.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/allwinner/sun4i-emac.c
drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nic_reg.h
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/ibm/emac/core.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40evf/i40e_adminq.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/item.h
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/geneve.c
drivers/net/macvtap.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/dp83848.c [new file with mode: 0644]
drivers/net/phy/mdio-mux-mmioreg.c
drivers/net/phy/mdio-mux.c
drivers/net/phy/micrel.c
drivers/net/phy/smsc.c
drivers/net/ppp/pppoe.c
drivers/net/usb/Kconfig
drivers/net/usb/asix_common.c
drivers/net/usb/asix_devices.c
drivers/net/usb/qmi_wwan.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rtlwifi/pci.h
drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/nvmem/core.c
drivers/nvmem/sunxi_sid.c
drivers/of/irq.c
drivers/pci/host/pci-xgene-msi.c
drivers/pci/msi.c
drivers/pci/of.c
drivers/pci/pci-driver.c
drivers/pci/pci-sysfs.c
drivers/pci/probe.c
drivers/perf/arm_pmu.c
drivers/phy/phy-berlin-sata.c
drivers/phy/phy-qcom-ufs.c
drivers/phy/phy-rcar-gen2.c
drivers/phy/phy-rockchip-usb.c
drivers/pinctrl/Kconfig
drivers/pinctrl/Makefile
drivers/pinctrl/bcm/pinctrl-bcm2835.c
drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
drivers/pinctrl/berlin/Kconfig
drivers/pinctrl/berlin/Makefile
drivers/pinctrl/berlin/berlin-bg2.c
drivers/pinctrl/berlin/berlin-bg2cd.c
drivers/pinctrl/berlin/berlin-bg2q.c
drivers/pinctrl/berlin/berlin-bg4ct.c [new file with mode: 0644]
drivers/pinctrl/berlin/berlin.c
drivers/pinctrl/berlin/berlin.h
drivers/pinctrl/core.c
drivers/pinctrl/freescale/pinctrl-imx.c
drivers/pinctrl/freescale/pinctrl-imx.h
drivers/pinctrl/freescale/pinctrl-imx25.c
drivers/pinctrl/freescale/pinctrl-imx7d.c
drivers/pinctrl/freescale/pinctrl-mxs.c
drivers/pinctrl/intel/Kconfig
drivers/pinctrl/intel/Makefile
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-broxton.c [new file with mode: 0644]
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/nomadik/pinctrl-abx500.c
drivers/pinctrl/nomadik/pinctrl-nomadik.c
drivers/pinctrl/pinconf-generic.c
drivers/pinctrl/pinconf.c
drivers/pinctrl/pinctrl-adi2.c
drivers/pinctrl/pinctrl-as3722.c
drivers/pinctrl/pinctrl-at91-pio4.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-at91.c
drivers/pinctrl/pinctrl-coh901.c
drivers/pinctrl/pinctrl-digicolor.c
drivers/pinctrl/pinctrl-pistachio.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/pinctrl-st.c
drivers/pinctrl/pinctrl-tegra-xusb.c
drivers/pinctrl/pinctrl-tz1090-pdc.c
drivers/pinctrl/pinctrl-tz1090.c
drivers/pinctrl/pinctrl-xway.c
drivers/pinctrl/pinctrl-zynq.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
drivers/pinctrl/samsung/pinctrl-exynos5440.c
drivers/pinctrl/samsung/pinctrl-samsung.c
drivers/pinctrl/sh-pfc/Kconfig
drivers/pinctrl/sh-pfc/Makefile
drivers/pinctrl/sh-pfc/core.c
drivers/pinctrl/sh-pfc/core.h
drivers/pinctrl/sh-pfc/gpio.c
drivers/pinctrl/sh-pfc/pfc-emev2.c
drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
drivers/pinctrl/sh-pfc/pfc-r8a7740.c
drivers/pinctrl/sh-pfc/pfc-r8a7778.c
drivers/pinctrl/sh-pfc/pfc-r8a7779.c
drivers/pinctrl/sh-pfc/pfc-r8a7790.c
drivers/pinctrl/sh-pfc/pfc-r8a7791.c
drivers/pinctrl/sh-pfc/pfc-r8a7794.c
drivers/pinctrl/sh-pfc/pfc-r8a7795.c [new file with mode: 0644]
drivers/pinctrl/sh-pfc/pfc-sh7203.c
drivers/pinctrl/sh-pfc/pfc-sh7264.c
drivers/pinctrl/sh-pfc/pfc-sh7269.c
drivers/pinctrl/sh-pfc/pfc-sh73a0.c
drivers/pinctrl/sh-pfc/pfc-sh7720.c
drivers/pinctrl/sh-pfc/pfc-sh7722.c
drivers/pinctrl/sh-pfc/pfc-sh7723.c
drivers/pinctrl/sh-pfc/pfc-sh7724.c
drivers/pinctrl/sh-pfc/pfc-sh7734.c
drivers/pinctrl/sh-pfc/pfc-sh7757.c
drivers/pinctrl/sh-pfc/pfc-sh7785.c
drivers/pinctrl/sh-pfc/pfc-sh7786.c
drivers/pinctrl/sh-pfc/pfc-shx3.c
drivers/pinctrl/sh-pfc/sh_pfc.h
drivers/pinctrl/sirf/pinctrl-atlas7.c
drivers/pinctrl/sunxi/Kconfig
drivers/pinctrl/sunxi/Makefile
drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c [new file with mode: 0644]
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/pinctrl/uniphier/Kconfig
drivers/pinctrl/uniphier/Makefile
drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
drivers/pinctrl/uniphier/pinctrl-proxstream2.c
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
drivers/pinctrl/vt8500/pinctrl-wmt.c
drivers/pps/kapi.c
drivers/regulator/axp20x-regulator.c
drivers/regulator/core.c
drivers/scsi/3w-9xxx.c
drivers/scsi/libiscsi.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/scsi_dh.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_sysfs.c
drivers/spi/spi-davinci.c
drivers/spmi/spmi-pmic-arb.c
drivers/staging/iio/accel/sca3000_ring.c
drivers/staging/iio/adc/mxs-lradc.c
drivers/staging/lustre/lustre/llite/dir.c
drivers/staging/speakup/fakekey.c
drivers/staging/speakup/selection.c
drivers/thermal/power_allocator.c
drivers/thermal/samsung/exynos_tmu.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_dma.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/imx.c
drivers/tty/tty_buffer.c
drivers/tty/tty_io.c
drivers/usb/core/quirks.c
drivers/usb/gadget/udc/bdc/bdc_ep.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/misc/chaoskey.c
drivers/usb/musb/omap2430.c
drivers/usb/renesas_usbhs/common.c
drivers/usb/renesas_usbhs/rcar2.c
drivers/vhost/vhost.h
drivers/video/console/fbcon.c
drivers/video/fbdev/broadsheetfb.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/fsl-diu-fb.c
drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
drivers/video/fbdev/omap2/displays-new/connector-dvi.c
drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
drivers/video/fbdev/tridentfb.c
drivers/video/of_display_timing.c
drivers/watchdog/Kconfig
drivers/watchdog/bcm2835_wdt.c
drivers/watchdog/gef_wdt.c
drivers/watchdog/mena21_wdt.c
drivers/watchdog/moxart_wdt.c
fs/btrfs/backref.c
fs/btrfs/disk-io.c
fs/btrfs/export.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/ioctl.c
fs/btrfs/send.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/volumes.h
fs/cifs/cifsfs.h
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/smb2pdu.c
fs/dax.c
fs/ext4/Kconfig
fs/ext4/readpage.c
fs/file.c
fs/fs-writeback.c
fs/mpage.c
fs/namei.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4trace.h
fs/nfs/write.c
fs/nfsd/blocklayout.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/overlayfs/copy_up.c
fs/overlayfs/inode.c
fs/overlayfs/super.c
fs/proc/array.c
fs/proc/base.c
fs/proc/meminfo.c
fs/ramfs/file-nommu.c
fs/ubifs/xattr.c
include/asm-generic/atomic-long.h
include/asm-generic/atomic.h
include/asm-generic/mutex-dec.h
include/asm-generic/mutex-xchg.h
include/asm-generic/pgtable.h
include/asm-generic/preempt.h
include/asm-generic/qrwlock_types.h
include/asm-generic/rwsem.h
include/asm-generic/word-at-a-time.h
include/drm/drm_crtc_helper.h
include/drm/drm_dp_helper.h
include/drm/drm_dp_mst_helper.h
include/dt-bindings/gpio/gpio.h
include/dt-bindings/leds/leds-netxbig.h [new file with mode: 0644]
include/kvm/arm_vgic.h
include/linux/acpi.h
include/linux/amba/bus.h
include/linux/atomic.h
include/linux/backing-dev-defs.h
include/linux/backing-dev.h
include/linux/blk-cgroup.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/cma.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/cpu.h
include/linux/dma-contiguous.h
include/linux/edac.h
include/linux/efi.h
include/linux/fdtable.h
include/linux/fwnode.h
include/linux/gpio/consumer.h
include/linux/gpio/driver.h
include/linux/init_task.h
include/linux/interrupt.h
include/linux/iova.h
include/linux/irq.h
include/linux/irqchip/arm-gic-v3.h
include/linux/irqchip/arm-gic.h
include/linux/irqdomain.h
include/linux/irqreturn.h
include/linux/list.h
include/linux/list_bl.h
include/linux/list_nulls.h
include/linux/memcontrol.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mm.h
include/linux/mmc/card.h
include/linux/mmc/core.h
include/linux/mmc/dw_mmc.h
include/linux/mmc/host.h
include/linux/msi.h
include/linux/netdevice.h
include/linux/of_gpio.h
include/linux/of_irq.h
include/linux/omap-dma.h
include/linux/percpu-rwsem.h
include/linux/perf_event.h
include/linux/pinctrl/devinfo.h
include/linux/pinctrl/pinconf-generic.h
include/linux/pinctrl/pinctrl-state.h
include/linux/platform_data/leds-kirkwood-netxbig.h
include/linux/pps_kernel.h
include/linux/preempt.h
include/linux/rcu_sync.h [new file with mode: 0644]
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/regmap.h
include/linux/sched.h
include/linux/sched/deadline.h
include/linux/skbuff.h
include/linux/smpboot.h
include/linux/srcu.h
include/linux/stop_machine.h
include/linux/string.h
include/linux/timekeeping.h
include/linux/timex.h
include/linux/usb/renesas_usbhs.h
include/linux/vmalloc.h
include/net/af_unix.h
include/net/dst_metadata.h
include/net/inet_timewait_sock.h
include/net/sock.h
include/sound/soc.h
include/sound/wm8904.h
include/trace/events/sched.h
include/uapi/asm-generic/signal.h
include/uapi/linux/mmc/ioctl.h
include/uapi/linux/openvswitch.h
include/uapi/linux/perf_event.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/screen_info.h
include/uapi/linux/userfaultfd.h
include/xen/interface/sched.h
ipc/msg.c
ipc/shm.c
ipc/util.c
kernel/cpu.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/irq/Kconfig
kernel/irq/Makefile
kernel/irq/chip.c
kernel/irq/cpuhotplug.c [new file with mode: 0644]
kernel/irq/handle.c
kernel/irq/internals.h
kernel/irq/irqdomain.c
kernel/irq/manage.c
kernel/irq/msi.c
kernel/irq/proc.c
kernel/irq/settings.h
kernel/kexec_core.c
kernel/kmod.c
kernel/locking/lockdep.c
kernel/locking/locktorture.c
kernel/locking/mcs_spinlock.h
kernel/locking/mutex.c
kernel/locking/osq_lock.c
kernel/locking/percpu-rwsem.c
kernel/locking/qrwlock.c
kernel/locking/qspinlock_paravirt.h
kernel/locking/rtmutex.c
kernel/locking/rwsem-xadd.c
kernel/memremap.c
kernel/module.c
kernel/rcu/Makefile
kernel/rcu/rcutorture.c
kernel/rcu/srcu.c
kernel/rcu/sync.c [new file with mode: 0644]
kernel/rcu/tiny.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_trace.c
kernel/rcu/update.c
kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/cpudeadline.h
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/features.h
kernel/sched/idle.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/smpboot.c
kernel/stop_machine.c
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/ntp.c
kernel/time/ntp_internal.h
kernel/time/posix-cpu-timers.c
kernel/time/timeconst.bc
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/torture.c
kernel/trace/ftrace.c
kernel/trace/trace_sched_switch.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_stack.c
kernel/workqueue.c
lib/Kconfig
lib/Kconfig.debug
lib/fault-inject.c
lib/nmi_backtrace.c
lib/string.c
mm/backing-dev.c
mm/cma.c
mm/dmapool.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory.c
mm/migrate.c
mm/page-writeback.c
mm/pgtable-generic.c
mm/readahead.c
mm/slab.c
mm/vmalloc.c
mm/vmstat.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/mgmt.c
net/ceph/osd_client.c
net/core/dev.c
net/core/ethtool.c
net/core/filter.c
net/core/net-sysfs.c
net/core/skbuff.c
net/dsa/dsa.c
net/dsa/slave.c
net/ipv4/arp.c
net/ipv4/fib_frontend.c
net/ipv4/fib_trie.c
net/ipv4/gre_offload.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_gre.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/route.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_output.c
net/ipv4/xfrm4_output.c
net/ipv6/addrconf.c
net/ipv6/fib6_rules.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/route.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_policy.c
net/irda/irlmp.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/mac80211/debugfs.c
net/mac80211/status.c
net/mac80211/tx.c
net/netfilter/core.c
net/netfilter/ipset/ip_set_list_set.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/openvswitch/conntrack.c
net/openvswitch/conntrack.h
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/flow.h
net/openvswitch/flow_netlink.c
net/openvswitch/flow_netlink.h
net/openvswitch/flow_table.c
net/openvswitch/vport-geneve.c
net/openvswitch/vport-gre.c
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport-vxlan.c
net/openvswitch/vport.c
net/openvswitch/vport.h
net/rds/tcp_recv.c
net/sched/act_mirred.c
net/sched/sch_hhf.c
net/sctp/associola.c
net/sctp/sm_sideeffect.c
net/sunrpc/xprtrdma/fmr_ops.c
net/sunrpc/xprtrdma/frwr_ops.c
net/sunrpc/xprtrdma/physical_ops.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/switchdev/switchdev.c
net/sysctl_net.c
net/tipc/bcast.c
net/tipc/msg.c
net/tipc/msg.h
net/tipc/node.c
net/tipc/udp_media.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/vmw_vsock/vmci_transport.h
net/xfrm/xfrm_user.c
samples/bpf/bpf_helpers.h
samples/kprobes/jprobe_example.c
samples/kprobes/kprobe_example.c
samples/kprobes/kretprobe_example.c
scripts/extract-cert.c
scripts/package/builddeb
scripts/sign-file.c
security/keys/gc.c
security/keys/request_key.c
sound/hda/ext/hdac_ext_bus.c
sound/pci/hda/hda_codec.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/au1x/db1200.c
sound/soc/codecs/rt298.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5645.h
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/tas2552.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/wm8962.c
sound/soc/dwc/designware_i2s.c
sound/soc/fsl/imx-ssi.c
sound/soc/soc-ops.c
sound/synth/emux/emux_oss.c
tools/build/.gitignore [new file with mode: 0644]
tools/build/Build [new file with mode: 0644]
tools/build/Build.include
tools/build/Documentation/Build.txt
tools/build/Makefile [new file with mode: 0644]
tools/build/Makefile.build
tools/build/Makefile.feature
tools/build/Makefile.include [new file with mode: 0644]
tools/build/feature/Makefile
tools/build/feature/test-all.c
tools/build/feature/test-get_cpuid.c [new file with mode: 0644]
tools/build/feature/test-numa_num_possible_cpus.c [new file with mode: 0644]
tools/build/fixdep.c [new file with mode: 0644]
tools/build/tests/ex/Build
tools/build/tests/ex/Makefile
tools/build/tests/ex/ex.c
tools/build/tests/ex/inc.c [new file with mode: 0644]
tools/build/tests/run.sh
tools/include/linux/compiler.h
tools/include/linux/err.h [new file with mode: 0644]
tools/include/linux/filter.h [new file with mode: 0644]
tools/lib/api/Build
tools/lib/api/Makefile
tools/lib/api/cpu.c [new file with mode: 0644]
tools/lib/api/cpu.h [new file with mode: 0644]
tools/lib/api/fs/Build
tools/lib/api/fs/debugfs.c [deleted file]
tools/lib/api/fs/debugfs.h [deleted file]
tools/lib/api/fs/findfs.c [deleted file]
tools/lib/api/fs/findfs.h [deleted file]
tools/lib/api/fs/fs.c
tools/lib/api/fs/fs.h
tools/lib/api/fs/tracefs.c [deleted file]
tools/lib/api/fs/tracefs.h [deleted file]
tools/lib/api/fs/tracing_path.c [new file with mode: 0644]
tools/lib/api/fs/tracing_path.h [new file with mode: 0644]
tools/lib/bpf/Makefile
tools/lib/lockdep/Makefile
tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/lib/traceevent/plugin_kvm.c
tools/perf/Documentation/intel-pt.txt
tools/perf/Documentation/itrace.txt
tools/perf/Documentation/perf-bench.txt
tools/perf/Documentation/perf-inject.txt
tools/perf/Documentation/perf-list.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-script.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/perf-top.txt
tools/perf/Documentation/perf.txt
tools/perf/MANIFEST
tools/perf/Makefile.perf
tools/perf/arch/common.c
tools/perf/arch/common.h
tools/perf/arch/x86/Build
tools/perf/arch/x86/Makefile
tools/perf/arch/x86/include/arch-tests.h [new file with mode: 0644]
tools/perf/arch/x86/tests/Build
tools/perf/arch/x86/tests/arch-tests.c [new file with mode: 0644]
tools/perf/arch/x86/tests/dwarf-unwind.c
tools/perf/arch/x86/tests/gen-insn-x86-dat.awk [new file with mode: 0644]
tools/perf/arch/x86/tests/gen-insn-x86-dat.sh [new file with mode: 0755]
tools/perf/arch/x86/tests/insn-x86-dat-32.c [new file with mode: 0644]
tools/perf/arch/x86/tests/insn-x86-dat-64.c [new file with mode: 0644]
tools/perf/arch/x86/tests/insn-x86-dat-src.c [new file with mode: 0644]
tools/perf/arch/x86/tests/insn-x86.c [new file with mode: 0644]
tools/perf/arch/x86/tests/intel-cqm.c [new file with mode: 0644]
tools/perf/arch/x86/tests/perf-time-to-tsc.c [moved from tools/perf/tests/perf-time-to-tsc.c with 98% similarity]
tools/perf/arch/x86/tests/rdpmc.c [moved from tools/perf/tests/rdpmc.c with 97% similarity]
tools/perf/arch/x86/util/dwarf-regs.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/bench/Build
tools/perf/bench/mem-functions.c [new file with mode: 0644]
tools/perf/bench/mem-memcpy.c [deleted file]
tools/perf/bench/numa.c
tools/perf/bench/sched-messaging.c
tools/perf/builtin-annotate.c
tools/perf/builtin-bench.c
tools/perf/builtin-evlist.c
tools/perf/builtin-help.c
tools/perf/builtin-inject.c
tools/perf/builtin-kmem.c
tools/perf/builtin-kvm.c
tools/perf/builtin-list.c
tools/perf/builtin-probe.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/config/Makefile
tools/perf/perf.c
tools/perf/python/twatch.py
tools/perf/scripts/python/export-to-postgresql.py
tools/perf/tests/Build
tools/perf/tests/bpf-script-example.c [new file with mode: 0644]
tools/perf/tests/builtin-test.c
tools/perf/tests/code-reading.c
tools/perf/tests/dwarf-unwind.c
tools/perf/tests/evsel-tp-sched.c
tools/perf/tests/hists_filter.c
tools/perf/tests/make
tools/perf/tests/mmap-basic.c
tools/perf/tests/openat-syscall-all-cpus.c
tools/perf/tests/openat-syscall-tp-fields.c
tools/perf/tests/openat-syscall.c
tools/perf/tests/parse-events.c
tools/perf/tests/tests.h
tools/perf/tests/topology.c [new file with mode: 0644]
tools/perf/tests/vmlinux-kallsyms.c
tools/perf/trace/strace/groups/file
tools/perf/ui/browser.c
tools/perf/ui/browser.h
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/browsers/map.c
tools/perf/ui/browsers/scripts.c
tools/perf/ui/hist.c
tools/perf/ui/tui/setup.c
tools/perf/util/Build
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/auxtrace.c
tools/perf/util/auxtrace.h
tools/perf/util/bpf-loader.c [new file with mode: 0644]
tools/perf/util/bpf-loader.h [new file with mode: 0644]
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h
tools/perf/util/env.c [new file with mode: 0644]
tools/perf/util/env.h [new file with mode: 0644]
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/dwarf-regs.h
tools/perf/util/intel-pt-decoder/Build
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/intel-pt-decoder/intel-pt-log.c
tools/perf/util/intel-pt-decoder/intel-pt-log.h
tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
tools/perf/util/intel-pt.c
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/parse-branch-options.c
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.l
tools/perf/util/parse-events.y
tools/perf/util/parse-options.c
tools/perf/util/parse-options.h
tools/perf/util/perf_regs.c
tools/perf/util/perf_regs.h
tools/perf/util/pmu.c
tools/perf/util/probe-event.c
tools/perf/util/probe-event.h
tools/perf/util/probe-file.c
tools/perf/util/probe-file.h
tools/perf/util/probe-finder.c
tools/perf/util/python.c
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/srcline.c
tools/perf/util/stat.c
tools/perf/util/stat.h
tools/perf/util/strbuf.c
tools/perf/util/strbuf.h
tools/perf/util/symbol-elf.c
tools/perf/util/symbol-minimal.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/trace-event-info.c
tools/perf/util/trace-event.c
tools/perf/util/trace-event.h
tools/perf/util/unwind-libunwind.c
tools/perf/util/usage.c
tools/perf/util/util.c
tools/perf/util/util.h
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/configs/lock/CFLIST
tools/testing/selftests/rcutorture/configs/lock/LOCK05 [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/lock/LOCK05.boot [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/lock/LOCK06 [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/lock/LOCK06.boot [new file with mode: 0644]
tools/testing/selftests/timers/Makefile
tools/testing/selftests/timers/adjtick.c [new file with mode: 0644]
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/entry_from_vm86.c
tools/testing/selftests/x86/ptrace_syscall.c [new file with mode: 0644]
tools/testing/selftests/x86/raw_syscall_helper_32.S [new file with mode: 0644]
tools/testing/selftests/x86/test_syscall_vdso.c [new file with mode: 0644]
tools/testing/selftests/x86/thunks_32.S [new file with mode: 0644]
tools/testing/selftests/x86/unwind_vdso.c [new file with mode: 0644]
tools/vm/page-types.c
virt/kvm/arm/arch_timer.c
virt/kvm/arm/vgic.c

index 4b31af54ccd5864359c0810f9733f3026181a631..b1e9a97653dc64853775a97db377a8f269cc8d95 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -59,6 +59,7 @@ James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
 James Ketrenos <jketreno@io.(none)>
+<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
 Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
 Jens Axboe <axboe@suse.de>
index 6d886300485827846541744075f7919be3d16200..f447f0516f074c700b0c78ca87fcfcf4595ea49f 100644 (file)
@@ -43,7 +43,7 @@ o  udev                   081                     # udevd --version
 o  grub                   0.93                    # grub --version || grub-install --version
 o  mcelog                 0.6                     # mcelog --version
 o  iptables               1.4.2                   # iptables -V
-o  openssl & libcrypto    1.0.1k                  # openssl version
+o  openssl & libcrypto    1.0.                  # openssl version
 
 
 Kernel compilation
index 3a8e15cba816a4ea16fb0208518046214ebff1e6..8d990bde8693fdb85d4f4e67b5d9aaa155418990 100644 (file)
@@ -32,9 +32,9 @@ top of the irq_alloc_desc*() API.  An irq_domain to manage mapping is
 preferred over interrupt controller drivers open coding their own
 reverse mapping scheme.
 
-irq_domain also implements translation from Device Tree interrupt
-specifiers to hwirq numbers, and can be easily extended to support
-other IRQ topology data sources.
+irq_domain also implements translation from an abstract irq_fwspec
+structure to hwirq numbers (Device Tree and ACPI GSI so far), and can
+be easily extended to support other IRQ topology data sources.
 
 === irq_domain usage ===
 An interrupt controller driver creates and registers an irq_domain by
@@ -184,7 +184,7 @@ There are four major interfaces to use hierarchy irq_domain:
    related resources associated with these interrupts.
 3) irq_domain_activate_irq(): activate interrupt controller hardware to
    deliver the interrupt.
-3) irq_domain_deactivate_irq(): deactivate interrupt controller hardware
+4) irq_domain_deactivate_irq(): deactivate interrupt controller hardware
    to stop delivering the interrupt.
 
 Following changes are needed to support hierarchy irq_domain.
index efb9454875ab7453255532887225ad0cb2d5d2fd..0f7fb4298e7e0b657a7df678ee0852de7ab7887d 100644 (file)
@@ -205,6 +205,13 @@ o  For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
        behavior, you might need to replace some of the cond_resched()
        calls with calls to cond_resched_rcu_qs().
 
+o      Booting Linux using a console connection that is too slow to
+       keep up with the boot-time console-message rate.  For example,
+       a 115Kbaud serial console can be -way- too slow to keep up
+       with boot-time message rates, and will frequently result in
+       RCU CPU stall warning messages.  Especially if you have added
+       debug printk()s.
+
 o      Anything that prevents RCU's grace-period kthreads from running.
        This can result in the "All QSes seen" console-log message.
        This message will include information on when the kthread last
index dac02a6219b1cae3c657512b293b957627cec830..118e7c176ce76aaca534c50fe7fe01da1cb3b710 100644 (file)
@@ -166,40 +166,27 @@ test_no_idle_hz   Whether or not to test the ability of RCU to operate in
 
 torture_type   The type of RCU to test, with string values as follows:
 
-               "rcu":  rcu_read_lock(), rcu_read_unlock() and call_rcu().
-
-               "rcu_sync":  rcu_read_lock(), rcu_read_unlock(), and
-                       synchronize_rcu().
-
-               "rcu_expedited": rcu_read_lock(), rcu_read_unlock(), and
-                       synchronize_rcu_expedited().
+               "rcu":  rcu_read_lock(), rcu_read_unlock() and call_rcu(),
+                       along with expedited, synchronous, and polling
+                       variants.
 
                "rcu_bh": rcu_read_lock_bh(), rcu_read_unlock_bh(), and
-                       call_rcu_bh().
-
-               "rcu_bh_sync": rcu_read_lock_bh(), rcu_read_unlock_bh(),
-                       and synchronize_rcu_bh().
+                       call_rcu_bh(), along with expedited and synchronous
+                       variants.
 
-               "rcu_bh_expedited": rcu_read_lock_bh(), rcu_read_unlock_bh(),
-                       and synchronize_rcu_bh_expedited().
+               "rcu_busted": This tests an intentionally incorrect version
+                       of RCU in order to help test rcutorture itself.
 
                "srcu": srcu_read_lock(), srcu_read_unlock() and
-                       call_srcu().
-
-               "srcu_sync": srcu_read_lock(), srcu_read_unlock() and
-                       synchronize_srcu().
-
-               "srcu_expedited": srcu_read_lock(), srcu_read_unlock() and
-                       synchronize_srcu_expedited().
+                       call_srcu(), along with expedited and
+                       synchronous variants.
 
                "sched": preempt_disable(), preempt_enable(), and
-                       call_rcu_sched().
-
-               "sched_sync": preempt_disable(), preempt_enable(), and
-                       synchronize_sched().
+                       call_rcu_sched(), along with expedited,
+                       synchronous, and polling variants.
 
-               "sched_expedited": preempt_disable(), preempt_enable(), and
-                       synchronize_sched_expedited().
+               "tasks": voluntary context switch and call_rcu_tasks(),
+                       along with expedited and synchronous variants.
 
                Defaults to "rcu".
 
index 97f17e9decdaeced345689e9eb410a79f642b10d..ec6998b1b6d04f3139ed6c066537cc059c89838d 100644 (file)
@@ -56,14 +56,14 @@ rcuboost:
 
 The output of "cat rcu/rcu_preempt/rcudata" looks as follows:
 
-  0!c=30455 g=30456 pq=1/0 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
-  1!c=30719 g=30720 pq=1/0 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
-  2!c=30150 g=30151 pq=1/1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
-  3 c=31249 g=31250 pq=1/1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
-  4!c=29502 g=29503 pq=1/0 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
-  5 c=31201 g=31202 pq=1/0 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
-  6!c=30253 g=30254 pq=1/0 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
-  7 c=31178 g=31178 pq=1/0 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
+  0!c=30455 g=30456 cnq=1/0:1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
+  1!c=30719 g=30720 cnq=1/0:0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
+  2!c=30150 g=30151 cnq=1/1:1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
+  3 c=31249 g=31250 cnq=1/1:0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
+  4!c=29502 g=29503 cnq=1/0:1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
+  5 c=31201 g=31202 cnq=1/0:1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
+  6!c=30253 g=30254 cnq=1/0:1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
+  7 c=31178 g=31178 cnq=1/0:0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
 
 This file has one line per CPU, or eight for this 8-CPU system.
 The fields are as follows:
@@ -188,14 +188,14 @@ o "ca" is the number of RCU callbacks that have been adopted by this
 Kernels compiled with CONFIG_RCU_BOOST=y display the following from
 /debug/rcu/rcu_preempt/rcudata:
 
-  0!c=12865 g=12866 pq=1/0 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
-  1 c=14407 g=14408 pq=1/0 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
-  2 c=14407 g=14408 pq=1/0 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
-  3 c=14407 g=14408 pq=1/0 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
-  4 c=14405 g=14406 pq=1/0 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
-  5!c=14168 g=14169 pq=1/0 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
-  6 c=14404 g=14405 pq=1/0 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
-  7 c=14407 g=14408 pq=1/0 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
+  0!c=12865 g=12866 cnq=1/0:1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
+  1 c=14407 g=14408 cnq=1/0:0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
+  2 c=14407 g=14408 cnq=1/0:0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
+  3 c=14407 g=14408 cnq=1/0:0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
+  4 c=14405 g=14406 cnq=1/0:1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
+  5!c=14168 g=14169 cnq=1/0:0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
+  6 c=14404 g=14405 cnq=1/0:0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
+  7 c=14407 g=14408 cnq=1/0:1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
 
 This is similar to the output discussed above, but contains the following
 additional fields:
index adc2184009c5ecec9ba4615291ca8998743b48e2..dc49c6712b17ff4968d3c4fdf2b304e0292fb5be 100644 (file)
@@ -364,7 +364,7 @@ uses of RCU may be found in listRCU.txt, arrayRCU.txt, and NMI-RCU.txt.
        };
        DEFINE_SPINLOCK(foo_mutex);
 
-       struct foo *gbl_foo;
+       struct foo __rcu *gbl_foo;
 
        /*
         * Create a new struct foo that is the same as the one currently
@@ -386,7 +386,7 @@ uses of RCU may be found in listRCU.txt, arrayRCU.txt, and NMI-RCU.txt.
 
                new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
                spin_lock(&foo_mutex);
-               old_fp = gbl_foo;
+               old_fp = rcu_dereference_protected(gbl_foo, lockdep_is_held(&foo_mutex));
                *new_fp = *old_fp;
                new_fp->a = new_a;
                rcu_assign_pointer(gbl_foo, new_fp);
@@ -487,7 +487,7 @@ The foo_update_a() function might then be written as follows:
 
                new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
                spin_lock(&foo_mutex);
-               old_fp = gbl_foo;
+               old_fp = rcu_dereference_protected(gbl_foo, lockdep_is_held(&foo_mutex));
                *new_fp = *old_fp;
                new_fp->a = new_a;
                rcu_assign_pointer(gbl_foo, new_fp);
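For context, the reader side that pairs with this updated foo_update_a() is
unchanged: readers bracket their access with rcu_read_lock()/rcu_read_unlock()
and fetch the pointer with rcu_dereference().  A minimal sketch, assuming the
gbl_foo and struct foo definitions shown above:

	int foo_get_a(void)
	{
		int retval;

		rcu_read_lock();
		/* rcu_dereference() pairs with the updater's rcu_assign_pointer(). */
		retval = rcu_dereference(gbl_foo)->a;
		rcu_read_unlock();
		return retval;
	}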
diff --git a/Documentation/arm/OMAP/README b/Documentation/arm/OMAP/README
new file mode 100644 (file)
index 0000000..75645c4
--- /dev/null
@@ -0,0 +1,7 @@
+This file contains documentation for running the mainline
+kernel on omaps.
+
+KERNEL         NEW DEPENDENCIES
+v4.3+          Update is needed for custom .config files to make sure
+               CONFIG_REGULATOR_PBIAS is enabled for MMC1 to work
+               properly.
diff --git a/Documentation/arm/SA1100/Victor b/Documentation/arm/SA1100/Victor
deleted file mode 100644 (file)
index 9cff415..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-Victor is known as a "digital talking book player" manufactured by
-VisuAide, Inc. to be used by blind people.
-
-For more information related to Victor, see:
-
-       http://www.humanware.com/en-usa/products
-
-Of course Victor is using Linux as its main operating system.
-The Victor implementation for Linux is maintained by Nicolas Pitre:
-
-       nico@visuaide.com
-       nico@fluxnic.net
-
-For any comments, please feel free to contact me through the above
-addresses.
-
index 4178ebda6e665c1c2e9dac6451e2137acfd41eb1..546a39048eb0d6893a6d2fa81fe309bb5555ddbc 100644 (file)
@@ -54,7 +54,7 @@ VMALLOC_START VMALLOC_END-1   vmalloc() / ioremap() space.
                                located here through iotable_init().
                                VMALLOC_START is based upon the value
                                of the high_memory variable, and VMALLOC_END
-                               is equal to 0xff000000.
+                               is equal to 0xff800000.
 
 PAGE_OFFSET    high_memory-1   Kernel direct-mapped RAM region.
                                This maps the platforms RAM, and typically
index d60030a1b909bc68dbe7e6d275064cafbe3fdf4e..7b3fdfe0f7ba37a7ff6a0e46cfec18e1fcfbe68e 100644 (file)
@@ -60,5 +60,3 @@ linux,uefi-mmap-desc-ver  | 32-bit | Version of the mmap descriptor format.
 --------------------------------------------------------------------------------
 linux,uefi-stub-kern-ver  | string | Copy of linux_banner from build.
 --------------------------------------------------------------------------------
-
-For verbose debug messages, specify 'uefi_debug' on the kernel command line.
index 7d9d3c2286b282d96f03cd1545be5780e998002e..369a4f48eb0dac3c80d5f22054fc4b02f1006f28 100644 (file)
@@ -173,13 +173,22 @@ Before jumping into the kernel, the following conditions must be met:
   the kernel image will be entered must be initialised by software at a
   higher exception level to prevent execution in an UNKNOWN state.
 
-  For systems with a GICv3 interrupt controller:
+  For systems with a GICv3 interrupt controller to be used in v3 mode:
   - If EL3 is present:
     ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1.
     ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1.
   - If the kernel is entered at EL1:
     ICC_SRE_EL2.Enable (bit 3) must be initialised to 0b1.
     ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1.
+  - The DT or ACPI tables must describe a GICv3 interrupt controller.
+
+  For systems with a GICv3 interrupt controller to be used in
+  compatibility (v2) mode:
+  - If EL3 is present:
+    ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b0.
+  - If the kernel is entered at EL1:
+    ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b0.
+  - The DT or ACPI tables must describe a GICv2 interrupt controller.
 
 The requirements described above for CPU mode, caches, MMUs, architected
 timers, coherency and system registers apply to all CPUs.  All CPUs must
index b19fc34efdb17921af43bda0000b13dc82640451..c9d1cacb439590a02dbfb1282cbe0d3abe3a9b0d 100644 (file)
@@ -542,6 +542,10 @@ The routines xchg() and cmpxchg() must provide the same exact
 memory-barrier semantics as the atomic and bit operations returning
 values.
 
+Note: If someone wants to use xchg(), cmpxchg() and their variants,
+linux/atomic.h should be included rather than asm/cmpxchg.h, unless
+the code is in arch/* and can take care of itself.
+
 Spinlocks and rwlocks have memory barrier expectations as well.
 The rule to follow is simple:
 
index 0d5bc46dc1676869358cd6f36975905031f17f9e..ad6949bff2e392d63e7ddb82391746687ea6235e 100644 (file)
@@ -41,9 +41,13 @@ useless and be disabled, returning errors.  So it is important to monitor
 the amount of free space and expand the <COW device> before it fills up.
 
 <persistent?> is P (Persistent) or N (Not persistent - will not survive
-after reboot).
-The difference is that for transient snapshots less metadata must be
-saved on disk - they can be kept in memory by the kernel.
+after reboot).  O (Overflow) can be added as a persistent store option
+to allow userspace to advertise its support for seeing "Overflow" in the
+snapshot status.  So supported store types are "P", "PO" and "N".
+
+The difference between persistent and transient is that with transient
+snapshots less metadata must be saved on disk - it can be kept in
+memory by the kernel.
 
 
 * snapshot-merge <origin> <COW device> <persistent> <chunksize>
index 2da059a4790cb3c62365bedae458c5565589b225..cc56021eb60babea6c580bfbe594aca3c17dfce5 100644 (file)
@@ -11,13 +11,14 @@ have PPIs or SGIs.
 Main node required properties:
 
 - compatible : should be one of:
-       "arm,gic-400"
+       "arm,arm1176jzf-devchip-gic"
+       "arm,arm11mp-gic"
        "arm,cortex-a15-gic"
-       "arm,cortex-a9-gic"
        "arm,cortex-a7-gic"
-       "arm,arm11mp-gic"
+       "arm,cortex-a9-gic"
+       "arm,gic-400"
+       "arm,pl390"
        "brcm,brahma-b15-gic"
-       "arm,arm1176jzf-devchip-gic"
        "qcom,msm-8660-qgic"
        "qcom,msm-qgic2"
 - interrupt-controller : Identifies the node as an interrupt controller
@@ -58,6 +59,21 @@ Optional
   regions, used when the GIC doesn't have banked registers. The offset is
   cpu-offset * cpu-nr.
 
+- clocks        : List of phandle and clock-specifier pairs, one for each entry
+  in clock-names.
+- clock-names   : List of names for the GIC clock input(s). Valid clock names
+  depend on the GIC variant:
+       "ic_clk" (for "arm,arm11mp-gic")
+       "PERIPHCLKEN" (for "arm,cortex-a15-gic")
+       "PERIPHCLK", "PERIPHCLKEN" (for "arm,cortex-a9-gic")
+       "clk" (for "arm,gic-400")
+       "gclk" (for "arm,pl390")
+
+- power-domains : A phandle and PM domain specifier as defined by bindings of
+                 the power controller specified by phandle, used when the GIC
+                 is part of a Power or Clock Domain.
+
+
 Example:
 
        intc: interrupt-controller@fff11000 {
index 75b8610939faf819100c1e4fc7089b8542b753a2..383ea19c2bf0073e3a09ec4c254db28f17f28738 100644 (file)
@@ -19,6 +19,11 @@ interrupts.
 - reg : Specify the base address and the size of the TWD timer
        register window.
 
+Optional
+
+- always-on : a boolean property. If present, the timer is powered through
+  an always-on power domain, therefore it never loses context.
+
 Example:
 
        twd-timer@2c000600 {
index 78edb80002c814278d5d5dfe9a60c4812fe45e64..78e2a31c58d0735b40254e592e1fa7c73983d571 100644 (file)
@@ -5,6 +5,8 @@ The follow error types are supported:
 
   memory controller    - Memory controller
   PMD (L1/L2)          - Processor module unit (PMD) L1/L2 cache
+  L3                   - L3 cache controller
+  SoC                  - SoC IPs such as Ethernet, SATA, etc.
 
 The following section describes the EDAC DT node binding.
 
@@ -30,6 +32,17 @@ Required properties for PMD subnode:
 - reg                  : First resource shall be the PMD resource.
 - pmd-controller       : Instance number of the PMD controller.
 
+Required properties for L3 subnode:
+- compatible           : Shall be "apm,xgene-edac-l3" or
+                          "apm,xgene-edac-l3-v2".
+- reg                  : First resource shall be the L3 EDAC resource.
+
+Required properties for SoC subnode:
+- compatible           : Shall be "apm,xgene-edac-soc-v1" for revision 1 or
+                          "apm,xgene-edac-l3-soc" for general value reporting
+                          only.
+- reg                  : First resource shall be the SoC EDAC resource.
+
 Example:
        csw: csw@7e200000 {
                compatible = "apm,xgene-csw", "syscon";
@@ -76,4 +89,14 @@ Example:
                        reg = <0x0 0x7c000000 0x0 0x200000>;
                        pmd-controller = <0>;
                };
+
+               edacl3@7e600000 {
+                       compatible = "apm,xgene-edac-l3";
+                       reg = <0x0 0x7e600000 0x0 0x1000>;
+               };
+
+               edacsoc@7e930000 {
+                       compatible = "apm,xgene-edac-soc-v1";
+                       reg = <0x0 0x7e930000 0x0 0x1000>;
+               };
        };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-msm.txt b/Documentation/devicetree/bindings/gpio/gpio-msm.txt
deleted file mode 100644 (file)
index ac20e68..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-MSM GPIO controller bindings
-
-Required properties:
-- compatible:
-  - "qcom,msm-gpio" for MSM controllers
-- #gpio-cells : Should be two.
-  - first cell is the pin number
-  - second cell is used to specify optional parameters (unused)
-- gpio-controller : Marks the device node as a GPIO controller.
-- #interrupt-cells : Should be 2.
-- interrupt-controller: Mark the device node as an interrupt controller
-- interrupts : Specify the TLMM summary interrupt number
-- ngpio : Specify the number of MSM GPIOs
-
-Example:
-
-       msmgpio: gpio@fd510000 {
-               compatible = "qcom,msm-gpio";
-               gpio-controller;
-               #gpio-cells = <2>;
-               interrupt-controller;
-               #interrupt-cells = <2>;
-               reg = <0xfd510000 0x4000>;
-               interrupts = <0 208 0>;
-               ngpio = <150>;
-       };
index b9a42f294dd01c63f97831c7487881aebd7d5d8f..13df9933f4cda74da94f2563675e6cd37ac97375 100644 (file)
@@ -24,6 +24,7 @@ Required properties:
        ti,tca6408
        ti,tca6416
        ti,tca6424
+       ti,tca9539
        exar,xra1202
 
 Example:
index db4c6a663c031280256d408e6a9f4bd15082a94b..7b542657f259577bc4e5fd5aa0b882053855bfad 100644 (file)
@@ -12,6 +12,13 @@ Required properties:
 - interrupts           : Interrupt specifier (see interrupt bindings for
                          details)
 - interrupt-parent     : Must be core interrupt controller
+- interrupt-controller : Marks the device node as an interrupt controller.
+- #interrupt-cells     : Should be 2.  The first cell is the GPIO number.
+                         Bits [3:0] of the second cell are used to specify the trigger type and level flags:
+                             1 = low-to-high edge triggered.
+                             2 = high-to-low edge triggered.
+                             4 = active high level-sensitive.
+                             8 = active low level-sensitive.
 - reg                  : Address and length of the register set for the device
 
 Example:
@@ -22,5 +29,7 @@ Example:
                gpio-controller;
                interrupt-parent = <&intc>;
                interrupts = <0 20 4>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
                reg = <0xe000a000 0x1000>;
        };
index 82d40e2505f641be635dd8769b0051508bcf0120..069cdf6f9dace228fe8fd97dc63c2d861273fc03 100644 (file)
@@ -54,9 +54,13 @@ only uses one.
 
 gpio-specifier may encode: bank, pin position inside the bank,
 whether pin is open-drain and whether pin is logically inverted.
+
 Exact meaning of each specifier cell is controller specific, and must
-be documented in the device tree binding for the device. Use the macros
-defined in include/dt-bindings/gpio/gpio.h whenever possible:
+be documented in the device tree binding for the device.
+
+Most controllers, however, specify a generic flag bitfield
+in the last cell, so for these, use the macros defined in
+include/dt-bindings/gpio/gpio.h whenever possible:
 
 Example of a node using GPIOs:
 
@@ -67,6 +71,15 @@ Example of a node using GPIOs:
 GPIO_ACTIVE_HIGH is 0, so in this example gpio-specifier is "18 0" and encodes
 GPIO pin number, and GPIO flags as accepted by the "qe_pio_e" gpio-controller.
 
+Optional standard bitfield specifiers for the last cell:
+
+- Bit 0: 0 means active high, 1 means active low
+- Bit 1: 1 means single-ended wiring, see:
+           https://en.wikipedia.org/wiki/Single-ended_triode
+          When used with active-low, this means open drain/collector, see:
+           https://en.wikipedia.org/wiki/Open_collector
+          When used with active-high, this means open source/emitter
+
 1.1) GPIO specifier best practices
 ----------------------------------
 
@@ -118,6 +131,30 @@ Every GPIO controller node must contain both an empty "gpio-controller"
 property, and a #gpio-cells integer property, which indicates the number of
 cells in a gpio-specifier.
 
+Optionally, a GPIO controller may have an "ngpios" property. This property
+indicates the number of in-use slots out of the available slots for GPIOs. The
+typical example is something like this: the hardware register is 32 bits
+wide, but only 18 of the bits have a physical counterpart. The driver is
+generally written so that all 32 bits can be used, but the IP block is reused
+in a lot of designs, some using all 32 bits, some using 18 and some using
+12. In this case, setting "ngpios = <18>;" informs the driver that only the
+first 18 GPIOs, at local offset 0 .. 17, are in use.
+
+If these GPIOs do not happen to be the first N GPIOs at offset 0...N-1, an
+additional bitmask is needed to specify which GPIOs are actually in use,
+and which are dummies. The bindings for this case have not yet been
+specified, but should be specified if/when such hardware appears.
+
+Example:
+
+gpio-controller@00000000 {
+       compatible = "foo";
+       reg = <0x00000000 0x1000>;
+       gpio-controller;
+       #gpio-cells = <2>;
+       ngpios = <18>;
+};
+
 The GPIO chip may contain GPIO hog definitions. GPIO hogging is a mechanism
 providing automatic GPIO request and configuration as part of the
 gpio-controller's driver probe function.
diff --git a/Documentation/devicetree/bindings/gpio/netxbig-gpio-ext.txt b/Documentation/devicetree/bindings/gpio/netxbig-gpio-ext.txt
new file mode 100644 (file)
index 0000000..50ec2e6
--- /dev/null
@@ -0,0 +1,22 @@
+Binding for the GPIO extension bus found on some LaCie/Seagate boards
+(Example: 2Big/5Big Network v2, 2Big NAS).
+
+Required properties:
+- compatible: "lacie,netxbig-gpio-ext".
+- addr-gpios: GPIOs representing the address register (LSB -> MSB).
+- data-gpios: GPIOs representing the data register (LSB -> MSB).
+- enable-gpio: latches the new configuration (address, data) on the rising edge.
+
+Example:
+
+netxbig_gpio_ext: netxbig-gpio-ext {
+       compatible = "lacie,netxbig-gpio-ext";
+
+       addr-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH
+                     &gpio1 16 GPIO_ACTIVE_HIGH
+                     &gpio1 17 GPIO_ACTIVE_HIGH>;
+       data-gpios = <&gpio1 12 GPIO_ACTIVE_HIGH
+                     &gpio1 13 GPIO_ACTIVE_HIGH
+                     &gpio1 14 GPIO_ACTIVE_HIGH>;
+       enable-gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>;
+};
index 635a3b03663002d8f4eb47b25fd504d2a7ebca93..8d91ba9ff2fd0918bbf18fc1149f2fb3613bf80e 100644 (file)
@@ -25,7 +25,7 @@ Example:
                /* Cypress Gen3 touchpad */
                touchpad@67 {
                        compatible = "cypress,cyapa";
-                       reg = <0x24>;
+                       reg = <0x67>;
                        interrupt-parent = <&gpio>;
                        interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO 2 */
                        wakeup-source;
index 391717a68f3b1dffe100762775a4bd0368184a83..ec96b1f0147886102554c16e3bd260dc3f425619 100644 (file)
@@ -4,8 +4,8 @@ The MISC interrupt controller is a secondary controller for lower priority
 interrupt.
 
 Required Properties:
-- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc"
-  as fallback
+- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" or
+  "qca,<soctype>-cpu-intc", "qca,ar7240-misc-intc"
 - reg: Base address and size of the controllers memory area
 - interrupt-parent: phandle of the parent interrupt controller.
 - interrupts: Interrupt specifier for the controllers interrupt.
@@ -13,6 +13,9 @@ Required Properties:
 - #interrupt-cells : Specifies the number of cells needed to encode interrupt
                     source, should be 1
 
+Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x,
+use ar7240 for all other SoCs.
+
 Please refer to interrupts.txt in this directory for details of the common
 Interrupt Controllers bindings used by client devices.
 
@@ -28,3 +31,16 @@ Example:
                interrupt-controller;
                #interrupt-cells = <1>;
        };
+
+Another example:
+
+       interrupt-controller@18060010 {
+               compatible = "qca,ar9331-misc-intc", "qca,ar7240-misc-intc";
+               reg = <0x18060010 0x4>;
+
+               interrupt-parent = <&cpuintc>;
+               interrupts = <6>;
+
+               interrupt-controller;
+               #interrupt-cells = <1>;
+       };
index 63633bdea7e40ea84aca7954a2d671a5d9163ad8..ae5054c27c996564b9675fac7d245bd6af28b0bc 100644 (file)
@@ -10,6 +10,7 @@ Required properties:
     - "renesas,irqc-r8a7792" (R-Car V2H)
     - "renesas,irqc-r8a7793" (R-Car M2-N)
     - "renesas,irqc-r8a7794" (R-Car E2)
+    - "renesas,intc-ex-r8a7795" (R-Car H3)
 - #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
   interrupts.txt in this directory
 - clocks: Must contain a reference to the functional clock.
index c05ed91a4e42803abccc9769848a3d3fb16917b7..85c0c58617f6da50d3ac0f7f4177df3d9aeb68ab 100644 (file)
@@ -27,9 +27,9 @@ Required properties of the LED child node:
 - flash-max-microamp : see Documentation/devicetree/bindings/leds/common.txt
                        Maximum flash LED supply current can be calculated using
                        following formula: I = 1A * 162kohm / Rset.
-- flash-timeout-us : see Documentation/devicetree/bindings/leds/common.txt
-                     Maximum flash timeout can be calculated using following
-                     formula: T = 8.82 * 10^9 * Ct.
+- flash-max-timeout-us : see Documentation/devicetree/bindings/leds/common.txt
+                         Maximum flash timeout can be calculated using following
+                         formula: T = 8.82 * 10^9 * Ct.
 
 Optional properties of the LED child node:
 - label : see Documentation/devicetree/bindings/leds/common.txt
@@ -54,7 +54,7 @@ aat1290 {
                label = "aat1290-flash";
                led-max-microamp = <520833>;
                flash-max-microamp = <1012500>;
-               flash-timeout-us = <1940000>;
+               flash-max-timeout-us = <1940000>;
        };
 };
 
index f9e36adc0ebf9715594d2f298d86f221a30bd83b..3f48c1eaf0857799c6dd815340e83b0dc301f100 100644 (file)
@@ -29,6 +29,14 @@ Required properties:
 Optional properties:
   - brcm,serial-leds : Boolean, enables Serial LEDs.
     Default : false
+  - brcm,serial-mux : Boolean, enables Serial LEDs multiplexing.
+    Default : false
+  - brcm,serial-clk-low : Boolean, makes clock signal active low.
+    Default : false
+  - brcm,serial-dat-low : Boolean, makes data signal active low.
+    Default : false
+  - brcm,serial-shift-inv : Boolean, inverts Serial LEDs shift direction.
+    Default : false
 
 Each LED is represented as a sub-node of the brcm,bcm6328-leds device.
 
@@ -110,6 +118,8 @@ Scenario 2 : BCM63268 with Serial/GPHY0 LEDs
                #size-cells = <0>;
                reg = <0x10001900 0x24>;
                brcm,serial-leds;
+               brcm,serial-dat-low;
+               brcm,serial-shift-inv;
 
                gphy0_spd0@0 {
                        reg = <0>;
diff --git a/Documentation/devicetree/bindings/leds/leds-netxbig.txt b/Documentation/devicetree/bindings/leds/leds-netxbig.txt
new file mode 100644 (file)
index 0000000..5ef92a2
--- /dev/null
@@ -0,0 +1,92 @@
+Binding for the CPLD LEDs (GPIO extension bus) found on some LaCie/Seagate
+boards (Example: 2Big/5Big Network v2, 2Big NAS).
+
+Required properties:
+- compatible: "lacie,netxbig-leds".
+- gpio-ext: Phandle for the gpio-ext bus.
+
+Optional properties:
+- timers: Timer array. Each timer entry is represented by three integers:
+  Mode (gpio-ext bus), delay_on and delay_off.
+
+Each LED is represented as a sub-node of the netxbig-leds device.
+
+Required sub-node properties:
+- mode-addr: Mode register address on gpio-ext bus.
+- mode-val: Mode to value mapping. Each entry is represented by two integers:
+  A mode and the corresponding value on the gpio-ext bus.
+- bright-addr: Brightness register address on gpio-ext bus.
+- max-brightness: Maximum brightness value.
+
+Optional sub-node properties:
+- label: Name for this LED. If omitted, the label is taken from the node name.
+- linux,default-trigger: Trigger assigned to the LED.
+
+Example:
+
+netxbig-leds {
+       compatible = "lacie,netxbig-leds";
+
+       gpio-ext = &gpio_ext;
+
+       timers = <NETXBIG_LED_TIMER1 500 500
+                 NETXBIG_LED_TIMER2 500 1000>;
+
+       blue-power {
+               label = "netxbig:blue:power";
+               mode-addr = <0>;
+               mode-val = <NETXBIG_LED_OFF 0
+                           NETXBIG_LED_ON 1
+                           NETXBIG_LED_TIMER1 3
+                           NETXBIG_LED_TIMER2 7>;
+               bright-addr = <1>;
+               max-brightness = <7>;
+       };
+       red-power {
+               label = "netxbig:red:power";
+               mode-addr = <0>;
+               mode-val = <NETXBIG_LED_OFF 0
+                           NETXBIG_LED_ON 2
+                           NETXBIG_LED_TIMER1 4>;
+               bright-addr = <1>;
+               max-brightness = <7>;
+       };
+       blue-sata0 {
+               label = "netxbig:blue:sata0";
+               mode-addr = <3>;
+               mode-val = <NETXBIG_LED_OFF 0
+                           NETXBIG_LED_ON 7
+                           NETXBIG_LED_SATA 1
+                           NETXBIG_LED_TIMER1 3>;
+               bright-addr = <2>;
+               max-brightness = <7>;
+       };
+       red-sata0 {
+               label = "netxbig:red:sata0";
+               mode-addr = <3>;
+               mode-val = <NETXBIG_LED_OFF 0
+                           NETXBIG_LED_ON 2
+                           NETXBIG_LED_TIMER1 4>;
+               bright-addr = <2>;
+               max-brightness = <7>;
+       };
+       blue-sata1 {
+               label = "netxbig:blue:sata1";
+               mode-addr = <4>;
+               mode-val = <NETXBIG_LED_OFF 0
+                           NETXBIG_LED_ON 7
+                           NETXBIG_LED_SATA 1
+                           NETXBIG_LED_TIMER1 3>;
+               bright-addr = <2>;
+               max-brightness = <7>;
+       };
+       red-sata1 {
+               label = "netxbig:red:sata1";
+               mode-addr = <4>;
+               mode-val = <NETXBIG_LED_OFF 0
+                           NETXBIG_LED_ON 2
+                           NETXBIG_LED_TIMER1 4>;
+               bright-addr = <2>;
+               max-brightness = <7>;
+       };
+};
index b7943f3f999546cbaa1ec4170299c9221030427d..dedfb02c744a9b03277cbe42e95237eb3c38b24b 100644 (file)
@@ -22,6 +22,8 @@ Optional properties:
   - voltage-ranges : two cells are required, first cell specifies minimum
     slot voltage (mV), second cell specifies maximum slot voltage (mV).
     Several ranges could be specified.
+  - little-endian : If the host controller is in little-endian mode, specify
+    this property. The default endian mode is big-endian.
 
 Example:
 
index 0384fc3f64e83f954183afca1c899fe65966a481..f693baf87264d6525b105c0eb244730111b47820 100644 (file)
@@ -37,6 +37,7 @@ Optional properties:
 - sd-uhs-sdr104: SD UHS SDR104 speed is supported
 - sd-uhs-ddr50: SD UHS DDR50 speed is supported
 - cap-power-off-card: powering off the card is safe
+- cap-mmc-hw-reset: eMMC hardware reset is supported
 - cap-sdio-irq: enable SDIO IRQ signalling on this interface
 - full-pwr-cycle: full power cycle of the card is supported
 - mmc-ddr-1_8v: eMMC high-speed DDR mode(1.8V I/O) is supported
index a1adfa495ad3d7ce60ea17da34fd3d9e22d4beaf..0120c7f1109cb2cf830b002f9e77e774ddbdef79 100644 (file)
@@ -17,6 +17,11 @@ Required properties:
 - vmmc-supply: power to the Core
 - vqmmc-supply: power to the IO
 
+Optional properties:
+- assigned-clocks: PLL of the source clock
+- assigned-clock-parents: parent of the source clock, used in HS400 mode to get a 400MHz source clock
+- hs400-ds-delay: HS400 DS delay setting
+
 Examples:
 mmc0: mmc@11230000 {
        compatible = "mediatek,mt8173-mmc", "mediatek,mt8135-mmc";
@@ -24,9 +29,13 @@ mmc0: mmc@11230000 {
        interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_LOW>;
        vmmc-supply = <&mt6397_vemc_3v3_reg>;
        vqmmc-supply = <&mt6397_vio18_reg>;
-       clocks = <&pericfg CLK_PERI_MSDC30_0>, <&topckgen CLK_TOP_MSDC50_0_H_SEL>;
+       clocks = <&pericfg CLK_PERI_MSDC30_0>,
+                <&topckgen CLK_TOP_MSDC50_0_H_SEL>;
        clock-names = "source", "hclk";
        pinctrl-names = "default", "state_uhs";
        pinctrl-0 = <&mmc0_pins_default>;
        pinctrl-1 = <&mmc0_pins_uhs>;
+       assigned-clocks = <&topckgen CLK_TOP_MSDC50_0_SEL>;
+       assigned-clock-parents = <&topckgen CLK_TOP_MSDCPLL_D2>;
+       hs400-ds-delay = <0x14015>;
 };
index d38942f6c5ae8ef1b43272bf21ab010145a7fe21..cae29eb5733d8a529d2ec2c3606e4c2255a01884 100644 (file)
@@ -6,11 +6,12 @@ and the properties used by the MMCIF device.
 
 Required properties:
 
-- compatible: must contain one of the following
+- compatible: should be "renesas,mmcif-<soctype>", "renesas,sh-mmcif" as a
+  fallback. Examples with <soctype> are:
        - "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs
        - "renesas,mmcif-r8a7790" for the MMCIF found in r8a7790 SoCs
        - "renesas,mmcif-r8a7791" for the MMCIF found in r8a7791 SoCs
-       - "renesas,sh-mmcif" for the generic MMCIF
+       - "renesas,mmcif-r8a7794" for the MMCIF found in r8a7794 SoCs
 
 - clocks: reference to the functional clock
 
index c327c2d6f23d7f043799fd9bd4e1e9d94a7b4322..3dc13b68fc3ffb6dd136038f9116a96fbde996c7 100644 (file)
@@ -14,6 +14,19 @@ Required Properties:
                                                        before RK3288
        - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
 
+Optional Properties:
+* clocks: from common clock binding: if ciu_drive and ciu_sample are
+  specified in clock-names, should contain handles to these clocks.
+
+* clock-names: Apart from the clock-names described in synopsys-dw-mshc.txt,
+  two more clocks, "ciu-drive" and "ciu-sample", are supported. They are used
+  to control the clock phases; "ciu-sample" is required for tuning high-
+  speed modes.
+
+* rockchip,default-sample-phase: The default phase to set ciu_sample to at
+  probe time, at low speeds, or in cases where all phases work at tuning time.
+  If not specified, 0 deg will be used.
+
 Example:
 
        rkdwmmc0@12200000 {
index 346c6095a6155138ad01f2d39374a8ed97224d12..8636f5ae97e5157ba868e4ddc8bf505465e7bcb3 100644 (file)
@@ -75,6 +75,12 @@ Optional properties:
 * vmmc-supply: The phandle to the regulator to use for vmmc.  If this is
   specified we'll defer probe until we can find this regulator.
 
+* dmas: List of DMA specifiers with the controller specific format as described
+  in the generic DMA client binding. Refer to dma.txt for details.
+
+* dma-names: request names for generic DMA client binding. Must be "rx-tx".
+  Refer to dma.txt for details.
+
 Aliases:
 
 - All the MSHC controller nodes should be represented in the aliases node using
@@ -95,6 +101,23 @@ board specific portions as listed below.
                #size-cells = <0>;
        };
 
+[board specific internal DMA resources]
+
+       dwmmc0@12200000 {
+               clock-frequency = <400000000>;
+               clock-freq-min-max = <400000 200000000>;
+               num-slots = <1>;
+               broken-cd;
+               fifo-depth = <0x80>;
+               card-detect-delay = <200>;
+               vmmc-supply = <&buck8>;
+               bus-width = <8>;
+               cap-mmc-highspeed;
+               cap-sd-highspeed;
+       };
+
+[board specific generic DMA request binding]
+
        dwmmc0@12200000 {
                clock-frequency = <400000000>;
                clock-freq-min-max = <400000 200000000>;
@@ -106,4 +129,6 @@ board specific portions as listed below.
                bus-width = <8>;
                cap-mmc-highspeed;
                cap-sd-highspeed;
+               dmas = <&pdma 12>;
+               dma-names = "rx-tx";
        };
index a9df21aaa1548a64da26c74dfbbef13e652cf3da..a2cae4eb4a60a38c83059c66934e6462ca5a2c6c 100644 (file)
@@ -39,6 +39,7 @@ Required properties:
 Optional properties:
 - dual_emac_res_vlan   : Specifies VID to be used to segregate the ports
 - mac-address          : See ethernet.txt file in the same directory
+- phy-handle           : See ethernet.txt file in the same directory
 
 Note: "ti,hwmods" field is used to fetch the base address and irq
 resources from TI, omap hwmod data base during device registration.
diff --git a/Documentation/devicetree/bindings/net/smsc-lan87xx.txt b/Documentation/devicetree/bindings/net/smsc-lan87xx.txt
new file mode 100644 (file)
index 0000000..974edd5
--- /dev/null
@@ -0,0 +1,24 @@
+SMSC LAN87xx Ethernet PHY
+
+Some boards require special tuning values. Configure them
+through an Ethernet OF device node.
+
+Optional properties:
+
+- smsc,disable-energy-detect:
+  If set, do not enable energy detect mode for the SMSC phy.
+  default: enable energy detect mode
+
+Examples:
+SMSC PHY with disabled energy detect mode on an am335x-based board.
+&davinci_mdio {
+       pinctrl-names = "default", "sleep";
+       pinctrl-0 = <&davinci_mdio_default>;
+       pinctrl-1 = <&davinci_mdio_sleep>;
+       status = "okay";
+
+       ethernetphy0: ethernet-phy@0 {
+               reg = <0>;
+               smsc,disable-energy-detect;
+       };
+};
diff --git a/Documentation/devicetree/bindings/pci/pci-msi.txt b/Documentation/devicetree/bindings/pci/pci-msi.txt
new file mode 100644 (file)
index 0000000..9b3cc81
--- /dev/null
@@ -0,0 +1,220 @@
+This document describes the generic device tree binding for describing the
+relationship between PCI devices and MSI controllers.
+
+Each PCI device under a root complex is uniquely identified by its Requester ID
+(AKA RID). A Requester ID is a triplet of a Bus number, Device number, and
+Function number.
+
+For the purpose of this document, when treated as a numeric value, a RID is
+formatted such that:
+
+* Bits [15:8] are the Bus number.
+* Bits [7:3] are the Device number.
+* Bits [2:0] are the Function number.
+* Any other bits required for padding must be zero.
+
+MSIs may be distinguished in part through the use of sideband data accompanying
+writes. In the case of PCI devices, this sideband data may be derived from the
+Requester ID. A mechanism is required to associate a device with both the MSI
+controllers it can address, and the sideband data that will be associated with
+its writes to those controllers.
+
+For generic MSI bindings, see
+Documentation/devicetree/bindings/interrupt-controller/msi.txt.
+
+
+PCI root complex
+================
+
+Optional properties
+-------------------
+
+- msi-map: Maps a Requester ID to an MSI controller and associated
+  msi-specifier data. The property is an arbitrary number of tuples of
+  (rid-base,msi-controller,msi-base,length), where:
+
+  * rid-base is a single cell describing the first RID matched by the entry.
+
+  * msi-controller is a single phandle to an MSI controller
+
+  * msi-base is an msi-specifier describing the msi-specifier produced for the
+    first RID matched by the entry.
+
+  * length is a single cell describing how many consecutive RIDs are matched
+    following the rid-base.
+
+  Any RID r in the interval [rid-base, rid-base + length) is associated with
+  the listed msi-controller, with the msi-specifier (r - rid-base + msi-base).
+
+- msi-map-mask: A mask to be applied to each Requester ID prior to being mapped
+  to an msi-specifier per the msi-map property.
+
+- msi-parent: Describes the MSI parent of the root complex itself. Where
+  the root complex and MSI controller do not pass sideband data with MSI
+  writes, this property may be used to describe the MSI controller(s)
+  used by PCI devices under the root complex, if defined as such in the
+  binding for the root complex.
+
+
+Example (1)
+===========
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       msi: msi-controller@a {
+               reg = <0xa 0x1>;
+               compatible = "vendor,some-controller";
+               msi-controller;
+               #msi-cells = <1>;
+       };
+
+       pci: pci@f {
+               reg = <0xf 0x1>;
+               compatible = "vendor,pcie-root-complex";
+               device_type = "pci";
+
+               /*
+                * The sideband data provided to the MSI controller is
+                * the RID, identity-mapped.
+                */
+               msi-map = <0x0 &msi 0x0 0x10000>;
+       };
+};
+
+
+Example (2)
+===========
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       msi: msi-controller@a {
+               reg = <0xa 0x1>;
+               compatible = "vendor,some-controller";
+               msi-controller;
+               #msi-cells = <1>;
+       };
+
+       pci: pci@f {
+               reg = <0xf 0x1>;
+               compatible = "vendor,pcie-root-complex";
+               device_type = "pci";
+
+               /*
+                * The sideband data provided to the MSI controller is
+                * the RID, masked to only the device and function bits.
+                */
+               msi-map = <0x0 &msi 0x0 0x100>;
+               msi-map-mask = <0xff>;
+       };
+};
+
+
+Example (3)
+===========
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       msi: msi-controller@a {
+               reg = <0xa 0x1>;
+               compatible = "vendor,some-controller";
+               msi-controller;
+               #msi-cells = <1>;
+       };
+
+       pci: pci@f {
+               reg = <0xf 0x1>;
+               compatible = "vendor,pcie-root-complex";
+               device_type = "pci";
+
+               /*
+                * The sideband data provided to the MSI controller is
+                * the RID, but the high bit of the bus number is
+                * ignored.
+                */
+               msi-map = <0x0000 &msi 0x0000 0x8000>,
+                         <0x8000 &msi 0x0000 0x8000>;
+       };
+};
+
+
+Example (4)
+===========
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       msi: msi-controller@a {
+               reg = <0xa 0x1>;
+               compatible = "vendor,some-controller";
+               msi-controller;
+               #msi-cells = <1>;
+       };
+
+       pci: pci@f {
+               reg = <0xf 0x1>;
+               compatible = "vendor,pcie-root-complex";
+               device_type = "pci";
+
+               /*
+                * The sideband data provided to the MSI controller is
+                * the RID, but the high bit of the bus number is
+                * negated.
+                */
+               msi-map = <0x0000 &msi 0x8000 0x8000>,
+                         <0x8000 &msi 0x0000 0x8000>;
+       };
+};
+
+
+Example (5)
+===========
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       msi_a: msi-controller@a {
+               reg = <0xa 0x1>;
+               compatible = "vendor,some-controller";
+               msi-controller;
+               #msi-cells = <1>;
+       };
+
+       msi_b: msi-controller@b {
+               reg = <0xb 0x1>;
+               compatible = "vendor,some-controller";
+               msi-controller;
+               #msi-cells = <1>;
+       };
+
+       msi_c: msi-controller@c {
+               reg = <0xc 0x1>;
+               compatible = "vendor,some-controller";
+               msi-controller;
+               #msi-cells = <1>;
+       };
+
+       pci: pci@f {
+               reg = <0xf 0x1>;
+               compatible = "vendor,pcie-root-complex";
+               device_type = "pci";
+
+               /*
+                * The sideband data provided to MSI controller a is the
+                * RID, but the high bit of the bus number is negated.
+                * The sideband data provided to MSI controller b is the
+                * RID, identity-mapped.
+                * MSI controller c is not addressable.
+                */
+               msi-map = <0x0000 &msi_a 0x8000 0x08000>,
+                         <0x8000 &msi_a 0x0000 0x08000>,
+                         <0x0000 &msi_b 0x0000 0x10000>;
+       };
+};
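For reference, the translation that the msi-map and msi-map-mask properties
describe (mask the Requester ID, find the matching tuple, then offset into the
msi-specifier space) boils down to the following sketch.  This is purely
illustrative arithmetic, not the kernel's implementation:

	/* One (rid-base, msi-controller, msi-base, length) tuple from msi-map. */
	struct msi_map_entry {
		unsigned int rid_base;
		unsigned int msi_base;
		unsigned int length;
	};

	/*
	 * Fill *msi_spec and return true when the masked RID falls inside
	 * [rid_base, rid_base + length); the specifier is then
	 * rid - rid_base + msi_base, as described above.
	 */
	static bool map_rid_to_msi_spec(unsigned int rid, unsigned int mask,
					const struct msi_map_entry *e,
					unsigned int *msi_spec)
	{
		unsigned int masked = rid & mask;	/* msi-map-mask is applied first */

		if (masked < e->rid_base || masked - e->rid_base >= e->length)
			return false;
		*msi_spec = masked - e->rid_base + e->msi_base;
		return true;
	}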
index 3c821cda1ad05940f3ff32947f7b74c2e990e528..b321b26780dc0697e54f0821289126c2a5c6e801 100644 (file)
@@ -17,6 +17,7 @@ Required properties:
   "allwinner,sun8i-a23-pinctrl"
   "allwinner,sun8i-a23-r-pinctrl"
   "allwinner,sun8i-a33-pinctrl"
+  "allwinner,sun8i-a83t-pinctrl"
 
 - reg: Should contain the register physical address and length for the
   pin controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt
new file mode 100644 (file)
index 0000000..61ac757
--- /dev/null
@@ -0,0 +1,90 @@
+* Atmel PIO4 Controller
+
+The Atmel PIO4 controller is used to select the function of a pin and to
+configure it.
+
+Required properties:
+- compatible: "atmel,sama5d2-pinctrl".
+- reg: base address and length of the PIO controller.
+- interrupts: interrupt outputs from the controller, one for each bank.
+- interrupt-controller: mark the device node as an interrupt controller.
+- #interrupt-cells: should be two.
+- gpio-controller: mark the device node as a gpio controller.
+- #gpio-cells: should be two.
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices.
+
+Subnode format
+Each node (or subnode) will list the pins it needs and how to configure these
+pins.
+
+       node {
+               pinmux = <PIN_NUMBER_PINMUX>;
+               GENERIC_PINCONFIG;
+       };
+
+Required properties:
+- pinmux: integer array. Each integer represents a pin number plus mux and
+ioset settings. Use the macros from boot/dts/<soc>-pinfunc.h file to get the
+right representation of the pin.
+
+Optional properties:
+- GENERIC_PINCONFIG: generic pinconfig options to use, bias-disable,
+bias-pull-down, bias-pull-up, drive-open-drain, input-schmitt-enable,
+input-debounce, output-low, output-high.
+
+Example:
+
+#include <sama5d2-pinfunc.h>
+
+...
+{
+       pioA: pinctrl@fc038000 {
+               compatible = "atmel,sama5d2-pinctrl";
+               reg = <0xfc038000 0x600>;
+               interrupts = <18 IRQ_TYPE_LEVEL_HIGH 7>,
+                            <68 IRQ_TYPE_LEVEL_HIGH 7>,
+                            <69 IRQ_TYPE_LEVEL_HIGH 7>,
+                            <70 IRQ_TYPE_LEVEL_HIGH 7>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               gpio-controller;
+               #gpio-cells = <2>;
+               clocks = <&pioA_clk>;
+
+               pinctrl_i2c0_default: i2c0_default {
+                       pinmux = <PIN_PD21__TWD0>,
+                                <PIN_PD22__TWCK0>;
+                       bias-disable;
+               };
+
+               pinctrl_led_gpio_default: led_gpio_default {
+                       pinmux = <PIN_PB0>,
+                                <PIN_PB5>;
+                       bias-pull-up;
+               };
+
+               pinctrl_sdmmc1_default: sdmmc1_default {
+                       cmd_data {
+                               pinmux = <PIN_PA28__SDMMC1_CMD>,
+                                        <PIN_PA18__SDMMC1_DAT0>,
+                                        <PIN_PA19__SDMMC1_DAT1>,
+                                        <PIN_PA20__SDMMC1_DAT2>,
+                                        <PIN_PA21__SDMMC1_DAT3>;
+                               bias-pull-up;
+                       };
+
+                       ck_cd {
+                               pinmux = <PIN_PA22__SDMMC1_CK>,
+                                        <PIN_PA30__SDMMC1_CD>;
+                               bias-disable;
+                       };
+               };
+               ...
+       };
+};
+...
index a8bb5e26019c6f69ea5ef49a8e0bad95ae806bda..f8fa28ce163e8f9a6661fcce8a7bad839555936a 100644 (file)
@@ -20,7 +20,10 @@ Required properties:
        "marvell,berlin2cd-soc-pinctrl",
        "marvell,berlin2cd-system-pinctrl",
        "marvell,berlin2q-soc-pinctrl",
-       "marvell,berlin2q-system-pinctrl"
+       "marvell,berlin2q-system-pinctrl",
+       "marvell,berlin4ct-avio-pinctrl",
+       "marvell,berlin4ct-soc-pinctrl",
+       "marvell,berlin4ct-system-pinctrl"
 
 Required subnode-properties:
 - groups: a list of strings describing the group names.
index 6540ca56be5edac956abf45bb87392ea169b584e..16589fb6f42059c4d3710d87cdf0e7bb37c323e1 100644 (file)
@@ -3,8 +3,8 @@ Broadcom Cygnus GPIO/PINCONF Controller
 Required properties:
 
 - compatible:
-    Must be "brcm,cygnus-ccm-gpio", "brcm,cygnus-asiu-gpio", or
-    "brcm,cygnus-crmu-gpio"
+    Must be "brcm,cygnus-ccm-gpio", "brcm,cygnus-asiu-gpio",
+    "brcm,cygnus-crmu-gpio" or "brcm,iproc-gpio"
 
 - reg:
     Define the base and range of the I/O address space that contains the Cygnus
@@ -26,9 +26,13 @@ Optional properties:
 - interrupt-controller:
     Specifies that the node is an interrupt controller
 
-- pinmux:
-    Specifies the phandle to the IOMUX device, where pins can be individually
-muxed to GPIO
+- gpio-ranges:
+    Specifies the mapping between the gpio controller and the pin-controller's pins.
+    This requires 4 fields in cells defined as -
+    1. Phandle of the pin-controller.
+    2. GPIO base pin offset.
+    3. Pin-control base pin offset.
+    4. Number of gpio pins which are linearly mapped from the pin base.
 
 Supported generic PINCONF properties in child nodes:
 
@@ -78,6 +82,8 @@ Example:
                gpio-controller;
                interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-controller;
+               gpio-ranges = <&pinctrl 0 42 1>,
+                               <&pinctrl 1 44 3>;
        };
 
        /*
index 8bbf25d58656d2f09ae9b1a779050e2ff87fda65..457b2c68d47b6caa817acc7a8e048b69ff5eb4f5 100644 (file)
@@ -1,16 +1,42 @@
 * Freescale i.MX7 Dual IOMUX Controller
 
+iMX7D supports two iomuxc controllers: fsl,imx7d-iomuxc, which is similar to
+the controllers of previous iMX SoC generations, and fsl,imx7d-iomuxc-lpsr,
+which provides low power state retention capabilities on the gpios that are
+part of iomuxc-lpsr (GPIO1_IO7..GPIO1_IO0). While iomuxc-lpsr provides its own
+set of registers for mux and pad control settings, it shares the input select
+register of the main iomuxc controller for daisy chain settings; the
+fsl,input-sel property extends the fsl,imx-pinctrl driver to support the
+iomuxc-lpsr controller.
+
+iomuxc_lpsr: iomuxc-lpsr@302c0000 {
+       compatible = "fsl,imx7d-iomuxc-lpsr";
+       reg = <0x302c0000 0x10000>;
+       fsl,input-sel = <&iomuxc>;
+};
+
+iomuxc: iomuxc@30330000 {
+       compatible = "fsl,imx7d-iomuxc";
+       reg = <0x30330000 0x10000>;
+};
+
+Peripherals using pads from iomuxc-lpsr support the low power state retention
+(LPSR) state; under LPSR mode the state of these GPIO pads is retained.
+
 Please refer to fsl,imx-pinctrl.txt in this directory for common binding part
 and usage.
 
 Required properties:
-- compatible: "fsl,imx7d-iomuxc"
+- compatible: "fsl,imx7d-iomuxc" for main IOMUXC controller, or
+  "fsl,imx7d-iomuxc-lpsr" for Low Power State Retention IOMUXC controller.
 - fsl,pins: each entry consists of 6 integers and represents the mux and config
   setting for one pin.  The first 5 integers <mux_reg conf_reg input_reg mux_val
   input_val> are specified using a PIN_FUNC_ID macro, which can be found in
   imx7d-pinfunc.h under device tree source folder.  The last integer CONFIG is
   the pad setting value like pull-up on this pin.  Please refer to i.MX7 Dual
   Reference Manual for detailed CONFIG settings.
+- fsl,input-sel: required property for the iomuxc-lpsr controller; it is a
+  phandle to the main iomuxc controller, which shares the input select register
+  for daisy chain settings.
 
 CONFIG bits definition:
 PAD_CTL_PUS_100K_DOWN           (0 << 5)
@@ -25,3 +51,38 @@ PAD_CTL_DSE_X1                  (0 << 0)
 PAD_CTL_DSE_X2                  (1 << 0)
 PAD_CTL_DSE_X3                  (2 << 0)
 PAD_CTL_DSE_X4                  (3 << 0)
+
+Examples:
+While iomuxc-lpsr is intended to be used by dedicated peripherals to take
+advantage of the LPSR power mode, it is also possible for an IP to use pads
+from any of the iomux controllers. For example, the I2C1 IP can use the SCL pad
+from the iomuxc-lpsr controller and the SDA pad from the iomuxc controller as:
+
+i2c1: i2c@30a20000 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c1_1 &pinctrl_i2c1_2>;
+       status = "okay";
+};
+
+iomuxc-lpsr@302c0000 {
+       compatible = "fsl,imx7d-iomuxc-lpsr";
+       reg = <0x302c0000 0x10000>;
+       fsl,input-sel = <&iomuxc>;
+
+       pinctrl_i2c1_1: i2c1grp-1 {
+               fsl,pins = <
+                       MX7D_PAD_GPIO1_IO04__I2C1_SCL 0x4000007f
+               >;
+       };
+};
+
+iomuxc@30330000 {
+       compatible = "fsl,imx7d-iomuxc";
+       reg = <0x30330000 0x10000>;
+
+       pinctrl_i2c1_2: i2c1grp-2 {
+               fsl,pins = <
+                       MX7D_PAD_I2C1_SDA__I2C1_SDA 0x4000007f
+               >;
+       };
+};
index 9496934528bdef505a373f9946a2dbe0570e8e92..ffadb7a371f6e94ee1422e04cf2598c3b5d5ec31 100644 (file)
@@ -19,6 +19,7 @@ Required Properties:
     - "renesas,pfc-r8a7791": for R8A7791 (R-Car M2-W) compatible pin-controller.
     - "renesas,pfc-r8a7793": for R8A7793 (R-Car M2-N) compatible pin-controller.
     - "renesas,pfc-r8a7794": for R8A7794 (R-Car E2) compatible pin-controller.
+    - "renesas,pfc-r8a7795": for R8A7795 (R-Car H3) compatible pin-controller.
     - "renesas,pfc-sh73a0": for SH73A0 (SH-Mobile AG5) compatible pin-controller.
 
   - reg: Base address and length of each memory resource used by the pin
index 8f771441be60556ace93f2b29d87df856882c344..705075da2f10156e92a60828177c8483ee16eeec 100644 (file)
@@ -51,7 +51,7 @@ Optional properties, deprecated for soctype-specific bindings:
 - renesas,tx-fifo-size : Overrides the default tx fifo size given in words
                         (default is 64)
 - renesas,rx-fifo-size : Overrides the default rx fifo size given in words
-                        (default is 64, or 256 on R-Car Gen2)
+                        (default is 64)
 
 Pinctrl properties might be needed, too.  See
 Documentation/devicetree/bindings/pinctrl/renesas,*.
index 64a4ca6cf96ff5bd9df7c3b1c99abd7d086a701c..7d48f63db44ec9b9c0aa68ea0a54a70fc1a7014e 100644 (file)
@@ -5,6 +5,7 @@ Required properties:
        - "renesas,usbhs-r8a7790"
        - "renesas,usbhs-r8a7791"
        - "renesas,usbhs-r8a7794"
+       - "renesas,usbhs-r8a7795"
   - reg: Base address and length of the register for the USBHS
   - interrupts: Interrupt specifier for the USBHS
   - clocks: A list of phandle + clock specifier pairs
index 0cf27a3544a5744f39c232c75039a37ca079c2cd..80841a2d640cf5aeca8e16db4ebba5d224c5a008 100644 (file)
@@ -744,6 +744,52 @@ exports one
    possible that some errors could be lost. With rdimm's, they display the
    contents of the registers
 
+AMD64_EDAC REFERENCE DOCUMENTS USED
+-----------------------------------
+amd64_edac module is based on the following documents
+(available from http://support.amd.com/en-us/search/tech-docs):
+
+1. Title:  BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
+          Opteron Processors
+   AMD publication #: 26094
+   Revision: 3.26
+   Link: http://support.amd.com/TechDocs/26094.PDF
+
+2. Title:  BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
+          Processors
+   AMD publication #: 32559
+   Revision: 3.00
+   Issue Date: May 2006
+   Link: http://support.amd.com/TechDocs/32559.pdf
+
+3. Title:  BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
+          Processors
+   AMD publication #: 31116
+   Revision: 3.00
+   Issue Date: September 07, 2007
+   Link: http://support.amd.com/TechDocs/31116.pdf
+
+4. Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 15h
+         Models 30h-3Fh Processors
+   AMD publication #: 49125
+   Revision: 3.06
+   Issue Date: 2/12/2015 (latest release)
+   Link: http://support.amd.com/TechDocs/49125_15h_Models_30h-3Fh_BKDG.pdf
+
+5. Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 15h
+         Models 60h-6Fh Processors
+   AMD publication #: 50742
+   Revision: 3.01
+   Issue Date: 7/23/2015 (latest release)
+   Link: http://support.amd.com/TechDocs/50742_15h_Models_60h-6Fh_BKDG.pdf
+
+6. Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 16h
+         Models 00h-0Fh Processors
+   AMD publication #: 48751
+   Revision: 3.03
+   Issue Date: 2/23/2015 (latest release)
+   Link: http://support.amd.com/TechDocs/48751_16h_bkdg.pdf
+
 CREDITS:
 ========
 
index df384e3e845f7b22f121427c6a0c2d352d7648ce..523f8307b9cd574ee22961d4fd27e618f5fe5b05 100644 (file)
@@ -7,7 +7,7 @@
     |         arch |status|
     -----------------------
     |       alpha: | TODO |
-    |         arc: |  ..  |
+    |         arc: |  ok  |
     |         arm: |  ok  |
     |       arm64: |  ok  |
     |       avr32: |  ..  |
index aaaa21db6226eaec28873ee7c820cab43c367cfb..3de5434c857c8909deac9f8c1770c56723c31ff8 100644 (file)
@@ -7,7 +7,7 @@
     |         arch |status|
     -----------------------
     |       alpha: | TODO |
-    |         arc: | TODO |
+    |         arc: |  ok  |
     |         arm: |  ok  |
     |       arm64: |  ok  |
     |       avr32: | TODO |
index d411ca63c8b6ce1f00ba624ace1fe5e0045da554..3a9d65c912e780977c12102d7719a0374241b962 100644 (file)
@@ -140,7 +140,8 @@ Table 1-1: Process specific entries in /proc
  stat          Process status
  statm         Process memory status information
  status                Process status in human readable form
- wchan         If CONFIG_KALLSYMS is set, a pre-decoded wchan
+ wchan         Present with CONFIG_KALLSYMS=y: it shows the kernel function
+               symbol the task is blocked in - or "0" if not blocked.
  pagemap       Page table
  stack         Report full stack trace, enable via CONFIG_STACKTRACE
  smaps         a extension based on maps, showing the memory consumption of
@@ -310,7 +311,7 @@ Table 1-4: Contents of the stat files (as of 2.6.30-rc7)
   blocked       bitmap of blocked signals
   sigign        bitmap of ignored signals
   sigcatch      bitmap of caught signals
-  wchan         address where process went to sleep
+  0            (place holder, used to be the wchan address, use /proc/PID/wchan instead)
   0             (place holder)
   0             (place holder)
   exit_signal   signal to send to parent thread on exit
index 90d0f6aba7a6848a3043ff7a7de3913118a98461..12a61948ec917ff2af9787a59f82336994acfde7 100644 (file)
@@ -62,6 +62,11 @@ Any debugfs dump method should normally ignore signals which haven't been
 requested as GPIOs. They can use gpiochip_is_requested(), which returns either
 NULL or the label associated with that GPIO when it was requested.
 
+RT_FULL: A GPIO driver should not use spinlock_t or any sleepable APIs
+(like PM runtime) in its gpio_chip implementation (the .get/.set and direction
+control callbacks) if the GPIO APIs are expected to be called from atomic
+context on -RT (inside hard IRQ handlers and similar contexts). Normally this
+should not be required.
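+
+As a minimal, purely illustrative sketch (the driver name, structure fields and
+register offset below are invented), a -RT safe gpio_chip fast path could look
+like:
+
+	struct foo_gpio {
+		struct gpio_chip chip;
+		void __iomem *base;
+		raw_spinlock_t lock;	/* raw lock: never sleeps on -RT */
+	};
+
+	static void foo_gpio_set(struct gpio_chip *chip, unsigned offset,
+				 int value)
+	{
+		struct foo_gpio *foo = container_of(chip, struct foo_gpio, chip);
+		unsigned long flags;
+		u32 val;
+
+		raw_spin_lock_irqsave(&foo->lock, flags);
+		val = readl_relaxed(foo->base + 0x0);	/* hypothetical DATA reg */
+		if (value)
+			val |= BIT(offset);
+		else
+			val &= ~BIT(offset);
+		writel_relaxed(val, foo->base + 0x0);
+		raw_spin_unlock_irqrestore(&foo->lock, flags);
+	}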
 
 GPIO drivers providing IRQs
 ---------------------------
@@ -73,6 +78,13 @@ The IRQ portions of the GPIO block are implemented using an irqchip, using
 the header <linux/irq.h>. So basically such a driver is utilizing two sub-
 systems simultaneously: gpio and irq.
 
+RT_FULL: A GPIO driver should not use spinlock_t or any sleepable APIs
+(like PM runtime) as part of its irq_chip implementation on -RT.
+- spinlock_t should be replaced with raw_spinlock_t [1].
+- If sleepable APIs have to be used, they can be called from the .irq_bus_lock()
+  and .irq_bus_sync_unlock() callbacks, as these are the only slowpath callbacks
+  on an irqchip. Create the callbacks if needed [2].
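+
+A hypothetical sketch of deferring the sleepable work to the slowpath callbacks
+(driver name, structure fields and register offset are invented) could look
+like:
+
+	static void foo_gpio_irq_mask(struct irq_data *d)
+	{
+		struct foo_gpio *foo = irq_data_get_irq_chip_data(d);
+
+		/* fastpath: only cache the request, no sleepable APIs */
+		foo->mask_cache |= BIT(d->hwirq);
+	}
+
+	static void foo_gpio_irq_bus_lock(struct irq_data *d)
+	{
+		struct foo_gpio *foo = irq_data_get_irq_chip_data(d);
+
+		/* slowpath: sleeping is allowed here */
+		pm_runtime_get_sync(foo->dev);
+	}
+
+	static void foo_gpio_irq_bus_sync_unlock(struct irq_data *d)
+	{
+		struct foo_gpio *foo = irq_data_get_irq_chip_data(d);
+
+		/* flush the cached state to hardware, then drop the PM reference */
+		writel_relaxed(foo->mask_cache, foo->base + 0x4);
+		pm_runtime_put(foo->dev);
+	}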
+
 GPIO irqchips usually fall in one of two categories:
 
 * CHAINED GPIO irqchips: these are usually the type that is embedded on
@@ -93,6 +105,38 @@ GPIO irqchips usually fall in one of two categories:
   Chained GPIO irqchips typically can NOT set the .can_sleep flag on
   struct gpio_chip, as everything happens directly in the callbacks.
 
+  RT_FULL: Note that chained IRQ handlers will not be forced threaded on -RT.
+  As a result, spinlock_t or any sleepable APIs (like PM runtime) can't be used
+  in a chained IRQ handler.
+  If required (and if it can't be converted to the nested threaded GPIO irqchip),
+  the chained IRQ handler can be converted to a generic IRQ handler; this way
+  it will be a threaded IRQ handler on -RT and a hard IRQ handler on non-RT
+  (for example, see [3]).
+  Known W/A: generic_handle_irq() is expected to be called with IRQs disabled,
+  so the IRQ core will complain if it is called from an IRQ handler which is
+  forced into a thread. The "fake" raw lock can be used to work around this
+  problem:
+
+       raw_spinlock_t wa_lock;
+       static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
+               unsigned long wa_lock_flags;
+               raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);
+               generic_handle_irq(irq_find_mapping(bank->chip.irqdomain, bit));
+               raw_spin_unlock_irqrestore(&bank->wa_lock, wa_lock_flags);
+
+* GENERIC CHAINED GPIO irqchips: these are the same as "CHAINED GPIO irqchips",
+  but chained IRQ handlers are not used. Instead GPIO IRQs dispatching is
+  performed by generic IRQ handler which is configured using request_irq().
+  The GPIO irqchip will then end up calling something like this sequence in
+  its interrupt handler:
+
+  static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
+       for each detected GPIO IRQ
+               generic_handle_irq(...);
+
+  RT_FULL: Such handlers will be forced threaded on -RT; as a result the IRQ
+  core will complain that generic_handle_irq() is called with IRQs enabled, and
+  the same W/A as for "CHAINED GPIO irqchips" can be applied.
+
 * NESTED THREADED GPIO irqchips: these are off-chip GPIO expanders and any
   other GPIO irqchip residing on the other side of a sleeping bus. Of course
   such drivers that need slow bus traffic to read out IRQ status and similar,
@@ -133,6 +177,13 @@ To use the helpers please keep the following in mind:
   the irqchip can initialize. E.g. .dev and .can_sleep shall be set up
   properly.
 
+- Nominally set all handlers to handle_bad_irq() in the setup call, i.e. pass
+  handle_bad_irq() as the flow handler parameter of gpiochip_irqchip_add(), if
+  the GPIO driver expects the irqchip .set_type() callback to be called before
+  a GPIO IRQ is used or enabled. Then set the handler to handle_level_irq()
+  and/or handle_edge_irq() in the irqchip .set_type() callback, depending on
+  what your controller supports, as sketched below.
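+
+  A hypothetical sketch of this scheme (driver names are invented) could be:
+
+	/* in the probe path: start all IRQs with handle_bad_irq() */
+	ret = gpiochip_irqchip_add(&foo->chip, &foo_irq_chip, 0,
+				   handle_bad_irq, IRQ_TYPE_NONE);
+
+	/* in the irqchip .set_type() callback: install the real handler */
+	static int foo_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+	{
+		/* program the hardware trigger here, then pick the handler */
+		if (type & IRQ_TYPE_LEVEL_MASK)
+			irq_set_handler_locked(d, handle_level_irq);
+		else
+			irq_set_handler_locked(d, handle_edge_irq);
+
+		return 0;
+	}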
+
 It is legal for any IRQ consumer to request an IRQ from any irqchip no matter
 if that is a combined GPIO+IRQ driver. The basic premise is that gpio_chip and
 irq_chip are orthogonal, and offering their services independent of each
@@ -169,6 +220,31 @@ When implementing an irqchip inside a GPIO driver, these two functions should
 typically be called in the .startup() and .shutdown() callbacks from the
 irqchip.
 
+Real-Time compliance for GPIO IRQ chips
+---------------------------------------
+
+Any provider of irqchips needs to be carefully tailored to support Real Time
+preemption. It is desirable that all irqchips in the GPIO subsystem keep this
+in mind and do the proper testing to assure they are real time-enabled.
+So please pay attention to the "RT_FULL:" notes above.
+The following is a checklist to follow when preparing a driver for real
+time-compliance:
+
+- ensure spinlock_t is not used as part of the irq_chip implementation;
+- ensure that sleepable APIs are not used as part of the irq_chip implementation.
+  If sleepable APIs have to be used, they can be called from the .irq_bus_lock()
+  and .irq_bus_sync_unlock() callbacks;
+- Chained GPIO irqchips: ensure spinlock_t or any sleepable APIs are not used
+  from the chained IRQ handler;
+- Generic chained GPIO irqchips: take care about generic_handle_irq() calls and
+  apply the corresponding W/A;
+- Chained GPIO irqchips: get rid of the chained IRQ handler and use a generic
+  IRQ handler if possible :)
+- regmap_mmio: Sorry, but you are in trouble :( if an MMIO regmap is used for
+  the GPIO IRQ chip implementation;
+- Test your driver with the appropriate in-kernel real time test cases for both
+  level and edge IRQs.
+
 
 Requesting self-owned GPIO pins
 -------------------------------
@@ -190,3 +266,7 @@ gpiochip_free_own_desc().
 These functions must be used with care since they do not affect module use
 count. Do not use the functions to request gpio descriptors not owned by the
 calling driver.
+
+[1] http://www.spinics.net/lists/linux-omap/msg120425.html
+[2] https://lkml.org/lkml/2015/9/25/494
+[3] https://lkml.org/lkml/2015/9/25/495
index 67691a0aa41db82387369957de586bd4a6a2c6df..ac95edfcd907ccddcf2f7be7cf34944b0895ea94 100644 (file)
@@ -42,8 +42,8 @@ Supported chips:
     Addresses scanned: none
     Datasheet: Publicly available at the ST website
                http://www.st.com/internet/analog/product/121769.jsp
-  * Texas Instruments TMP100, TMP101, TMP105, TMP112, TMP75, TMP175, TMP275
-    Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp112', 'tmp175', 'tmp75', 'tmp275'
+  * Texas Instruments TMP100, TMP101, TMP105, TMP112, TMP75, TMP75C, TMP175, TMP275
+    Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp112', 'tmp175', 'tmp75', 'tmp75c', 'tmp275'
     Addresses scanned: none
     Datasheet: Publicly available at the Texas Instruments website
                http://www.ti.com/product/tmp100
@@ -51,6 +51,7 @@ Supported chips:
                http://www.ti.com/product/tmp105
                http://www.ti.com/product/tmp112
                http://www.ti.com/product/tmp75
+               http://www.ti.com/product/tmp75c
                http://www.ti.com/product/tmp175
                http://www.ti.com/product/tmp275
   * NXP LM75B
diff --git a/Documentation/hwmon/max31790 b/Documentation/hwmon/max31790
new file mode 100644 (file)
index 0000000..855e624
--- /dev/null
@@ -0,0 +1,37 @@
+Kernel driver max31790
+======================
+
+Supported chips:
+  * Maxim MAX31790
+    Prefix: 'max31790'
+    Addresses scanned: -
+    Datasheet: http://pdfserv.maximintegrated.com/en/ds/MAX31790.pdf
+
+Author: Il Han <corone.il.han@gmail.com>
+
+
+Description
+-----------
+
+This driver implements support for the Maxim MAX31790 chip.
+
+The MAX31790 controls the speeds of up to six fans using six independent
+PWM outputs. The desired fan speeds (or PWM duty cycles) are written
+through the I2C interface. The outputs drive "4-wire" fans directly,
+or can be used to modulate the fan's power terminals using an external
+pass transistor.
+
+Tachometer inputs monitor fan tachometer logic outputs for precise (+/-1%)
+monitoring and control of fan RPM as well as detection of fan failure.
+Six pins are dedicated tachometer inputs. Any of the six PWM outputs can
+also be configured to serve as tachometer inputs.
+
+
+Sysfs entries
+-------------
+
+fan[1-12]_input    RO  fan tachometer speed in RPM
+fan[1-12]_fault    RO  fan experienced fault
+fan[1-6]_target    RW  desired fan speed in RPM
+pwm[1-6]_enable    RW  regulator mode, 0=disabled, 1=manual mode, 2=rpm mode
+pwm[1-6]           RW  fan target duty cycle (0-255)
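+
+
+Usage sketch
+------------
+
+A purely illustrative userspace sketch reading the first tachometer; the hwmon
+device number is an assumption, since it depends on probe order:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		FILE *f = fopen("/sys/class/hwmon/hwmon0/fan1_input", "r");
+		int rpm;
+
+		if (!f)
+			return 1;
+		if (fscanf(f, "%d", &rpm) == 1)
+			printf("fan1: %d RPM\n", rpm);
+		fclose(f);
+		return 0;
+	}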
index b85d000faeb4067c9ab1ed06690105d459a40a4e..c51f1146f3bd8396f572cddb9c87ae2620d236ba 100644 (file)
@@ -361,7 +361,7 @@ For win8 devices with both T and C coordinates, the position mapping is
    ABS_MT_POSITION_X := T_X
    ABS_MT_POSITION_Y := T_Y
    ABS_MT_TOOL_X := C_X
-   ABS_MT_TOOL_X := C_Y
+   ABS_MT_TOOL_Y := C_Y
 
 Unfortunately, there is not enough information to specify both the touching
 ellipse and the tool ellipse, so one has to resort to approximations.  One
index 22a4b687ea5b4b3cb9d576bfeffaed813256a795..046832ef14ce136d4192e6cf7b39fb54de75c522 100644 (file)
@@ -1094,6 +1094,21 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        you are really sure that your UEFI does sane gc and
                        fulfills the spec otherwise your board may brick.
 
+       efi_fake_mem=   nn[KMG]@ss[KMG]:aa[,nn[KMG]@ss[KMG]:aa,..] [EFI; X86]
+                       Add an arbitrary attribute to a specific memory range
+                       by updating the original EFI memory map.
+                       The region of memory to which attribute aa is added
+                       spans from ss to ss+nn.
+                       If efi_fake_mem=2G@4G:0x10000,2G@0x10a0000000:0x10000
+                       is specified, the EFI_MEMORY_MORE_RELIABLE(0x10000)
+                       attribute is added to the ranges 0x100000000-0x180000000
+                       and 0x10a0000000-0x1120000000.
+
+                       This parameter is useful for debugging EFI memmap
+                       related features. For example, you can debug the
+                       Address Range Mirroring feature even if your box
+                       doesn't support it.
+
        eisa_irq_edge=  [PARISC,HW]
                        See header of drivers/parisc/eisa.c.
 
@@ -3074,9 +3089,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        cache-to-cache transfer latencies.
 
        rcutree.rcu_fanout_leaf= [KNL]
-                       Increase the number of CPUs assigned to each
-                       leaf rcu_node structure.  Useful for very large
-                       systems.
+                       Change the number of CPUs assigned to each
+                       leaf rcu_node structure.  Useful for very
+                       large systems, which will choose the value 64,
+                       and for NUMA systems with large remote-access
+                       latencies, which will choose a value aligned
+                       with the appropriate hardware boundaries.
 
        rcutree.jiffies_till_sched_qs= [KNL]
                        Set required age in jiffies for a
index 568bbbacee91a581e1bb4be9bd54ed7e73d830e1..5786ad2cd5e63eec344628ec76c432a324d17a1f 100644 (file)
@@ -12,7 +12,7 @@ Because things like lock contention can severely impact performance.
 - HOW
 
 Lockdep already has hooks in the lock functions and maps lock instances to
-lock classes. We build on that (see Documentation/lokcing/lockdep-design.txt).
+lock classes. We build on that (see Documentation/locking/lockdep-design.txt).
 The graph below shows the relation between the lock functions and the various
 hooks therein.
 
index 619f2bb136a545f1932609f3b93b898145f6a5cc..a2ef3a929bf189bb43f7425f8e5e8a5564d23833 100644 (file)
@@ -52,6 +52,9 @@ torture_type    Type of lock to torture. By default, only spinlocks will
 
                     o "mutex_lock": mutex_lock() and mutex_unlock() pairs.
 
+                    o "rtmutex_lock": rt_mutex_lock() and rt_mutex_unlock()
+                                      pairs. Kernel must have CONFIG_RT_MUTEXES=y.
+
                     o "rwsem_lock": read/write down() and up() semaphore pairs.
 
 torture_runnable  Start locktorture at boot time in the case where the
index 2ba8461b0631de759fefd2a12918a6c4f4ee7562..aef9487303d023cd00d27dceadb441c78f12413f 100644 (file)
@@ -617,16 +617,16 @@ case what's actually required is:
 However, stores are not speculated.  This means that ordering -is- provided
 for load-store control dependencies, as in the following example:
 
-       q = READ_ONCE_CTRL(a);
+       q = READ_ONCE(a);
        if (q) {
                WRITE_ONCE(b, p);
        }
 
 Control dependencies pair normally with other types of barriers.  That
-said, please note that READ_ONCE_CTRL() is not optional!  Without the
-READ_ONCE_CTRL(), the compiler might combine the load from 'a' with
-other loads from 'a', and the store to 'b' with other stores to 'b',
-with possible highly counterintuitive effects on ordering.
+said, please note that READ_ONCE() is not optional! Without the
+READ_ONCE(), the compiler might combine the load from 'a' with other
+loads from 'a', and the store to 'b' with other stores to 'b', with
+possible highly counterintuitive effects on ordering.
 
 Worse yet, if the compiler is able to prove (say) that the value of
 variable 'a' is always non-zero, it would be well within its rights
@@ -636,15 +636,12 @@ as follows:
        q = a;
        b = p;  /* BUG: Compiler and CPU can both reorder!!! */
 
-Finally, the READ_ONCE_CTRL() includes an smp_read_barrier_depends()
-that DEC Alpha needs in order to respect control depedencies.
-
-So don't leave out the READ_ONCE_CTRL().
+So don't leave out the READ_ONCE().
 
 It is tempting to try to enforce ordering on identical stores on both
 branches of the "if" statement as follows:
 
-       q = READ_ONCE_CTRL(a);
+       q = READ_ONCE(a);
        if (q) {
                barrier();
                WRITE_ONCE(b, p);
@@ -658,7 +655,7 @@ branches of the "if" statement as follows:
 Unfortunately, current compilers will transform this as follows at high
 optimization levels:
 
-       q = READ_ONCE_CTRL(a);
+       q = READ_ONCE(a);
        barrier();
        WRITE_ONCE(b, p);  /* BUG: No ordering vs. load from a!!! */
        if (q) {
@@ -688,7 +685,7 @@ memory barriers, for example, smp_store_release():
 In contrast, without explicit memory barriers, two-legged-if control
 ordering is guaranteed only when the stores differ, for example:
 
-       q = READ_ONCE_CTRL(a);
+       q = READ_ONCE(a);
        if (q) {
                WRITE_ONCE(b, p);
                do_something();
@@ -697,14 +694,14 @@ ordering is guaranteed only when the stores differ, for example:
                do_something_else();
        }
 
-The initial READ_ONCE_CTRL() is still required to prevent the compiler
-from proving the value of 'a'.
+The initial READ_ONCE() is still required to prevent the compiler from
+proving the value of 'a'.
 
 In addition, you need to be careful what you do with the local variable 'q',
 otherwise the compiler might be able to guess the value and again remove
 the needed conditional.  For example:
 
-       q = READ_ONCE_CTRL(a);
+       q = READ_ONCE(a);
        if (q % MAX) {
                WRITE_ONCE(b, p);
                do_something();
@@ -717,7 +714,7 @@ If MAX is defined to be 1, then the compiler knows that (q % MAX) is
 equal to zero, in which case the compiler is within its rights to
 transform the above code into the following:
 
-       q = READ_ONCE_CTRL(a);
+       q = READ_ONCE(a);
        WRITE_ONCE(b, p);
        do_something_else();
 
@@ -728,7 +725,7 @@ is gone, and the barrier won't bring it back.  Therefore, if you are
 relying on this ordering, you should make sure that MAX is greater than
 one, perhaps as follows:
 
-       q = READ_ONCE_CTRL(a);
+       q = READ_ONCE(a);
        BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */
        if (q % MAX) {
                WRITE_ONCE(b, p);
@@ -745,7 +742,7 @@ of the 'if' statement.
 You must also be careful not to rely too much on boolean short-circuit
 evaluation.  Consider this example:
 
-       q = READ_ONCE_CTRL(a);
+       q = READ_ONCE(a);
        if (q || 1 > 0)
                WRITE_ONCE(b, 1);
 
@@ -753,7 +750,7 @@ Because the first condition cannot fault and the second condition is
 always true, the compiler can transform this example as following,
 defeating control dependency:
 
-       q = READ_ONCE_CTRL(a);
+       q = READ_ONCE(a);
        WRITE_ONCE(b, 1);
 
 This example underscores the need to ensure that the compiler cannot
@@ -767,7 +764,7 @@ x and y both being zero:
 
        CPU 0                     CPU 1
        =======================   =======================
-       r1 = READ_ONCE_CTRL(x);   r2 = READ_ONCE_CTRL(y);
+       r1 = READ_ONCE(x);        r2 = READ_ONCE(y);
        if (r1 > 0)               if (r2 > 0)
          WRITE_ONCE(y, 1);         WRITE_ONCE(x, 1);
 
@@ -796,11 +793,6 @@ site: https://www.cl.cam.ac.uk/~pes20/ppcmem/index.html.
 
 In summary:
 
-  (*) Control dependencies must be headed by READ_ONCE_CTRL().
-      Or, as a much less preferable alternative, interpose
-      smp_read_barrier_depends() between a READ_ONCE() and the
-      control-dependent write.
-
   (*) Control dependencies can order prior loads against later stores.
       However, they do -not- guarantee any other sort of ordering:
       Not prior loads against later loads, nor prior stores against
@@ -816,14 +808,13 @@ In summary:
       between the prior load and the subsequent store, and this
       conditional must involve the prior load.  If the compiler is able
       to optimize the conditional away, it will have also optimized
-      away the ordering.  Careful use of READ_ONCE_CTRL() READ_ONCE(),
-      and WRITE_ONCE() can help to preserve the needed conditional.
+      away the ordering.  Careful use of READ_ONCE() and WRITE_ONCE()
+      can help to preserve the needed conditional.
 
   (*) Control dependencies require that the compiler avoid reordering the
-      dependency into nonexistence.  Careful use of READ_ONCE_CTRL()
-      or smp_read_barrier_depends() can help to preserve your control
-      dependency.  Please see the Compiler Barrier section for more
-      information.
+      dependency into nonexistence.  Careful use of READ_ONCE() or
+      atomic{,64}_read() can help to preserve your control dependency.
+      Please see the Compiler Barrier section for more information.
 
   (*) Control dependencies pair normally with other types of barriers.
 
@@ -1710,6 +1701,17 @@ There are some more advanced barrier functions:
      operations" subsection for information on where to use these.
 
 
+ (*) lockless_dereference();
+     This can be thought of as a pointer-fetch wrapper around the
+     smp_read_barrier_depends() data-dependency barrier.
+
+     This is also similar to rcu_dereference(), but is used in cases where
+     object lifetime is handled by some mechanism other than RCU, for
+     example, when the objects are removed only when the system goes down.
+     In addition, lockless_dereference() is used in some data structures
+     that can be used both with and without RCU.
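+
+     As a purely illustrative sketch (the types and helper below are invented),
+     a reader of a pointer 'gp' whose target is torn down only at shutdown
+     might do:
+
+	struct foo *p = lockless_dereference(gp);
+
+	if (p)
+		do_something(p->a);
+
+     with the writer publishing the pointer using smp_store_release(&gp, p)
+     or a similar release operation.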
+
+
  (*) dma_wmb();
  (*) dma_rmb();
 
@@ -1789,7 +1791,6 @@ The Linux kernel has a number of locking constructs:
  (*) mutexes
  (*) semaphores
  (*) R/W semaphores
- (*) RCU
 
 In all cases there are variants on "ACQUIRE" operations and "RELEASE" operations
 for each construct.  These operations all imply certain barriers:
index 189bab09255a929089ffd031d10c71386964b69e..caa555706f8931dddf8cab3c88763b42d3d2c4d8 100644 (file)
@@ -72,13 +72,3 @@ Note on raw_rpmb_size_mult:
        "raw_rpmb_size_mult" is a mutliple of 128kB block.
        RPMB size in byte is calculated by using the following equation:
        RPMB partition size = 128kB x raw_rpmb_size_mult
-
-SD/MMC/SDIO Clock Gating Attribute
-==================================
-
-Read and write access is provided to following attribute.
-This attribute appears only if CONFIG_MMC_CLKGATE is enabled.
-
-       clkgate_delay   Tune the clock gating delay with desired value in milliseconds.
-
-echo <desired delay> > /sys/class/mmc_host/mmcX/clkgate_delay
index 62328d76b55bd9cfc59294666b49a70e0ddca5da..b0e911e0e8f50ad749686961e494377a1a49db45 100644 (file)
@@ -979,20 +979,45 @@ every time right after the runtime_resume() callback has returned
 (alternatively, the runtime_suspend() callback will have to check if the
 device should really be suspended and return -EAGAIN if that is not the case).
 
-The runtime PM of PCI devices is disabled by default.  It is also blocked by
-pci_pm_init() that runs the pm_runtime_forbid() helper function.  If a PCI
-driver implements the runtime PM callbacks and intends to use the runtime PM
-framework provided by the PM core and the PCI subsystem, it should enable this
-feature by executing the pm_runtime_enable() helper function.  However, the
-driver should not call the pm_runtime_allow() helper function unblocking
-the runtime PM of the device.  Instead, it should allow user space or some
-platform-specific code to do that (user space can do it via sysfs), although
-once it has called pm_runtime_enable(), it must be prepared to handle the
+The runtime PM of PCI devices is enabled by default by the PCI core.  PCI
+device drivers do not need to enable it and should not attempt to do so.
+However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid()
+helper function.  In addition to that, the runtime PM usage counter of
+each PCI device is incremented by local_pci_probe() before executing the
+probe callback provided by the device's driver.
+
+If a PCI driver implements the runtime PM callbacks and intends to use the
+runtime PM framework provided by the PM core and the PCI subsystem, it needs
+to decrement the device's runtime PM usage counter in its probe callback
+function.  If it doesn't do that, the counter will always be different from
+zero for the device and it will never be runtime-suspended.  The simplest
+way to do that is by calling pm_runtime_put_noidle(), but if the driver
+wants to schedule an autosuspend right away, for example, it may call
+pm_runtime_put_autosuspend() instead for this purpose.  Generally, it
+just needs to call a function that decrements the device's usage counter
+from its probe routine to make runtime PM work for the device.
+
+It is important to remember that the driver's runtime_suspend() callback
+may be executed right after the usage counter has been decremented, because
+user space may already have caused the pm_runtime_allow() helper function
+unblocking the runtime PM of the device to run via sysfs, so the driver must
+be prepared to cope with that.
+
+The driver itself should not call pm_runtime_allow(), though.  Instead, it
+should let user space or some platform-specific code do that (user space can
+do it via sysfs as stated above), but it must be prepared to handle the
 runtime PM of the device correctly as soon as pm_runtime_allow() is called
-(which may happen at any time).  [It also is possible that user space causes
-pm_runtime_allow() to be called via sysfs before the driver is loaded, so in
-fact the driver has to be prepared to handle the runtime PM of the device as
-soon as it calls pm_runtime_enable().]
+(which may happen at any time, even before the driver is loaded).
+
+When the driver's remove callback runs, it has to balance the decrement of
+the device's runtime PM usage counter made at probe time.  For this reason,
+if it has decremented the counter in its probe callback, it must run
+pm_runtime_get_noresume() in its remove callback.  [Since the core carries
+out a runtime resume of the device and bumps up the device's usage counter
+before running the driver's remove callback, the runtime PM of the device
+is effectively disabled for the duration of the remove execution and all
+runtime PM helper functions incrementing the device's usage counter are
+then effectively equivalent to pm_runtime_get_noresume().]
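+
+A minimal sketch (hypothetical driver, error handling trimmed) of a PCI driver
+that balances the usage counter as described above might be:
+
+	static int foo_pci_probe(struct pci_dev *pdev,
+				 const struct pci_device_id *id)
+	{
+		int ret;
+
+		ret = pcim_enable_device(pdev);
+		if (ret)
+			return ret;
+
+		/* driver-specific setup goes here */
+
+		/* balance the increment done by local_pci_probe() */
+		pm_runtime_put_noidle(&pdev->dev);
+		return 0;
+	}
+
+	static void foo_pci_remove(struct pci_dev *pdev)
+	{
+		/* re-take the reference dropped in probe */
+		pm_runtime_get_noresume(&pdev->dev);
+	}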
 
 The runtime PM framework works by processing requests to suspend or resume
 devices, or to check if they are idle (in which cases it is reasonable to
index 2bc8abc57fa04c1a4e47bb2d2b8626f8209b24a9..6c6247aaa7b93a0a038e5f65b30679486b410cc2 100644 (file)
@@ -18,6 +18,7 @@
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__        /* For PPC64, to get LL64 types */
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
index 9f6685f6c5a97062e7d44d04493afe518fa27ab8..dcc8ed6fccde769c3f2c44a4e6be5701fc33a74e 100644 (file)
@@ -240,6 +240,12 @@ L: lm-sensors@lm-sensors.org
 S:     Maintained
 F:     drivers/hwmon/abituguru3.c
 
+ACCES 104-IDIO-16 GPIO DRIVER
+M:     "William Breathitt Gray" <vilhelm.gray@gmail.com>
+L:     linux-gpio@vger.kernel.org
+S:     Maintained
+F:     drivers/gpio/gpio-104-idio-16.c
+
 ACENIC DRIVER
 M:     Jes Sorensen <jes@trained-monkey.org>
 L:     linux-acenic@sunsite.dk
@@ -654,11 +660,6 @@ F: drivers/gpu/drm/radeon/radeon_kfd.c
 F:     drivers/gpu/drm/radeon/radeon_kfd.h
 F:     include/uapi/linux/kfd_ioctl.h
 
-AMD MICROCODE UPDATE SUPPORT
-M:     Borislav Petkov <bp@alien8.de>
-S:     Maintained
-F:     arch/x86/kernel/cpu/microcode/amd*
-
 AMD XGBE DRIVER
 M:     Tom Lendacky <thomas.lendacky@amd.com>
 L:     netdev@vger.kernel.org
@@ -894,11 +895,12 @@ M:        Lennert Buytenhek <kernel@wantstofly.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 
-ARM/Allwinner A1X SoC support
+ARM/Allwinner sunXi SoC support
 M:     Maxime Ripard <maxime.ripard@free-electrons.com>
+M:     Chen-Yu Tsai <wens@csie.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-N:     sun[x4567]i
+N:     sun[x456789]i
 
 ARM/Allwinner SoC Clock Support
 M:     Emilio López <emilio@elopez.com.ar>
@@ -1779,6 +1781,14 @@ S:       Supported
 F:     Documentation/aoe/
 F:     drivers/block/aoe/
 
+ATHEROS 71XX/9XXX GPIO DRIVER
+M:     Alban Bedel <albeu@free.fr>
+W:     https://github.com/AlbanBedel/linux
+T:     git git://github.com/AlbanBedel/linux
+S:     Maintained
+F:     drivers/gpio/gpio-ath79.c
+F:     Documentation/devicetree/bindings/gpio/gpio-ath79.txt
+
 ATHEROS ATH GENERIC UTILITIES
 M:     "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
 L:     linux-wireless@vger.kernel.org
@@ -3591,6 +3601,13 @@ F:       drivers/gpu/drm/i915/
 F:     include/drm/i915*
 F:     include/uapi/drm/i915*
 
+DRM DRIVERS FOR ATMEL HLCDC
+M:     Boris Brezillon <boris.brezillon@free-electrons.com>
+L:     dri-devel@lists.freedesktop.org
+S:     Supported
+F:     drivers/gpu/drm/atmel-hlcdc/
+F:     Documentation/devicetree/bindings/drm/atmel/
+
 DRM DRIVERS FOR EXYNOS
 M:     Inki Dae <inki.dae@samsung.com>
 M:     Joonyoung Shim <jy0922.shim@samsung.com>
@@ -3619,6 +3636,14 @@ S:       Maintained
 F:     drivers/gpu/drm/imx/
 F:     Documentation/devicetree/bindings/drm/imx/
 
+DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
+M:     Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+L:     dri-devel@lists.freedesktop.org
+T:     git git://github.com/patjak/drm-gma500
+S:     Maintained
+F:     drivers/gpu/drm/gma500
+F:     include/drm/gma500*
+
 DRM DRIVERS FOR NVIDIA TEGRA
 M:     Thierry Reding <thierry.reding@gmail.com>
 M:     Terje Bergström <tbergstrom@nvidia.com>
@@ -4003,7 +4028,7 @@ S:        Maintained
 F:     sound/usb/misc/ua101.c
 
 EXTENSIBLE FIRMWARE INTERFACE (EFI)
-M:     Matt Fleming <matt.fleming@intel.com>
+M:     Matt Fleming <matt@codeblueprint.co.uk>
 L:     linux-efi@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 S:     Maintained
@@ -4018,7 +4043,7 @@ F:        include/linux/efi*.h
 EFI VARIABLE FILESYSTEM
 M:     Matthew Garrett <matthew.garrett@nebula.com>
 M:     Jeremy Kerr <jk@ozlabs.org>
-M:     Matt Fleming <matt.fleming@intel.com>
+M:     Matt Fleming <matt@codeblueprint.co.uk>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 L:     linux-efi@vger.kernel.org
 S:     Maintained
@@ -4412,6 +4437,14 @@ L:       linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/net/ethernet/freescale/ucc_geth*
 
+FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
+M:     Claudiu Manoil <claudiu.manoil@freescale.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ethernet/freescale/gianfar*
+X:     drivers/net/ethernet/freescale/gianfar_ptp.c
+F:     Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+
 FREESCALE QUICC ENGINE UCC UART DRIVER
 M:     Timur Tabi <timur@tabi.org>
 L:     linuxppc-dev@lists.ozlabs.org
@@ -5430,12 +5463,6 @@ W:       https://01.org/linux-acpi
 S:     Supported
 F:     drivers/platform/x86/intel_menlow.c
 
-INTEL IA32 MICROCODE UPDATE SUPPORT
-M:     Borislav Petkov <bp@alien8.de>
-S:     Maintained
-F:     arch/x86/kernel/cpu/microcode/core*
-F:     arch/x86/kernel/cpu/microcode/intel*
-
 INTEL I/OAT DMA DRIVER
 M:     Dave Jiang <dave.jiang@intel.com>
 R:     Dan Williams <dan.j.williams@intel.com>
@@ -5957,7 +5984,7 @@ F:        virt/kvm/
 KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V
 M:     Joerg Roedel <joro@8bytes.org>
 L:     kvm@vger.kernel.org
-W:     http://kvm.qumranet.com
+W:     http://www.linux-kvm.org/
 S:     Maintained
 F:     arch/x86/include/asm/svm.h
 F:     arch/x86/kvm/svm.c
@@ -5965,7 +5992,7 @@ F:        arch/x86/kvm/svm.c
 KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
 M:     Alexander Graf <agraf@suse.com>
 L:     kvm-ppc@vger.kernel.org
-W:     http://kvm.qumranet.com
+W:     http://www.linux-kvm.org/
 T:     git git://github.com/agraf/linux-2.6.git
 S:     Supported
 F:     arch/powerpc/include/asm/kvm*
@@ -6778,7 +6805,6 @@ F:        drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
 M:     Amir Vadai <amirv@mellanox.com>
-M:     Ido Shamay <idos@mellanox.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -8158,6 +8184,13 @@ L:       linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     drivers/pinctrl/pinctrl-at91.*
 
+PIN CONTROLLER - ATMEL AT91 PIO4
+M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-gpio@vger.kernel.org
+S:     Supported
+F:     drivers/pinctrl/pinctrl-at91-pio4.*
+
 PIN CONTROLLER - INTEL
 M:     Mika Westerberg <mika.westerberg@linux.intel.com>
 M:     Heikki Krogerus <heikki.krogerus@linux.intel.com>
@@ -9101,6 +9134,15 @@ S: Supported
 F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
 F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
 
+SYNOPSYS DESIGNWARE I2C DRIVER
+M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+M:     Jarkko Nikula <jarkko.nikula@linux.intel.com>
+M:     Mika Westerberg <mika.westerberg@linux.intel.com>
+L:     linux-i2c@vger.kernel.org
+S:     Maintained
+F:     drivers/i2c/busses/i2c-designware-*
+F:     include/linux/platform_data/i2c-designware.h
+
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 M:     Seungwon Jeon <tgih.jun@samsung.com>
 M:     Jaehoon Chung <jh80.chung@samsung.com>
@@ -9914,7 +9956,6 @@ S:        Maintained
 F:     drivers/staging/lustre
 
 STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
-M:     Julian Andres Klode <jak@jak-linux.org>
 M:     Marc Dietrich <marvin24@gmx.de>
 L:     ac100@lists.launchpad.net (moderated for non-subscribers)
 L:     linux-tegra@vger.kernel.org
@@ -10069,6 +10110,7 @@ F:      include/net/switchdev.h
 
 SYNOPSYS ARC ARCHITECTURE
 M:     Vineet Gupta <vgupta@synopsys.com>
+L:     linux-snps-arc@lists.infradead.org
 S:     Supported
 F:     arch/arc/
 F:     Documentation/devicetree/bindings/arc/*
@@ -11378,15 +11420,6 @@ W:     http://oops.ghostprotocols.net:81/blog
 S:     Maintained
 F:     drivers/net/wireless/wl3501*
 
-WM97XX TOUCHSCREEN DRIVERS
-M:     Mark Brown <broonie@kernel.org>
-M:     Liam Girdwood <lrg@slimlogic.co.uk>
-L:     linux-input@vger.kernel.org
-W:     https://github.com/CirrusLogic/linux-drivers/wiki
-S:     Supported
-F:     drivers/input/touchscreen/*wm97*
-F:     include/linux/wm97xx.h
-
 WOLFSON MICROELECTRONICS DRIVERS
 L:     patches@opensource.wolfsonmicro.com
 T:     git https://github.com/CirrusLogic/linux-drivers.git
@@ -11461,6 +11494,11 @@ L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     arch/x86/kernel/cpu/mcheck/*
 
+X86 MICROCODE UPDATE SUPPORT
+M:     Borislav Petkov <bp@alien8.de>
+S:     Maintained
+F:     arch/x86/kernel/cpu/microcode/*
+
 X86 VDSO
 M:     Andy Lutomirski <luto@amacapital.net>
 L:     linux-kernel@vger.kernel.org
@@ -11661,6 +11699,7 @@ F:      drivers/tty/serial/zs.*
 ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
 M:     Minchan Kim <minchan@kernel.org>
 M:     Nitin Gupta <ngupta@vflare.org>
+R:     Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zsmalloc.c
index 1d341eba143d38f0b9e7bd7669c6357f477d94b4..d5b37391195f80866ad2b831a417039b3ee230cf 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Hurr durr I'ma sheep
+EXTRAVERSION =
+NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
index e8c95609842436771f6c7c024c7b2830706987e8..572b228c44c7a80aec6eed925715c8d2ebacb00d 100644 (file)
 #define ATOMIC_INIT(i)         { (i) }
 #define ATOMIC64_INIT(i)       { (i) }
 
-#define atomic_read(v)         ACCESS_ONCE((v)->counter)
-#define atomic64_read(v)       ACCESS_ONCE((v)->counter)
+#define atomic_read(v)         READ_ONCE((v)->counter)
+#define atomic64_read(v)       READ_ONCE((v)->counter)
 
-#define atomic_set(v,i)                ((v)->counter = (i))
-#define atomic64_set(v,i)      ((v)->counter = (i))
+#define atomic_set(v,i)                WRITE_ONCE((v)->counter, (i))
+#define atomic64_set(v,i)      WRITE_ONCE((v)->counter, (i))
 
 /*
  * To get proper branch prediction for the main line, we must branch
index 6b340d0f1521c3ad9c4edf984abe60982ae24c04..902e6ab00a066fead53614ed86cb88980aea2fba 100644 (file)
@@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits)
 #endif
 }
 
+#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1)
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
index 78c0621d581940f3f765e75c5218a7cdc9a4ae5f..2c2ac3f3ff80370bd6bad9fffc9dfb8c8f6931e8 100644 (file)
@@ -76,6 +76,10 @@ config STACKTRACE_SUPPORT
 config HAVE_LATENCYTOP_SUPPORT
        def_bool y
 
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       def_bool y
+       depends on ARC_MMU_V4
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
@@ -190,6 +194,16 @@ config NR_CPUS
        range 2 4096
        default "4"
 
+config ARC_SMP_HALT_ON_RESET
+       bool "Enable Halt-on-reset boot mode"
+       default y if ARC_UBOOT_SUPPORT
+       help
+         In SMP configurations cores can be configured as Halt-on-reset
+         or they could all start at the same time. For Halt-on-reset, the
+         non-masters are parked until the Master kicks them, so they can
+         start off at the designated entry point. In the other case, all
+         cores jump to a common entry point and spin-wait for the Master's
+         signal.
+
 endif  #SMP
 
 menuconfig ARC_CACHE
@@ -278,6 +292,8 @@ choice
        default ARC_MMU_V2 if ARC_CPU_750D
        default ARC_MMU_V4 if ARC_CPU_HS
 
+if ISA_ARCOMPACT
+
 config ARC_MMU_V1
        bool "MMU v1"
        help
@@ -297,6 +313,8 @@ config ARC_MMU_V3
          Variable Page size (1k-16k), var JTLB size 128 x (2 or 4)
          Shared Address Spaces (SASID)
 
+endif
+
 config ARC_MMU_V4
        bool "MMU v4"
        depends on ISA_ARCV2
@@ -428,6 +446,28 @@ config LINUX_LINK_BASE
          Linux needs to be scooted a bit.
          If you don't know what the above means, leave this setting alone.
 
+config HIGHMEM
+       bool "High Memory Support"
+       help
+         With the ARC 2G:2G address split, only the upper 2G is directly
+         addressable by the kernel. Enable this to potentially allow access
+         to the rest of the 2G, and to PAE in the future.
+
+config ARC_HAS_PAE40
+       bool "Support for the 40-bit Physical Address Extension"
+       default n
+       depends on ISA_ARCV2
+       select HIGHMEM
+       help
+         Enable access to physical memory beyond 4G; only supported on
+         ARC cores with 40-bit Physical Addressing support.
+
+config ARCH_PHYS_ADDR_T_64BIT
+       def_bool ARC_HAS_PAE40
+
+config ARCH_DMA_ADDR_T_64BIT
+       bool
+
 config ARC_CURR_IN_REG
        bool "Dedicate Register r25 for current_task pointer"
        default y
index a5e2726a067e6dffde2a552956ee82a7adaf88e1..420dcfde289fe8fe3e99193df1876d399b0dce04 100644 (file)
@@ -95,6 +95,6 @@
                #size-cells = <1>;
                ranges = <0x00000000 0x80000000 0x40000000>;
                device_type = "memory";
-               reg = <0x00000000 0x20000000>;  /* 512MiB */
+               reg = <0x80000000 0x20000000>;  /* 512MiB */
        };
 };
index 846481f37eef08bd8911859009fddf390974b193..f90fadf7f94e5e51f551d9e914aaad728aa5bc84 100644 (file)
@@ -98,6 +98,6 @@
                #size-cells = <1>;
                ranges = <0x00000000 0x80000000 0x40000000>;
                device_type = "memory";
-               reg = <0x00000000 0x20000000>;  /* 512MiB */
+               reg = <0x80000000 0x20000000>;  /* 512MiB */
        };
 };
index 2f0b33257db2e2ecf4749bee0d2f5ed3260dc3a3..06a9f294a2e600a4aa7f8f7b18316ceb60d87ef0 100644 (file)
                #size-cells = <1>;
                ranges = <0x00000000 0x80000000 0x40000000>;
                device_type = "memory";
-               reg = <0x00000000 0x20000000>;  /* 512MiB */
+               reg = <0x80000000 0x20000000>;  /* 512MiB */
        };
 };
index 911f069e0540500d60987e3b3bcce3abde2a48d3..b0eb0e7fe21d8a66b1d0ef5267f24dd6a1f8f254 100644 (file)
 
 / {
        compatible = "snps,nsim_hs";
+       #address-cells = <2>;
+       #size-cells = <2>;
        interrupt-parent = <&core_intc>;
 
+       memory {
+               device_type = "memory";
+               reg = <0x0 0x80000000 0x0 0x40000000    /* 1 GB low mem */
+                      0x1 0x00000000 0x0 0x40000000>;  /* 1 GB highmem */
+       };
+
        chosen {
                bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
        };
@@ -26,8 +34,8 @@
                #address-cells = <1>;
                #size-cells = <1>;
 
-               /* child and parent address space 1:1 mapped */
-               ranges;
+               /* only perip space at end of low mem accessible */
+               ranges = <0x80000000 0x0 0x80000000 0x80000000>;
 
                core_intc: core-interrupt-controller {
                        compatible = "snps,archs-intc";
index a870bdd5e404ca386bb2d8d5efd6b454677f0eae..296d371a335c8374b98efba984d95b705b3f343b 100644 (file)
@@ -32,6 +32,6 @@
 
        memory {
                device_type = "memory";
-               reg = <0x00000000 0x10000000>;  /* 256M */
+               reg = <0x80000000 0x10000000>;  /* 256M */
        };
 };
index 9393fd902f0d401a475db4f97646ee95e36220bb..84226bd48baf070a2ab5bea8cc0741ebee1251e9 100644 (file)
@@ -56,6 +56,6 @@
                #size-cells = <1>;
                ranges = <0x00000000 0x80000000 0x40000000>;
                device_type = "memory";
-               reg = <0x00000000 0x20000000>;  /* 512MiB */
+               reg = <0x80000000 0x20000000>;  /* 512MiB */
        };
 };
index 9bee8ed09eb03cd3e73997c5358d0791634c5add..31f0fb5fc91dec885a598a1769f7e167cbd8a780 100644 (file)
@@ -71,6 +71,6 @@
                #size-cells = <1>;
                ranges = <0x00000000 0x80000000 0x40000000>;
                device_type = "memory";
-               reg = <0x00000000 0x20000000>;  /* 512MiB */
+               reg = <0x80000000 0x20000000>;  /* 512MiB */
        };
 };
index 562dac6a7f7824f4469678999ffed77809a19a1d..c92c0ef1e9d290b2db437001338cf477c1df9513 100644 (file)
@@ -89,7 +89,6 @@ CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS=y
index 83a6d8d5cc589b02852a048d42543f027d6f9cae..cfac24e0e7b6565a48c3a22f6251e8992fbb84bb 100644 (file)
@@ -95,7 +95,6 @@ CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS=y
index f1e1c84e0dda25a6a7cc95dc34a4de542863babb..9922a118a15a3d28c99c85f1c67407142e75fb86 100644 (file)
@@ -96,7 +96,6 @@ CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS=y
index 7611b10a2d238c7b4bb73696b59e2fe8b14ceb2c..0b10ef2a43726e0188b61de96bd5a5bf1ba50067 100644 (file)
@@ -48,4 +48,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index d8023bc8d1ad687c26a57ee6873509d0d1570222..7fac7d85ed6a32bb1abaa4f8e36c5d243cec06c0 100644 (file)
 
 /* gcc builtin sr needs reg param to be long immediate */
 #define write_aux_reg(reg_immed, val)          \
-               __builtin_arc_sr((unsigned int)val, reg_immed)
+               __builtin_arc_sr((unsigned int)(val), reg_immed)
 
 #else
 
@@ -327,8 +327,8 @@ struct bcr_generic {
  */
 
 struct cpuinfo_arc_mmu {
-       unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, u_dtlb:6, u_itlb:6;
-       unsigned int num_tlb:16, sets:12, ways:4;
+       unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, pad:10, sasid:1, pae:1;
+       unsigned int sets:12, ways:4, u_dtlb:8, u_itlb:8;
 };
 
 struct cpuinfo_arc_cache {
index c3ecda023e3a52a0aa0bd04cb5ba7b69c6f856a6..7730d302cadb5620feff02b2fbe5ebebf1f3e53d 100644 (file)
 #include <asm/barrier.h>
 #include <asm/smp.h>
 
-#define atomic_read(v)  ((v)->counter)
+#define atomic_read(v)  READ_ONCE((v)->counter)
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
 #ifdef CONFIG_ARC_STAR_9000923308
 
@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
 #ifndef CONFIG_SMP
 
  /* violating atomic_xxx API locking protocol in UP for optimization sake */
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
 #else
 
@@ -125,7 +125,7 @@ static inline void atomic_set(atomic_t *v, int i)
        unsigned long flags;
 
        atomic_ops_lock(flags);
-       v->counter = i;
+       WRITE_ONCE(v->counter, i);
        atomic_ops_unlock(flags);
 }
 
index e23ea6e7633a6e8cefbc31be639ac073a673b5f3..abf06e81c9290f6eafc441b563061e3389032922 100644 (file)
@@ -65,6 +65,7 @@ extern int ioc_exists;
 #if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4)
 #define ARC_REG_IC_PTAG                0x1E
 #endif
+#define ARC_REG_IC_PTAG_HI     0x1F
 
 /* Bit val in IC_CTRL */
 #define IC_CTRL_CACHE_DISABLE   0x1
@@ -77,6 +78,7 @@ extern int ioc_exists;
 #define ARC_REG_DC_FLSH                0x4B
 #define ARC_REG_DC_FLDL                0x4C
 #define ARC_REG_DC_PTAG                0x5C
+#define ARC_REG_DC_PTAG_HI     0x5F
 
 /* Bit val in DC_CTRL */
 #define DC_CTRL_INV_MODE_FLUSH  0x40
index 0992d3dbcc65f66e4e97925703ec9dc113a7b9a4..fbe3587c4f36f1add284667fd8a04edbe90ea2ef 100644 (file)
 
 void flush_cache_all(void);
 
-void flush_icache_range(unsigned long start, unsigned long end);
-void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
-void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
-void __flush_dcache_page(unsigned long paddr, unsigned long vaddr);
+void flush_icache_range(unsigned long kstart, unsigned long kend);
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
+void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
+void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
index 415443c2a8c4297d235796f8af4a71d2d07002fd..1aff3be9107563ca620072a49abf9e9925d4ec6b 100644 (file)
 
 .macro FAKE_RET_FROM_EXCPN
 
-       ld  r9, [sp, PT_status32]
-       bic r9, r9, (STATUS_U_MASK|STATUS_DE_MASK)
-       bset  r9, r9, STATUS_L_BIT
-       sr  r9, [erstatus]
-       mov r9, 55f
-       sr  r9, [eret]
-
+       lr      r9, [status32]
+       bclr    r9, r9, STATUS_AE_BIT
+       or      r9, r9, (STATUS_E1_MASK|STATUS_E2_MASK)
+       sr      r9, [erstatus]
+       mov     r9, 55f
+       sr      r9, [eret]
        rtie
 55:
 .endm
diff --git a/arch/arc/include/asm/highmem.h b/arch/arc/include/asm/highmem.h
new file mode 100644 (file)
index 0000000..b1585c9
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef CONFIG_HIGHMEM
+
+#include <uapi/asm/page.h>
+#include <asm/kmap_types.h>
+
+/* start after vmalloc area */
+#define FIXMAP_BASE            (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
+#define FIXMAP_SIZE            PGDIR_SIZE      /* only 1 PGD worth */
+#define KM_TYPE_NR             ((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS)
+#define FIXMAP_ADDR(nr)                (FIXMAP_BASE + ((nr) << PAGE_SHIFT))
+
+/* start after fixmap area */
+#define PKMAP_BASE             (FIXMAP_BASE + FIXMAP_SIZE)
+#define PKMAP_SIZE             PGDIR_SIZE
+#define LAST_PKMAP             (PKMAP_SIZE >> PAGE_SHIFT)
+#define LAST_PKMAP_MASK                (LAST_PKMAP - 1)
+#define PKMAP_ADDR(nr)         (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+#define PKMAP_NR(virt)         (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+
+#define kmap_prot              PAGE_KERNEL
+
+
+#include <asm/cacheflush.h>
+
+extern void *kmap(struct page *page);
+extern void *kmap_high(struct page *page);
+extern void *kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void kunmap_high(struct page *page);
+
+extern void kmap_init(void);
+
+static inline void flush_cache_kmaps(void)
+{
+       flush_cache_all();
+}
+
+static inline void kunmap(struct page *page)
+{
+       BUG_ON(in_interrupt());
+       if (!PageHighMem(page))
+               return;
+       kunmap_high(page);
+}
+
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
new file mode 100644 (file)
index 0000000..c5094de
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#ifndef _ASM_ARC_HUGEPAGE_H
+#define _ASM_ARC_HUGEPAGE_H
+
+#include <linux/types.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+       return __pte(pmd_val(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+       return __pmd(pte_val(pte));
+}
+
+#define pmd_wrprotect(pmd)     pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd)       pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkold(pmd)         pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd)       pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkhuge(pmd)                pte_pmd(pte_mkhuge(pmd_pte(pmd)))
+#define pmd_mknotpresent(pmd)  pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_mksplitting(pmd)   pte_pmd(pte_mkspecial(pmd_pte(pmd)))
+#define pmd_mkclean(pmd)       pte_pmd(pte_mkclean(pmd_pte(pmd)))
+
+#define pmd_write(pmd)         pte_write(pmd_pte(pmd))
+#define pmd_young(pmd)         pte_young(pmd_pte(pmd))
+#define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
+#define pmd_dirty(pmd)         pte_dirty(pmd_pte(pmd))
+#define pmd_special(pmd)       pte_special(pmd_pte(pmd))
+
+#define mk_pmd(page, prot)     pte_pmd(mk_pte(page, prot))
+
+#define pmd_trans_huge(pmd)    (pmd_val(pmd) & _PAGE_HW_SZ)
+#define pmd_trans_splitting(pmd)       (pmd_trans_huge(pmd) && pmd_special(pmd))
+
+#define pfn_pmd(pfn, prot)     (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+        /*
+         * open-coded pte_modify() with additional retaining of HW_SZ bit
+         * so that pmd_trans_huge() remains true for this PMD
+         */
+        return __pmd((pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | pgprot_val(newprot));
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                             pmd_t *pmdp, pmd_t pmd)
+{
+       *pmdp = pmd;
+}
+
+extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+                                pmd_t *pmd);
+
+#define has_transparent_hugepage() 1
+
+/* Generic variants assume pgtable_t is struct page *, hence need for these */
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+                                      pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                               unsigned long end);
+
+#endif
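
A minimal userspace sketch (editorial, not from the patch) of why pmd_modify() above retains _PAGE_HW_SZ: a protection change on a huge PMD must not drop the size bit, or pmd_trans_huge() would stop recognising the mapping. Bit positions mirror the MMUv3/v4 block added to pgtable.h later in this diff; the types are simplified for illustration.

#include <stdio.h>

typedef unsigned long pmdval_t;

#define PAGE_SHIFT	13
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define _PAGE_ACCESSED	(1UL << 4)
#define _PAGE_DIRTY	(1UL << 5)
#define _PAGE_HW_SZ	(1UL << 10)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

static pmdval_t pmd_modify(pmdval_t pmd, pmdval_t newprot)
{
	/* keep pfn + A/D bits + HW_SZ, take everything else from newprot */
	return (pmd & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | newprot;
}

int main(void)
{
	pmdval_t huge = (0x1234UL << PAGE_SHIFT) | _PAGE_HW_SZ | _PAGE_DIRTY;
	pmdval_t ro   = pmd_modify(huge, 0);	/* newprot of 0 is purely illustrative */

	printf("HW_SZ still set after modify: %s\n",
	       (ro & _PAGE_HW_SZ) ? "yes" : "no");
	return 0;
}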
index bc51036373261c6068292830adae89a80b5d0c9c..4fd7d62a6e30aa513b9e04f6ee881c219b96da96 100644 (file)
@@ -16,6 +16,7 @@
 #ifdef CONFIG_ISA_ARCOMPACT
 #define TIMER0_IRQ      3
 #define TIMER1_IRQ      4
+#define IPI_IRQ                (NR_CPU_IRQS-1) /* dummy to enable SMP build for UP hardware */
 #else
 #define TIMER0_IRQ      16
 #define TIMER1_IRQ      17
index aa805575c320ae6be1cfcd28e6c86f60d5f17ba1..d8c608174617783496b8855bc6ed19de9b6f67cd 100644 (file)
 #define STATUS_E2_BIT          2       /* Int 2 enable */
 #define STATUS_A1_BIT          3       /* Int 1 active */
 #define STATUS_A2_BIT          4       /* Int 2 active */
+#define STATUS_AE_BIT          5       /* Exception active */
 
 #define STATUS_E1_MASK         (1<<STATUS_E1_BIT)
 #define STATUS_E2_MASK         (1<<STATUS_E2_BIT)
 #define STATUS_A1_MASK         (1<<STATUS_A1_BIT)
 #define STATUS_A2_MASK         (1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK         (1<<STATUS_AE_BIT)
 #define STATUS_IE_MASK         (STATUS_E1_MASK | STATUS_E2_MASK)
 
 /* Other Interrupt Handling related Aux regs */
@@ -91,7 +93,19 @@ static inline void arch_local_irq_restore(unsigned long flags)
 /*
  * Unconditionally Enable IRQs
  */
-extern void arch_local_irq_enable(void);
+static inline void arch_local_irq_enable(void)
+{
+       unsigned long temp;
+
+       __asm__ __volatile__(
+       "       lr   %0, [status32]     \n"
+       "       or   %0, %0, %1         \n"
+       "       flag %0                 \n"
+       : "=&r"(temp)
+       : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+       : "cc", "memory");
+}
+
 
 /*
  * Unconditionally Disable IRQs
diff --git a/arch/arc/include/asm/kmap_types.h b/arch/arc/include/asm/kmap_types.h
new file mode 100644 (file)
index 0000000..f0d7f6a
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+/*
+ * We primarily need to define KM_TYPE_NR here but that in turn
+ * is a function of PGDIR_SIZE etc.
+ * To avoid circular deps issue, put everything in asm/highmem.h
+ */
+#endif
index e8993a2be6c238a11e557986249773148a6eef7d..6ff657a904b61e700421b60423991273df93e1e1 100644 (file)
  * @dt_compat:         Array of device tree 'compatible' strings
  *                     (XXX: although only 1st entry is looked at)
  * @init_early:                Very early callback [called from setup_arch()]
- * @init_irq:          setup external IRQ controllers [called from init_IRQ()]
- * @init_smp:          for each CPU (e.g. setup IPI)
+ * @init_cpu_smp:      for each CPU as it is coming up (SMP as well as UP)
  *                     [(M):init_IRQ(), (o):start_kernel_secondary()]
- * @init_time:         platform specific clocksource/clockevent registration
- *                     [called from time_init()]
  * @init_machine:      arch initcall level callback (e.g. populate static
  *                     platform devices or parse Devicetree)
  * @init_late:         Late initcall level callback
 struct machine_desc {
        const char              *name;
        const char              **dt_compat;
-
        void                    (*init_early)(void);
-       void                    (*init_irq)(void);
 #ifdef CONFIG_SMP
-       void                    (*init_smp)(unsigned int);
+       void                    (*init_cpu_smp)(unsigned int);
 #endif
-       void                    (*init_time)(void);
        void                    (*init_machine)(void);
        void                    (*init_late)(void);
 
index 52c11f0bb0e5b5afbeacda75ef9c014032de4fb4..46f4e5351b2a56e96d440a27201fc612d2e9c195 100644 (file)
@@ -86,9 +86,6 @@ static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
        __mcip_cmd(cmd, param);
 }
 
-extern void mcip_init_early_smp(void);
-extern void mcip_init_smp(unsigned int cpu);
-
 #endif
 
 #endif
index 0f9c3eb5327e4494f4a310e62e194c4457c08bea..b144d7ca7d2076d7f5a41b9bfa179f0f64d21d83 100644 (file)
@@ -24,6 +24,7 @@
 #if (CONFIG_ARC_MMU_VER < 4)
 #define ARC_REG_TLBPD0         0x405
 #define ARC_REG_TLBPD1         0x406
+#define ARC_REG_TLBPD1HI       0       /* Dummy: allows code sharing with ARC700 */
 #define ARC_REG_TLBINDEX       0x407
 #define ARC_REG_TLBCOMMAND     0x408
 #define ARC_REG_PID            0x409
@@ -31,6 +32,7 @@
 #else
 #define ARC_REG_TLBPD0         0x460
 #define ARC_REG_TLBPD1         0x461
+#define ARC_REG_TLBPD1HI       0x463
 #define ARC_REG_TLBINDEX       0x464
 #define ARC_REG_TLBCOMMAND     0x465
 #define ARC_REG_PID            0x468
@@ -83,6 +85,11 @@ void arc_mmu_init(void);
 extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
 void read_decode_mmu_bcr(void);
 
+static inline int is_pae40_enabled(void)
+{
+       return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #endif
index 9c8aa41e45c2248b0ee39a23c3802e4f7425d358..429957f1c2365566006d674bb37311cddb97cc00 100644 (file)
@@ -43,7 +43,6 @@ typedef struct {
 typedef struct {
        unsigned long pgprot;
 } pgprot_t;
-typedef unsigned long pgtable_t;
 
 #define pte_val(x)      ((x).pte)
 #define pgd_val(x)      ((x).pgd)
@@ -57,20 +56,26 @@ typedef unsigned long pgtable_t;
 
 #else /* !STRICT_MM_TYPECHECKS */
 
+#ifdef CONFIG_ARC_HAS_PAE40
+typedef unsigned long long pte_t;
+#else
 typedef unsigned long pte_t;
+#endif
 typedef unsigned long pgd_t;
 typedef unsigned long pgprot_t;
-typedef unsigned long pgtable_t;
 
 #define pte_val(x)     (x)
 #define pgd_val(x)     (x)
 #define pgprot_val(x)  (x)
 #define __pte(x)       (x)
+#define __pgd(x)       (x)
 #define __pgprot(x)    (x)
 #define pte_pgprot(x)  (x)
 
 #endif
 
+typedef pte_t * pgtable_t;
+
 #define ARCH_PFN_OFFSET     (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
 
 #define pfn_valid(pfn)      (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
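
To see why pte_t widens to unsigned long long under PAE40 (and why pfn_pte() later in this series casts the pfn before shifting), here is a short sketch (editorial); the pfn value is just an example sitting at the 4 GB boundary.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 13	/* 8K pages */

int main(void)
{
	uint32_t pfn = 0x80000;			/* first page at the 4 GB boundary */

	uint32_t bad  = pfn << PAGE_SHIFT;		/* truncated with a 32-bit pte_t */
	uint64_t good = (uint64_t)pfn << PAGE_SHIFT;	/* what the widened pfn_pte() does */

	printf("32-bit shift: 0x%08x (wrong)\n", bad);
	printf("64-bit shift: 0x%010llx (correct)\n", (unsigned long long)good);
	return 0;
}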
index 81208bfd9dcbc460c9875ad509bb0cb11348c07e..86ed671286df377bb6231013724737a135a86d69 100644 (file)
@@ -49,7 +49,7 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
 
 static inline int __get_order_pgd(void)
 {
-       return get_order(PTRS_PER_PGD * 4);
+       return get_order(PTRS_PER_PGD * sizeof(pgd_t));
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
@@ -87,7 +87,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline int __get_order_pte(void)
 {
-       return get_order(PTRS_PER_PTE * 4);
+       return get_order(PTRS_PER_PTE * sizeof(pte_t));
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
@@ -107,10 +107,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
        pgtable_t pte_pg;
        struct page *page;
 
-       pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+       pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
        if (!pte_pg)
                return 0;
-       memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+       memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
        page = virt_to_page(pte_pg);
        if (!pgtable_page_ctor(page)) {
                __free_page(page);
@@ -128,12 +128,12 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
 {
        pgtable_page_dtor(virt_to_page(ptep));
-       free_pages(ptep, __get_order_pte());
+       free_pages((unsigned long)ptep, __get_order_pte());
 }
 
 #define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
 
 #define check_pgt_cache()   do { } while (0)
-#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
+#define pmd_pgtable(pmd)       ((pgtable_t) pmd_page_vaddr(pmd))
 
 #endif /* _ASM_ARC_PGALLOC_H */
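
A quick illustration (editorial) of what the sizeof()-based sizing above buys: with PAE40 a pte_t is 8 bytes, so a 256-entry PTE table (the 8K-page configuration) is 2 KB, and the old hard-coded "* 4" would make get_order()/memzero() cover only half of it.

#include <stdio.h>

#define PTRS_PER_PTE	256	/* assumes the 8K-page (11:8:13) config */

int main(void)
{
	unsigned long pte32 = sizeof(unsigned long);		/* !PAE40 */
	unsigned long pte64 = sizeof(unsigned long long);	/* PAE40  */

	printf("table bytes, old assumption : %lu\n", PTRS_PER_PTE * 4UL);
	printf("table bytes, 32-bit pte_t   : %lu\n", PTRS_PER_PTE * pte32);
	printf("table bytes, 64-bit pte_t   : %lu\n", PTRS_PER_PTE * pte64);
	return 0;
}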
index 1281718802f7c8e4d3f71bdf50b3affd57fd66d6..57af2f05ae8459e2ee2427d231370269894ba95f 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm-generic/pgtable-nopmd.h>
+#include <linux/const.h>
 
 /**************************************************************************
  * Page Table Flags
@@ -60,7 +61,8 @@
 #define _PAGE_EXECUTE       (1<<3)     /* Page has user execute perm (H) */
 #define _PAGE_WRITE         (1<<4)     /* Page has user write perm (H) */
 #define _PAGE_READ          (1<<5)     /* Page has user read perm (H) */
-#define _PAGE_MODIFIED      (1<<6)     /* Page modified (dirty) (S) */
+#define _PAGE_DIRTY         (1<<6)     /* Page modified (dirty) (S) */
+#define _PAGE_SPECIAL       (1<<7)
 #define _PAGE_GLOBAL        (1<<8)     /* Page is global (H) */
 #define _PAGE_PRESENT       (1<<10)    /* TLB entry is valid (H) */
 
@@ -71,7 +73,8 @@
 #define _PAGE_WRITE         (1<<2)     /* Page has user write perm (H) */
 #define _PAGE_READ          (1<<3)     /* Page has user read perm (H) */
 #define _PAGE_ACCESSED      (1<<4)     /* Page is accessed (S) */
-#define _PAGE_MODIFIED      (1<<5)     /* Page modified (dirty) (S) */
+#define _PAGE_DIRTY         (1<<5)     /* Page modified (dirty) (S) */
+#define _PAGE_SPECIAL       (1<<6)
 
 #if (CONFIG_ARC_MMU_VER >= 4)
 #define _PAGE_WTHRU         (1<<7)     /* Page cache mode write-thru (H) */
 #define _PAGE_PRESENT       (1<<9)     /* TLB entry is valid (H) */
 
 #if (CONFIG_ARC_MMU_VER >= 4)
-#define _PAGE_SZ            (1<<10)    /* Page Size indicator (H) */
+#define _PAGE_HW_SZ         (1<<10)    /* Page Size indicator (H): 0 normal, 1 super */
 #endif
 
 #define _PAGE_SHARED_CODE   (1<<11)    /* Shared Code page with cmn vaddr
                                           usable for shared TLB entries (H) */
+
+#define _PAGE_UNUSED_BIT    (1<<12)
 #endif
 
 /* vmalloc permissions */
 #define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
                        _PAGE_GLOBAL | _PAGE_PRESENT)
 
-#ifdef CONFIG_ARC_CACHE_PAGES
-#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
-#else
-#define _PAGE_DEF_CACHEABLE (0)
+#ifndef CONFIG_ARC_CACHE_PAGES
+#undef _PAGE_CACHEABLE
+#define _PAGE_CACHEABLE 0
 #endif
 
-/* Helper for every "user" page
- * -kernel can R/W/X
- * -by default cached, unless config otherwise
- * -present in memory
- */
-#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
+#ifndef _PAGE_HW_SZ
+#define _PAGE_HW_SZ    0
+#endif
+
+/* Defaults for every user page */
+#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
 
 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 
 /* More Abbreviated helpers */
 #define PAGE_U_NONE     __pgprot(___DEF)
  * user vaddr space - visible in all addr spaces, but kernel mode only
  * Thus Global, all-kernel-access, no-user-access, cached
  */
-#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)
 
 /* ioremap */
 #define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
 
 /* Masks for actual TLB "PD"s */
-#define PTE_BITS_IN_PD0                (_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_IN_PD0                (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
 #define PTE_BITS_RWX           (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+
+#ifdef CONFIG_ARC_HAS_PAE40
+#define PTE_BITS_NON_RWX_IN_PD1        (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
+#else
 #define PTE_BITS_NON_RWX_IN_PD1        (PAGE_MASK | _PAGE_CACHEABLE)
+#endif
 
 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 
 /* Optimal Sizing of Pg Tbl - based on MMU page size */
 #if defined(CONFIG_ARC_PAGE_SIZE_8K)
-#define BITS_FOR_PTE   8
+#define BITS_FOR_PTE   8               /* 11:8:13 */
 #elif defined(CONFIG_ARC_PAGE_SIZE_16K)
-#define BITS_FOR_PTE   8
+#define BITS_FOR_PTE   8               /* 10:8:14 */
 #elif defined(CONFIG_ARC_PAGE_SIZE_4K)
-#define BITS_FOR_PTE   9
+#define BITS_FOR_PTE   9               /* 11:9:12 */
 #endif
 
 #define BITS_FOR_PGD   (32 - BITS_FOR_PTE - BITS_IN_PAGE)
 
-#define PGDIR_SHIFT    (BITS_FOR_PTE + BITS_IN_PAGE)
+#define PGDIR_SHIFT    (32 - BITS_FOR_PGD)
 #define PGDIR_SIZE     (1UL << PGDIR_SHIFT)    /* vaddr span, not PDG sz */
 #define PGDIR_MASK     (~(PGDIR_SIZE-1))
 
-#ifdef __ASSEMBLY__
-#define        PTRS_PER_PTE    (1 << BITS_FOR_PTE)
-#define        PTRS_PER_PGD    (1 << BITS_FOR_PGD)
-#else
-#define        PTRS_PER_PTE    (1UL << BITS_FOR_PTE)
-#define        PTRS_PER_PGD    (1UL << BITS_FOR_PGD)
-#endif
+#define        PTRS_PER_PTE    _BITUL(BITS_FOR_PTE)
+#define        PTRS_PER_PGD    _BITUL(BITS_FOR_PGD)
+
 /*
  * Number of entries a user land program use.
  * TASK_SIZE is the maximum vaddr that can be used by a userland program.
@@ -270,15 +275,10 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
                (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
                                PAGE_SHIFT)))
 
-#define mk_pte(page, pgprot)                                           \
-({                                                                     \
-       pte_t pte;                                                      \
-       pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);   \
-       pte;                                                            \
-})
-
+#define mk_pte(page, prot)     pfn_pte(page_to_pfn(page), prot)
 #define pte_pfn(pte)           (pte_val(pte) >> PAGE_SHIFT)
-#define pfn_pte(pfn, prot)     (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pfn_pte(pfn, prot)     (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
+                                pgprot_val(prot)))
 #define __pte_index(addr)      (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
 /*
@@ -295,23 +295,26 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 /* Zoo of pte_xxx function */
 #define pte_read(pte)          (pte_val(pte) & _PAGE_READ)
 #define pte_write(pte)         (pte_val(pte) & _PAGE_WRITE)
-#define pte_dirty(pte)         (pte_val(pte) & _PAGE_MODIFIED)
+#define pte_dirty(pte)         (pte_val(pte) & _PAGE_DIRTY)
 #define pte_young(pte)         (pte_val(pte) & _PAGE_ACCESSED)
-#define pte_special(pte)       (0)
+#define pte_special(pte)       (pte_val(pte) & _PAGE_SPECIAL)
 
 #define PTE_BIT_FUNC(fn, op) \
        static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
 
+PTE_BIT_FUNC(mknotpresent,     &= ~(_PAGE_PRESENT));
 PTE_BIT_FUNC(wrprotect,        &= ~(_PAGE_WRITE));
 PTE_BIT_FUNC(mkwrite,  |= (_PAGE_WRITE));
-PTE_BIT_FUNC(mkclean,  &= ~(_PAGE_MODIFIED));
-PTE_BIT_FUNC(mkdirty,  |= (_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkclean,  &= ~(_PAGE_DIRTY));
+PTE_BIT_FUNC(mkdirty,  |= (_PAGE_DIRTY));
 PTE_BIT_FUNC(mkold,    &= ~(_PAGE_ACCESSED));
 PTE_BIT_FUNC(mkyoung,  |= (_PAGE_ACCESSED));
 PTE_BIT_FUNC(exprotect,        &= ~(_PAGE_EXECUTE));
 PTE_BIT_FUNC(mkexec,   |= (_PAGE_EXECUTE));
+PTE_BIT_FUNC(mkspecial,        |= (_PAGE_SPECIAL));
+PTE_BIT_FUNC(mkhuge,   |= (_PAGE_HW_SZ));
 
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+#define __HAVE_ARCH_PTE_SPECIAL
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
@@ -357,7 +360,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 #define pgd_offset_fast(mm, addr)      pgd_offset(mm, addr)
 #endif
 
-extern void paging_init(void);
 extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep);
@@ -383,6 +385,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
  * remap a physical page `pfn' of size `size' with page protection `prot'
  * into virtual address `from'
  */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#include <asm/hugepage.h>
+#endif
+
 #include <asm-generic/pgtable.h>
 
 /* to cope with aliasing VIPT cache */
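
The "11:8:13"-style annotations added next to BITS_FOR_PTE can be reproduced with this small sketch (editorial); it derives BITS_FOR_PGD and PGDIR_SHIFT exactly the way the header does for each supported page size.

#include <stdio.h>

static void show(int page_shift, int bits_for_pte)
{
	int bits_for_pgd = 32 - bits_for_pte - page_shift;
	int pgdir_shift  = 32 - bits_for_pgd;

	printf("%2dK pages: %d:%d:%d split, PGDIR_SIZE = %lu MB, "
	       "PTRS_PER_PGD = %lu, PTRS_PER_PTE = %lu\n",
	       1 << (page_shift - 10), bits_for_pgd, bits_for_pte, page_shift,
	       (1UL << pgdir_shift) >> 20,
	       1UL << bits_for_pgd, 1UL << bits_for_pte);
}

int main(void)
{
	show(12, 9);	/* CONFIG_ARC_PAGE_SIZE_4K  -> 11:9:12 */
	show(13, 8);	/* CONFIG_ARC_PAGE_SIZE_8K  -> 11:8:13 */
	show(14, 8);	/* CONFIG_ARC_PAGE_SIZE_16K -> 10:8:14 */
	return 0;
}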
index ee682d8e0213c5c6c2fac2d70f39dff23b15854a..44545354e9e85616b703f531787ed74def265bf5 100644 (file)
@@ -114,7 +114,12 @@ extern unsigned int get_wchan(struct task_struct *p);
  * -----------------------------------------------------------------------------
  */
 #define VMALLOC_START  0x70000000
-#define VMALLOC_SIZE   (PAGE_OFFSET - VMALLOC_START)
+
+/*
+ * 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter
+ * See asm/highmem.h for details
+ */
+#define VMALLOC_SIZE   (PAGE_OFFSET - VMALLOC_START - PGDIR_SIZE * 4)
 #define VMALLOC_END    (VMALLOC_START + VMALLOC_SIZE)
 
 #define USER_KERNEL_GUTTER    0x10000000
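
Worked numbers (editorial) for the new VMALLOC_SIZE, assuming the 8K-page configuration where PGDIR_SIZE is 2 MB: carving four PGDIR_SIZE windows out of the 256 MB between VMALLOC_START and PAGE_OFFSET leaves 248 MB for vmalloc.

#include <stdio.h>

int main(void)
{
	unsigned long page_offset   = 0x80000000UL;
	unsigned long vmalloc_start = 0x70000000UL;
	unsigned long pgdir_size    = 2UL << 20;	/* 8K pages, 11:8:13 split */

	unsigned long vmalloc_size = page_offset - vmalloc_start - pgdir_size * 4;

	printf("vmalloc span: %lu MB of 256 MB; the rest is fixmap/pkmap/gutter\n",
	       vmalloc_size >> 20);
	return 0;
}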
index 6e3ef5ba4f74adca5d4fd1ed78440579166b4b64..307846691be6ad7bcad8abcdc217af84ddec5609 100644 (file)
@@ -33,4 +33,11 @@ extern int root_mountflags, end_mem;
 void setup_processor(void);
 void __init setup_arch_memory(void);
 
+/* Helpers used in arc_*_mumbojumbo routines */
+#define IS_AVAIL1(v, s)                ((v) ? s : "")
+#define IS_DISABLED_RUN(v)     ((v) ? "" : "(disabled) ")
+#define IS_USED_RUN(v)         ((v) ? "" : "(not used) ")
+#define IS_USED_CFG(cfg)       IS_USED_RUN(IS_ENABLED(cfg))
+#define IS_AVAIL2(v, s, cfg)   IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
+
 #endif /* __ASMARC_SETUP_H */
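
A userspace sketch (editorial) of how the IS_AVAIL helpers above compose in a cpuinfo-style printf(): IS_AVAIL2() expands to two arguments, so the format string needs two consecutive %s. The feature and config names are hypothetical, and IS_ENABLED() is replaced by a trivial stand-in.

#include <stdio.h>

#define IS_ENABLED(cfg)		(cfg)	/* stand-in for the kconfig helper */

#define IS_AVAIL1(v, s)		((v) ? s : "")
#define IS_DISABLED_RUN(v)	((v) ? "" : "(disabled) ")
#define IS_USED_RUN(v)		((v) ? "" : "(not used) ")
#define IS_USED_CFG(cfg)	IS_USED_RUN(IS_ENABLED(cfg))
#define IS_AVAIL2(v, s, cfg)	IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))

int main(void)
{
	int hw_has_llsc = 1;		/* BCR says the hardware has the feature */
	int CONFIG_ARC_HAS_LLSC = 0;	/* ...but the kernel was built without it */

	/* prints: "ISA Extn : llock/scond (not used) " */
	printf("ISA Extn\t: %s%s\n",
	       IS_AVAIL2(hw_has_llsc, "llock/scond ", CONFIG_ARC_HAS_LLSC));
	return 0;
}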
index 3845b9e94f69b0ee5dbda11fe9467c3d4b517aed..133c867d15af0a627576d9faf89c0569220a6f47 100644 (file)
@@ -45,12 +45,19 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
  * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
  *
  * @info:              SoC SMP specific info for /proc/cpuinfo etc
+ * @init_early_smp:    An SMP specific h/w block can init itself
+ *                     Could be common across platforms so not covered by
+ *                     mach_desc->init_early()
+ * @init_irq_cpu:      Called for each core so SMP h/w block driver can do
+ *                     any needed setup per cpu (e.g. IPI request)
  * @cpu_kick:          For Master to kickstart a cpu (optionally at a PC)
  * @ipi_send:          To send IPI to a @cpu
 * @ipi_clear:         To clear IPI received at @irq
  */
 struct plat_smp_ops {
        const char      *info;
+       void            (*init_early_smp)(void);
+       void            (*init_irq_cpu)(int cpu);
        void            (*cpu_kick)(int cpu, unsigned long pc);
        void            (*ipi_send)(int cpu);
        void            (*ipi_clear)(int irq);
index 71c7b2e4b8745002083e71fd19ae28305d62972e..1fe9c8c80280b9c53006a695994548aa6296cd56 100644 (file)
@@ -17,6 +17,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void local_flush_tlb_range(struct vm_area_struct *vma,
                           unsigned long start, unsigned long end);
+void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                              unsigned long end);
 
 #ifndef CONFIG_SMP
 #define flush_tlb_range(vma, s, e)     local_flush_tlb_range(vma, s, e)
@@ -24,6 +26,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma,
 #define flush_tlb_kernel_range(s, e)   local_flush_tlb_kernel_range(s, e)
 #define flush_tlb_all()                        local_flush_tlb_all()
 #define flush_tlb_mm(mm)               local_flush_tlb_mm(mm)
+#define flush_pmd_tlb_range(vma, s, e) local_flush_pmd_tlb_range(vma, s, e)
 #else
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                                         unsigned long end);
@@ -31,5 +34,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+
 #endif /* CONFIG_SMP */
 #endif
index 9d129a2a1351951465b22fd020f568abe1843b7a..059aff38f10ab46892470a87e60ddd59e627d710 100644 (file)
@@ -9,6 +9,8 @@
 #ifndef _UAPI__ASM_ARC_PAGE_H
 #define _UAPI__ASM_ARC_PAGE_H
 
+#include <linux/const.h>
+
 /* PAGE_SHIFT determines the page size */
 #if defined(CONFIG_ARC_PAGE_SIZE_16K)
 #define PAGE_SHIFT 14
 #define PAGE_SHIFT 13
 #endif
 
-#ifdef __ASSEMBLY__
-#define PAGE_SIZE      (1 << PAGE_SHIFT)
-#define PAGE_OFFSET    (0x80000000)
-#else
-#define PAGE_SIZE      (1UL << PAGE_SHIFT)     /* Default 8K */
-#define PAGE_OFFSET    (0x80000000UL)          /* Kernel starts at 2G onwards */
-#endif
+#define PAGE_SIZE      _BITUL(PAGE_SHIFT)      /* Default 8K */
+#define PAGE_OFFSET    _AC(0x80000000, UL)     /* Kernel starts at 2G onwards */
 
 #define PAGE_MASK      (~(PAGE_SIZE-1))
 
index 8fa76567e40299e0449b01d4d6dae6e7b8918807..445e63a10754fbdb0be82663af03f489282cfce5 100644 (file)
@@ -24,7 +24,7 @@
        .align 4
 
 # Initial 16 slots are Exception Vectors
-VECTOR stext                   ; Restart Vector (jump to entry point)
+VECTOR res_service             ; Reset Vector
 VECTOR mem_service             ; Mem exception
 VECTOR instr_service           ; Instrn Error
 VECTOR EV_MachineCheck         ; Fatal Machine check
index 15d457b4403ae4a9ab55f455fd7e2ad1492e780c..59f52035b4ea34a582b50b80e5db48893c1dc7bf 100644 (file)
@@ -86,7 +86,7 @@
  */
 
 ; ********* Critical System Events **********************
-VECTOR   res_service             ; 0x0, Restart Vector  (0x0)
+VECTOR   res_service             ; 0x0, Reset Vector   (0x0)
 VECTOR   mem_service             ; 0x8, Mem exception   (0x1)
 VECTOR   instr_service           ; 0x10, Instrn Error   (0x2)
 
@@ -155,13 +155,9 @@ int2_saved_reg:
 ; ---------------------------------------------
        .section .text, "ax",@progbits
 
-res_service:           ; processor restart
-       flag    0x1     ; not implemented
-       nop
-       nop
 
-reserved:              ; processor restart
-       rtie            ; jump to processor initializations
+reserved:
+       flag 1          ; Unexpected event, halt
 
 ;##################### Interrupt Handling ##############################
 
@@ -175,12 +171,25 @@ ENTRY(handle_interrupt_level2)
 
        ;------------------------------------------------------
        ; if L2 IRQ interrupted a L1 ISR, disable preemption
+       ;
+       ; This is to avoid a potential L1-L2-L1 scenario
+       ;  -L1 IRQ taken
+       ;  -L2 interrupts L1 (before L1 ISR could run)
+       ;  -preemption off IRQ, user task in syscall picked to run
+       ;  -RTIE to userspace
+       ;       Returns from L2 context fine
+       ;       But both L1 and L2 re-enabled, so another L1 can be taken
+       ;       while prev L1 is still unserviced
+       ;
        ;------------------------------------------------------
 
+       ; L2 interrupting L1 implies both L2 and L1 active
+       ; However both A2 and A1 are NOT set in STATUS32, thus
+       ; need to check STATUS32_L2 to determine if L1 was active
+
        ld r9, [sp, PT_status32]        ; get statu32_l2 (saved in pt_regs)
        bbit0 r9, STATUS_A1_BIT, 1f     ; L1 not active when L2 IRQ, so normal
 
-       ; A1 is set in status32_l2
        ; bump thread_info->preempt_count (Disable preemption)
        GET_CURR_THR_INFO_FROM_SP   r10
        ld      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
@@ -320,11 +329,10 @@ END(call_do_page_fault)
        ; Note that we use realtime STATUS32 (not pt_regs->status32) to
        ; decide that.
 
-       ; if Returning from Exception
-       btst   r10, STATUS_AE_BIT
-       bnz    .Lexcep_ret
+       and.f   0, r10, (STATUS_A1_MASK|STATUS_A2_MASK)
+       bz      .Lexcep_or_pure_K_ret
 
-       ; Not Exception so maybe Interrupts (Level 1 or 2)
+       ; Returning from Interrupts (Level 1 or 2)
 
 #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
 
@@ -365,8 +373,7 @@ END(call_do_page_fault)
        st      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
 
 149:
-       ;return from level 2
-       INTERRUPT_EPILOGUE 2
+       INTERRUPT_EPILOGUE 2    ; return from level 2 interrupt
 debug_marker_l2:
        rtie
 
@@ -374,15 +381,11 @@ not_level2_interrupt:
 
 #endif
 
-       bbit0  r10, STATUS_A1_BIT, .Lpure_k_mode_ret
-
-       ;return from level 1
-       INTERRUPT_EPILOGUE 1
+       INTERRUPT_EPILOGUE 1    ; return from level 1 interrupt
 debug_marker_l1:
        rtie
 
-.Lexcep_ret:
-.Lpure_k_mode_ret:
+.Lexcep_or_pure_K_ret:
 
        ;this case is for syscalls or Exceptions or pure kernel mode
 
index 812f95e6ae6946d56550cbd80a0baaa8a6e94cc0..689dd867fdff53eeafa0d01d980ecf425f66a759 100644 (file)
 .endm
 
        .section .init.text, "ax",@progbits
-       .type stext, @function
-       .globl stext
-stext:
-       ;-------------------------------------------------------------------
-       ; Don't clobber r0-r2 yet. It might have bootloader provided info
-       ;-------------------------------------------------------------------
+
+;----------------------------------------------------------------
+; Default Reset Handler (jumped into from Reset vector)
+; - Don't clobber r0,r1,r2 as they might have u-boot provided args
+; - Platforms can override this weak version if needed
+;----------------------------------------------------------------
+WEAK(res_service)
+       j       stext
+END(res_service)
+
+;----------------------------------------------------------------
+; Kernel Entry point
+;----------------------------------------------------------------
+ENTRY(stext)
 
        CPU_EARLY_SETUP
 
 #ifdef CONFIG_SMP
-       ; Ensure Boot (Master) proceeds. Others wait in platform dependent way
-       ;       IDENTITY Reg [ 3  2  1  0 ]
-       ;       (cpu-id)             ^^^        => Zero for UP ARC700
-       ;                                       => #Core-ID if SMP (Master 0)
-       ; Note that non-boot CPUs might not land here if halt-on-reset and
-       ; instead breath life from @first_lines_of_secondary, but we still
-       ; need to make sure only boot cpu takes this path.
        GET_CPU_ID  r5
        cmp     r5, 0
-       mov.ne  r0, r5
-       jne     arc_platform_smp_wait_to_boot
+       mov.nz  r0, r5
+#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
+       ; Non-Master can proceed as system would be booted sufficiently
+       jnz     first_lines_of_secondary
+#else
+       ; Non-Masters wait for Master to boot enough and bring them up
+       jnz     arc_platform_smp_wait_to_boot
 #endif
+       ; Master falls thru
+#endif
+
        ; Clear BSS before updating any globals
        ; XXX: use ZOL here
        mov     r5, __bss_start
@@ -102,18 +111,14 @@ stext:
        GET_TSK_STACK_BASE r9, sp       ; r9 = tsk, sp = stack base(output)
 
        j       start_kernel    ; "C" entry point
+END(stext)
 
 #ifdef CONFIG_SMP
 ;----------------------------------------------------------------
 ;     First lines of code run by secondary before jumping to 'C'
 ;----------------------------------------------------------------
        .section .text, "ax",@progbits
-       .type first_lines_of_secondary, @function
-       .globl first_lines_of_secondary
-
-first_lines_of_secondary:
-
-       CPU_EARLY_SETUP
+ENTRY(first_lines_of_secondary)
 
        ; setup per-cpu idle task as "current" on this CPU
        ld      r0, [@secondary_idle_tsk]
@@ -126,5 +131,5 @@ first_lines_of_secondary:
        GET_TSK_STACK_BASE r0, sp
 
        j       start_kernel_secondary
-
+END(first_lines_of_secondary)
 #endif
index 039fac30b5c1f2fca837c6f9cc11de0e0c56c35d..06bcedf19b622b4ef26503660349bc07a8a52d2e 100644 (file)
@@ -79,17 +79,16 @@ static struct irq_chip onchip_intc = {
 static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
                               irq_hw_number_t hw)
 {
-       /*
-        * XXX: the IPI IRQ needs to be handled like TIMER too. However ARC core
-        *      code doesn't own it (like TIMER0). ISS IDU / ezchip define it
-        *      in platform header which can't be included here as it goes
-        *      against multi-platform image philisophy
-        */
-       if (irq == TIMER0_IRQ)
+       switch (irq) {
+       case TIMER0_IRQ:
+#ifdef CONFIG_SMP
+       case IPI_IRQ:
+#endif
                irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
-       else
+               break;
+       default:
                irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
-
+       }
        return 0;
 }
 
@@ -148,78 +147,15 @@ IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
 
 void arch_local_irq_enable(void)
 {
-
        unsigned long flags = arch_local_save_flags();
 
-       /* Allow both L1 and L2 at the onset */
-       flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
-
-       /* Called from hard ISR (between irq_enter and irq_exit) */
-       if (in_irq()) {
-
-               /* If in L2 ISR, don't re-enable any further IRQs as this can
-                * cause IRQ priorities to get upside down. e.g. it could allow
-                * L1 be taken while in L2 hard ISR which is wrong not only in
-                * theory, it can also cause the dreaded L1-L2-L1 scenario
-                */
-               if (flags & STATUS_A2_MASK)
-                       flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
-
-               /* Even if in L1 ISR, allowe Higher prio L2 IRQs */
-               else if (flags & STATUS_A1_MASK)
-                       flags &= ~(STATUS_E1_MASK);
-       }
-
-       /* called from soft IRQ, ideally we want to re-enable all levels */
-
-       else if (in_softirq()) {
-
-               /* However if this is case of L1 interrupted by L2,
-                * re-enabling both may cause whaco L1-L2-L1 scenario
-                * because ARC700 allows level 1 to interrupt an active L2 ISR
-                * Thus we disable both
-                * However some code, executing in soft ISR wants some IRQs
-                * to be enabled so we re-enable L2 only
-                *
-                * How do we determine L1 intr by L2
-                *  -A2 is set (means in L2 ISR)
-                *  -E1 is set in this ISR's pt_regs->status32 which is
-                *      saved copy of status32_l2 when l2 ISR happened
-                */
-               struct pt_regs *pt = get_irq_regs();
-
-               if ((flags & STATUS_A2_MASK) && pt &&
-                   (pt->status32 & STATUS_A1_MASK)) {
-                       /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
-                       flags &= ~(STATUS_E1_MASK);
-               }
-       }
+       if (flags & STATUS_A2_MASK)
+               flags |= STATUS_E2_MASK;
+       else if (flags & STATUS_A1_MASK)
+               flags |= STATUS_E1_MASK;
 
        arch_local_irq_restore(flags);
 }
 
-#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
-
-/*
- * Simpler version for only 1 level of interrupt
- * Here we only Worry about Level 1 Bits
- */
-void arch_local_irq_enable(void)
-{
-       unsigned long flags;
-
-       /*
-        * ARC IDE Drivers tries to re-enable interrupts from hard-isr
-        * context which is simply wrong
-        */
-       if (in_irq()) {
-               WARN_ONCE(1, "IRQ enabled from hard-isr");
-               return;
-       }
-
-       flags = arch_local_save_flags();
-       flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
-       arch_local_irq_restore(flags);
-}
-#endif
 EXPORT_SYMBOL(arch_local_irq_enable);
+#endif
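
A table-top sketch (editorial) mirroring the simplified rule now used by the CONFIG_ARC_COMPACT_IRQ_LEVELS version of arch_local_irq_enable(): only re-enable the interrupt level that is currently active. E1 is assumed at bit 1, consistent with the E2/A1/A2 positions in irqflags-compact.h.

#include <stdio.h>

#define E1 (1u << 1)
#define E2 (1u << 2)
#define A1 (1u << 3)
#define A2 (1u << 4)

static unsigned int irq_enable_rule(unsigned int flags)
{
	if (flags & A2)			/* running in a level-2 ISR */
		flags |= E2;
	else if (flags & A1)		/* running in a level-1 ISR */
		flags |= E1;
	return flags;
}

int main(void)
{
	unsigned int samples[] = { A2, A1, 0 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int out = irq_enable_rule(samples[i]);
		printf("in=0x%02x -> out=0x%02x (E1=%d E2=%d)\n",
		       samples[i], out, !!(out & E1), !!(out & E2));
	}
	return 0;
}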
index 2989a7bcf8a863709734d7f5343bb16c789089f2..2ee226546c6a821f739a079326ed92ad52fe16b8 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/irqchip.h>
 #include <asm/mach_desc.h>
+#include <asm/smp.h>
 
 /*
  * Late Interrupt system init called from start_kernel for Boot CPU only
  */
 void __init init_IRQ(void)
 {
-       /* Any external intc can be setup here */
-       if (machine_desc->init_irq)
-               machine_desc->init_irq();
-
-       /* process the entire interrupt tree in one go */
+       /*
+        * Process the entire interrupt tree in one go.
+        * Any external intc will be set up, provided the DT chains it
+        * properly.
+        */
        irqchip_init();
 
 #ifdef CONFIG_SMP
-       /* Master CPU can initialize it's side of IPI */
-       if (machine_desc->init_smp)
-               machine_desc->init_smp(smp_processor_id());
+       /* an SMP H/w block can request its IPI IRQ here */
+       if (plat_smp_ops.init_irq_cpu)
+               plat_smp_ops.init_irq_cpu(smp_processor_id());
+
+       if (machine_desc->init_cpu_smp)
+               machine_desc->init_cpu_smp(smp_processor_id());
 #endif
 }
 
index 4ffd1855f1bdc586c5f258f376a109ea11646a30..74a9b074ac3e4e64d97ef8e069f73a9104531682 100644 (file)
 #include <linux/irq.h>
 #include <linux/spinlock.h>
 #include <asm/mcip.h>
+#include <asm/setup.h>
 
 static char smp_cpuinfo_buf[128];
 static int idu_detected;
 
 static DEFINE_RAW_SPINLOCK(mcip_lock);
 
-/*
- * Any SMP specific init any CPU does when it comes up.
- * Here we setup the CPU to enable Inter-Processor-Interrupts
- * Called for each CPU
- * -Master      : init_IRQ()
- * -Other(s)    : start_kernel_secondary()
- */
-void mcip_init_smp(unsigned int cpu)
+static void mcip_setup_per_cpu(int cpu)
 {
        smp_ipi_irq_setup(cpu, IPI_IRQ);
 }
@@ -96,34 +90,8 @@ static void mcip_ipi_clear(int irq)
 #endif
 }
 
-volatile int wake_flag;
-
-static void mcip_wakeup_cpu(int cpu, unsigned long pc)
-{
-       BUG_ON(cpu == 0);
-       wake_flag = cpu;
-}
-
-void arc_platform_smp_wait_to_boot(int cpu)
+static void mcip_probe_n_setup(void)
 {
-       while (wake_flag != cpu)
-               ;
-
-       wake_flag = 0;
-       __asm__ __volatile__("j @first_lines_of_secondary       \n");
-}
-
-struct plat_smp_ops plat_smp_ops = {
-       .info           = smp_cpuinfo_buf,
-       .cpu_kick       = mcip_wakeup_cpu,
-       .ipi_send       = mcip_ipi_send,
-       .ipi_clear      = mcip_ipi_clear,
-};
-
-void mcip_init_early_smp(void)
-{
-#define IS_AVAIL1(var, str)    ((var) ? str : "")
-
        struct mcip_bcr {
 #ifdef CONFIG_CPU_BIG_ENDIAN
                unsigned int pad3:8,
@@ -161,6 +129,14 @@ void mcip_init_early_smp(void)
                panic("kernel trying to use non-existent GRTC\n");
 }
 
+struct plat_smp_ops plat_smp_ops = {
+       .info           = smp_cpuinfo_buf,
+       .init_early_smp = mcip_probe_n_setup,
+       .init_irq_cpu   = mcip_setup_per_cpu,
+       .ipi_send       = mcip_ipi_send,
+       .ipi_clear      = mcip_ipi_clear,
+};
+
 /***************************************************************************
  * ARCv2 Interrupt Distribution Unit (IDU)
  *
index cabde9dc0696479cc3a4d3074fd526cf89c85182..c33e77c0ad3e9eb60367b6c2510c4e35c60d3b52 100644 (file)
@@ -160,10 +160,6 @@ static const struct cpuinfo_data arc_cpu_tbl[] = {
        { {0x00, NULL           } }
 };
 
-#define IS_AVAIL1(v, s)                ((v) ? s : "")
-#define IS_USED_RUN(v)         ((v) ? "" : "(not used) ")
-#define IS_USED_CFG(cfg)       IS_USED_RUN(IS_ENABLED(cfg))
-#define IS_AVAIL2(v, s, cfg)   IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
 
 static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
@@ -415,8 +411,9 @@ void __init setup_arch(char **cmdline_p)
        if (machine_desc->init_early)
                machine_desc->init_early();
 
-       setup_processor();
        smp_init_cpus();
+
+       setup_processor();
        setup_arch_memory();
 
        /* copy flat DT out of .init and then unflatten it */
index be13d12420bad642c5141a58fdc82d5798204b59..580587805fa302d0d28f7adc89434b920b3be651 100644 (file)
@@ -42,8 +42,13 @@ void __init smp_prepare_boot_cpu(void)
 }
 
 /*
- * Initialise the CPU possible map early - this describes the CPUs
- * which may be present or become present in the system.
+ * Called from setup_arch() before calling setup_processor()
+ *
+ * - Initialise the CPU possible map early - this describes the CPUs
+ *   which may be present or become present in the system.
+ * - Call the early SMP init hook. This can initialize a multi-core IP
+ *   block which may be common to several platforms (hence not part of
+ *   the platform specific init_early() hook)
  */
 void __init smp_init_cpus(void)
 {
@@ -51,6 +56,9 @@ void __init smp_init_cpus(void)
 
        for (i = 0; i < NR_CPUS; i++)
                set_cpu_possible(i, true);
+
+       if (plat_smp_ops.init_early_smp)
+               plat_smp_ops.init_early_smp();
 }
 
 /* called from init ( ) =>  process 1 */
@@ -72,35 +80,29 @@ void __init smp_cpus_done(unsigned int max_cpus)
 }
 
 /*
- * After power-up, a non Master CPU needs to wait for Master to kick start it
- *
- * The default implementation halts
- *
- * This relies on platform specific support allowing Master to directly set
- * this CPU's PC (to be @first_lines_of_secondary() and kick start it.
- *
- * In lack of such h/w assist, platforms can override this function
- *   - make this function busy-spin on a token, eventually set by Master
- *     (from arc_platform_smp_wakeup_cpu())
- *   - Once token is available, jump to @first_lines_of_secondary
- *     (using inline asm).
- *
- * Alert: can NOT use stack here as it has not been determined/setup for CPU.
- *        If it turns out to be elaborate, it's better to code it in assembly
- *
+ * Default smp boot helper for Run-on-reset case where all cores start off
+ * together. Non-masters need to wait for Master to start running.
+ * This is implemented using a flag in memory, which Non-masters spin-wait on.
+ * Master sets it to cpu-id of core to "ungate" it.
  */
-void __weak arc_platform_smp_wait_to_boot(int cpu)
+static volatile int wake_flag;
+
+static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
-       /*
-        * As a hack for debugging - since debugger will single-step over the
-        * FLAG insn - wrap the halt itself it in a self loop
-        */
-       __asm__ __volatile__(
-       "1:             \n"
-       "       flag 1  \n"
-       "       b 1b    \n");
+       BUG_ON(cpu == 0);
+       wake_flag = cpu;
+}
+
+void arc_platform_smp_wait_to_boot(int cpu)
+{
+       while (wake_flag != cpu)
+               ;
+
+       wake_flag = 0;
+       __asm__ __volatile__("j @first_lines_of_secondary       \n");
 }
 
+
 const char *arc_platform_smp_cpuinfo(void)
 {
        return plat_smp_ops.info ? : "";
@@ -129,8 +131,12 @@ void start_kernel_secondary(void)
 
        pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
-       if (machine_desc->init_smp)
-               machine_desc->init_smp(cpu);
+       /* Some SMP H/w setup - for each cpu */
+       if (plat_smp_ops.init_irq_cpu)
+               plat_smp_ops.init_irq_cpu(cpu);
+
+       if (machine_desc->init_cpu_smp)
+               machine_desc->init_cpu_smp(cpu);
 
        arc_local_timer_setup();
 
@@ -161,6 +167,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        if (plat_smp_ops.cpu_kick)
                plat_smp_ops.cpu_kick(cpu,
                                (unsigned long)first_lines_of_secondary);
+       else
+               arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);
 
        /* wait for 1 sec after kicking the secondary */
        wait_till = jiffies + HZ;
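
A userspace analogue (editorial) of the default run-on-reset handshake above: the master publishes the cpu-id to ungate, and each secondary spins until it sees its own id. Threads stand in for cores here; the kernel version uses a bare volatile int because secondaries have no stack or locking infrastructure set up yet.

#include <pthread.h>
#include <stdio.h>

static volatile int wake_flag;

static void *secondary(void *arg)
{
	int cpu = (int)(long)arg;

	while (wake_flag != cpu)	/* arc_platform_smp_wait_to_boot() */
		;
	wake_flag = 0;
	printf("cpu %d: ungated, would jump to first_lines_of_secondary\n", cpu);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, (void *)1L);
	wake_flag = 1;			/* arc_default_smp_cpu_kick(1, ...) */
	pthread_join(&t, NULL);
	return 0;
}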
index 4294761a2b3e7ad3b36f5eca5bc26490e31ed61f..dfad287f1db1c6b55b86faacc0b40d2472636795 100644 (file)
@@ -285,7 +285,4 @@ void __init time_init(void)
 
        /* sets up the periodic event timer */
        arc_local_timer_setup();
-
-       if (machine_desc->init_time)
-               machine_desc->init_time();
 }
index dd35bde39f6938e483b2cfd2a2e39a1cf2148c71..894e696bddaa3ca3fab6bdca6ed4f7ac451bb290 100644 (file)
@@ -12,7 +12,7 @@
 #include <asm/thread_info.h>
 
 OUTPUT_ARCH(arc)
-ENTRY(_stext)
+ENTRY(res_service)
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
 jiffies = jiffies_64 + 4;
index 7beb941556c3f73567b8174b6dc1cd15c2ef2d49..3703a4969349186bbf726965cd528db80e2660b5 100644 (file)
@@ -8,3 +8,4 @@
 
 obj-y  := extable.o ioremap.o dma.o fault.o init.o
 obj-y  += tlb.o tlbex.o cache.o mmap.o
+obj-$(CONFIG_HIGHMEM)  += highmem.o
index 0d1a6e96839fbfc6636f324f6d05fb483d97dcea..ff7ff6cbb8112408c05a38a2f8e001265d5d3726 100644 (file)
@@ -25,7 +25,7 @@ static int l2_line_sz;
 int ioc_exists;
 volatile int slc_enable = 1, ioc_enable = 1;
 
-void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
+void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
                               unsigned long sz, const int cacheop);
 
 void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
@@ -37,7 +37,6 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
        int n = 0;
        struct cpuinfo_arc_cache *p;
 
-#define IS_USED_RUN(v)         ((v) ? "" : "(disabled) ")
 #define PR_CACHE(p, cfg, str)                                          \
        if (!(p)->ver)                                                  \
                n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");     \
@@ -47,7 +46,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
                        (p)->sz_k, (p)->assoc, (p)->line_len,           \
                        (p)->vipt ? "VIPT" : "PIPT",                    \
                        (p)->alias ? " aliasing" : "",                  \
-                       IS_ENABLED(cfg) ? "" : " (not used)");
+                       IS_USED_CFG(cfg));
 
        PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
        PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
@@ -63,7 +62,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 
        if (ioc_exists)
                n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
-                               IS_USED_RUN(ioc_enable));
+                               IS_DISABLED_RUN(ioc_enable));
 
        return buf;
 }
@@ -217,7 +216,7 @@ slc_chk:
  */
 
 static inline
-void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
+void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
                          unsigned long sz, const int op)
 {
        unsigned int aux_cmd;
@@ -254,8 +253,12 @@ void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
        }
 }
 
+/*
+ * For ARC700 MMUv3 I-cache and D-cache flushes
+ * Also reused for HS38 aliasing I-cache configuration
+ */
 static inline
-void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
+void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
                          unsigned long sz, const int op)
 {
        unsigned int aux_cmd, aux_tag;
@@ -290,6 +293,16 @@ void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
        if (full_page)
                write_aux_reg(aux_tag, paddr);
 
+       /*
+        * This is technically for MMU v4, using the MMU v3 programming model
+ * Special work for HS38 aliasing I-cache configuration with PAE40
+        *   - upper 8 bits of paddr need to be written into PTAG_HI
+        *   - (and needs to be written before the lower 32 bits)
+        * Note that PTAG_HI is hoisted outside the line loop
+        */
+       if (is_pae40_enabled() && op == OP_INV_IC)
+               write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+
        while (num_lines-- > 0) {
                if (!full_page) {
                        write_aux_reg(aux_tag, paddr);
@@ -302,14 +315,20 @@ void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
 }
 
 /*
- * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
- * maintenance ops (in IVIL reg), as long as icache doesn't alias.
+ * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
+ * Here's how cache ops are implemented
+ *
+ *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
+ *  - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
+ *  - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
+ *    respectively, similar to MMU v3 programming model, hence
+ *    __cache_line_loop_v3() is used)
  *
- * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is
- * specified in PTAG (similar to MMU v3)
+ * If PAE40 is enabled, independent of aliasing considerations, the higher bits
+ * need to be written into PTAG_HI
  */
 static inline
-void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
+void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
                          unsigned long sz, const int cacheop)
 {
        unsigned int aux_cmd;
@@ -336,6 +355,22 @@ void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
 
        num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
 
+       /*
+        * For HS38 PAE40 configuration
+        *   - upper 8 bits of paddr need to be written into PTAG_HI
+        *   - (and needs to be written before the lower 32 bits)
+        */
+       if (is_pae40_enabled()) {
+               if (cacheop == OP_INV_IC)
+                       /*
+                        * Non aliasing I-cache in HS38,
+                        * aliasing I-cache handled in __cache_line_loop_v3()
+                        */
+                       write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+               else
+                       write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
+       }
+
        while (num_lines-- > 0) {
                write_aux_reg(aux_cmd, paddr);
                paddr += L1_CACHE_BYTES;
@@ -413,7 +448,7 @@ static inline void __dc_entire_op(const int op)
 /*
  * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
  */
-static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
+static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
                                unsigned long sz, const int op)
 {
        unsigned long flags;
@@ -446,7 +481,7 @@ static inline void __ic_entire_inv(void)
 }
 
 static inline void
-__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
+__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
                          unsigned long sz)
 {
        unsigned long flags;
@@ -463,7 +498,7 @@ __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
 #else
 
 struct ic_inv_args {
-       unsigned long paddr, vaddr;
+       phys_addr_t paddr, vaddr;
        int sz;
 };
 
@@ -474,7 +509,7 @@ static void __ic_line_inv_vaddr_helper(void *info)
         __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
 }
 
-static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
                                unsigned long sz)
 {
        struct ic_inv_args ic_inv = {
@@ -495,7 +530,7 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
 
 #endif /* CONFIG_ARC_HAS_ICACHE */
 
-noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
+noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
 {
 #ifdef CONFIG_ISA_ARCV2
        /*
@@ -585,7 +620,7 @@ void flush_dcache_page(struct page *page)
        } else if (page_mapped(page)) {
 
                /* kernel reading from page with U-mapping */
-               unsigned long paddr = (unsigned long)page_address(page);
+               phys_addr_t paddr = (unsigned long)page_address(page);
                unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
 
                if (addr_not_cache_congruent(paddr, vaddr))
@@ -733,14 +768,14 @@ EXPORT_SYMBOL(flush_icache_range);
  *    builtin kernel page will not have any virtual mappings.
  *    kprobe on loadable module will be kernel vaddr.
  */
-void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
 {
        __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
        __ic_line_inv_vaddr(paddr, vaddr, len);
 }
 
 /* wrapper to compile time eliminate alignment checks in flush loop */
-void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
+void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
 {
        __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
 }
@@ -749,7 +784,7 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
  * wrapper to clearout kernel or userspace mappings of a page
  * For kernel mappings @vaddr == @paddr
  */
-void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
+void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
 {
        __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
 }
@@ -807,8 +842,8 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
 void copy_user_highpage(struct page *to, struct page *from,
        unsigned long u_vaddr, struct vm_area_struct *vma)
 {
-       unsigned long kfrom = (unsigned long)page_address(from);
-       unsigned long kto = (unsigned long)page_address(to);
+       void *kfrom = kmap_atomic(from);
+       void *kto = kmap_atomic(to);
        int clean_src_k_mappings = 0;
 
        /*
@@ -818,13 +853,16 @@ void copy_user_highpage(struct page *to, struct page *from,
         *
         * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
         * equally valid for SRC page as well
+        *
+        * For !VIPT cache, all of this gets compiled out as
+        * addr_not_cache_congruent() is 0
         */
        if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
-               __flush_dcache_page(kfrom, u_vaddr);
+               __flush_dcache_page((unsigned long)kfrom, u_vaddr);
                clean_src_k_mappings = 1;
        }
 
-       copy_page((void *)kto, (void *)kfrom);
+       copy_page(kto, kfrom);
 
        /*
         * Mark DST page K-mapping as dirty for a later finalization by
@@ -841,11 +879,14 @@ void copy_user_highpage(struct page *to, struct page *from,
         * sync the kernel mapping back to physical page
         */
        if (clean_src_k_mappings) {
-               __flush_dcache_page(kfrom, kfrom);
+               __flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
                set_bit(PG_dc_clean, &from->flags);
        } else {
                clear_bit(PG_dc_clean, &from->flags);
        }
+
+       kunmap_atomic(kto);
+       kunmap_atomic(kfrom);
 }
 
 void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
index d948e4e9d89c4ebe7e5676f449c9377b4fbe3535..af63f4a13e605eda26bcb7de54595bda5ecad997 100644 (file)
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
-static int handle_vmalloc_fault(unsigned long address)
+/*
+ * Kernel virtual address space is used to implement vmalloc/pkmap/fixmap
+ * Refer to asm/processor.h for System Memory Map
+ *
+ * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
+ * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
+ */
+noinline static int handle_kernel_vaddr_fault(unsigned long address)
 {
        /*
         * Synchronize this task's top level page-table
@@ -72,8 +79,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
         * only copy the information from the master page table,
         * nothing more.
         */
-       if (address >= VMALLOC_START && address <= VMALLOC_END) {
-               ret = handle_vmalloc_fault(address);
+       if (address >= VMALLOC_START) {
+               ret = handle_kernel_vaddr_fault(address);
                if (unlikely(ret))
                        goto bad_area_nosemaphore;
                else
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
new file mode 100644 (file)
index 0000000..065ee6b
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bootmem.h>
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * HIGHMEM API:
+ *
+ * kmap() API provides sleep semantics, hence referred to as "permanent maps"
+ * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
+ * for book-keeping
+ *
+ * kmap_atomic() can't sleep (calls pagefault_disable()), thus it provides
+ * shortlived ala "temporary mappings" which historically were implemented as
+ * fixmaps (compile time addr etc). Their book-keeping is done per cpu.
+ *
+ *     Both these facts combined (preemption disabled and per-cpu allocation)
+ *     mean the total number of concurrent fixmaps will be limited to max
+ *     such allocations in a single control path. Thus KM_TYPE_NR (another
+ *     historic relic) is a small'ish number which caps max percpu fixmaps
+ *
+ * ARC HIGHMEM Details
+ *
+ * - the kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/module)
+ *   is now shared between vmalloc and kmap (non overlapping though)
+ *
+ * - Both fixmap/pkmap use a dedicated page table each, hooked up to swapper PGD
+ *   This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
+ *   2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
+ *
+ * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
+ *   slots across NR_CPUS would be more than sufficient (generic code defines
+ *   KM_TYPE_NR as 20).
+ *
+ * - pkmap being preemptible, in theory could do with more than 256 concurrent
+ *   mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
+ *   the PGD and only works with a single page table @pkmap_page_table, hence
+ *   sets the limit
+ */
+
+extern pte_t *pkmap_page_table;
+static pte_t *fixmap_page_table;
+
+void *kmap(struct page *page)
+{
+       BUG_ON(in_interrupt());
+       if (!PageHighMem(page))
+               return page_address(page);
+
+       return kmap_high(page);
+}
+
+void *kmap_atomic(struct page *page)
+{
+       int idx, cpu_idx;
+       unsigned long vaddr;
+
+       preempt_disable();
+       pagefault_disable();
+       if (!PageHighMem(page))
+               return page_address(page);
+
+       cpu_idx = kmap_atomic_idx_push();
+       idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+       vaddr = FIXMAP_ADDR(idx);
+
+       set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
+                  mk_pte(page, kmap_prot));
+
+       return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kv)
+{
+       unsigned long kvaddr = (unsigned long)kv;
+
+       if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
+
+               /*
+                * Because preemption is disabled, this vaddr can be associated
+                * with the current allocated index.
+                * But in case of multiple live kmap_atomic(), it still relies on
+                * callers to unmap in the right order.
+                */
+               int cpu_idx = kmap_atomic_idx();
+               int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+
+               WARN_ON(kvaddr != FIXMAP_ADDR(idx));
+
+               pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
+               local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+
+               kmap_atomic_idx_pop();
+       }
+
+       pagefault_enable();
+       preempt_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
+{
+       pgd_t *pgd_k;
+       pud_t *pud_k;
+       pmd_t *pmd_k;
+       pte_t *pte_k;
+
+       pgd_k = pgd_offset_k(kvaddr);
+       pud_k = pud_offset(pgd_k, kvaddr);
+       pmd_k = pmd_offset(pud_k, kvaddr);
+
+       pte_k = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+       pmd_populate_kernel(&init_mm, pmd_k, pte_k);
+       return pte_k;
+}
+
+void kmap_init(void)
+{
+       /* Due to recursive include hell, we can't do this in processor.h */
+       BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
+
+       BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
+       pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
+
+       BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+       fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
+}
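
The kmap_atomic() path above packs KM_TYPE_NR fixmap slots per CPU into the single fixmap page table. As a rough illustration of that index arithmetic only (not part of the patch), the sketch below uses assumed values for the 8K page size, KM_TYPE_NR and a hypothetical fixmap base; the real constants live in the ARC headers.

#include <stdio.h>

#define EX_PAGE_SIZE    (8 * 1024)       /* assumed 8K MMU page              */
#define EX_KM_TYPE_NR   20               /* generic kmap nesting depth       */
#define EX_FIXMAP_BASE  0x7f000000UL     /* hypothetical fixmap base address */

/* mirrors: idx = cpu_idx + KM_TYPE_NR * cpu; vaddr = FIXMAP_ADDR(idx) */
static unsigned long ex_fixmap_addr(int cpu, int cpu_idx)
{
        int idx = cpu_idx + EX_KM_TYPE_NR * cpu;   /* slot in the one PTE page */

        return EX_FIXMAP_BASE + (unsigned long)idx * EX_PAGE_SIZE;
}

int main(void)
{
        /* CPU 1, third nested kmap_atomic(): each slot is a distinct 8K window */
        printf("vaddr = %#lx\n", ex_fixmap_addr(1, 2));
        return 0;
}

With KM_TYPE_NR = 20 and, say, 4 CPUs, at most 80 of the 256 available PTE slots are ever used, which is why the single 2M fixmap window is more than sufficient.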
index d44eedd8c3220e6923b26ea8d10fc6f0f84005f3..a9305b5a2cd4ba091f7ae18914f6ef5e64d5236c 100644 (file)
@@ -15,6 +15,7 @@
 #endif
 #include <linux/swap.h>
 #include <linux/module.h>
+#include <linux/highmem.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
@@ -24,16 +25,22 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
 char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
 EXPORT_SYMBOL(empty_zero_page);
 
-/* Default tot mem from .config */
-static unsigned long arc_mem_sz = 0x20000000;  /* some default */
+static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE;
+static unsigned long low_mem_sz;
+
+#ifdef CONFIG_HIGHMEM
+static unsigned long min_high_pfn;
+static u64 high_mem_start;
+static u64 high_mem_sz;
+#endif
 
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
-       arc_mem_sz = memparse(str, NULL) & PAGE_MASK;
+       low_mem_sz = memparse(str, NULL) & PAGE_MASK;
 
        /* early console might not be setup yet - it will show up later */
-       pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(arc_mem_sz));
+       pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(low_mem_sz));
 
        return 0;
 }
@@ -41,8 +48,22 @@ early_param("mem", setup_mem_sz);
 
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-       arc_mem_sz = size & PAGE_MASK;
-       pr_info("Memory size set via devicetree %ldM\n", TO_MB(arc_mem_sz));
+       int in_use = 0;
+
+       if (!low_mem_sz) {
+               BUG_ON(base != low_mem_start);
+               low_mem_sz = size;
+               in_use = 1;
+       } else {
+#ifdef CONFIG_HIGHMEM
+               high_mem_start = base;
+               high_mem_sz = size;
+               in_use = 1;
+#endif
+       }
+
+       pr_info("Memory @ %llx [%lldM] %s\n",
+               base, TO_MB(size), !in_use ? "Not used":"");
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -72,46 +93,62 @@ early_param("initrd", early_initrd);
 void __init setup_arch_memory(void)
 {
        unsigned long zones_size[MAX_NR_ZONES];
-       unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz;
+       unsigned long zones_holes[MAX_NR_ZONES];
 
        init_mm.start_code = (unsigned long)_text;
        init_mm.end_code = (unsigned long)_etext;
        init_mm.end_data = (unsigned long)_edata;
        init_mm.brk = (unsigned long)_end;
 
-       /*
-        * We do it here, so that memory is correctly instantiated
-        * even if "mem=xxx" cmline over-ride is given and/or
-        * DT has memory node. Each causes an update to @arc_mem_sz
-        * and we finally add memory one here
-        */
-       memblock_add(CONFIG_LINUX_LINK_BASE, arc_mem_sz);
-
-       /*------------- externs in mm need setting up ---------------*/
-
        /* first page of system - kernel .vector starts here */
        min_low_pfn = ARCH_PFN_OFFSET;
 
-       /* Last usable page of low mem (no HIGHMEM yet for ARC port) */
-       max_low_pfn = max_pfn = PFN_DOWN(end_mem);
+       /* Last usable page of low mem */
+       max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
 
-       max_mapnr = max_low_pfn - min_low_pfn;
+#ifdef CONFIG_HIGHMEM
+       min_high_pfn = PFN_DOWN(high_mem_start);
+       max_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+#endif
+
+       max_mapnr = max_pfn - min_low_pfn;
 
-       /*------------- reserve kernel image -----------------------*/
-       memblock_reserve(CONFIG_LINUX_LINK_BASE,
-                        __pa(_end) - CONFIG_LINUX_LINK_BASE);
+       /*------------- bootmem allocator setup -----------------------*/
+
+       /*
+        * seed the bootmem allocator after any DT memory node parsing or
+        * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
+        *
+        * Only low mem is added, otherwise we have crashes when allocating
+        * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
+        * available memory, ending in highmem with a > 32-bit address. However,
+        * it then tries to memset it with a truncated 32-bit handle, causing
+        * the crash
+        */
+
+       memblock_add(low_mem_start, low_mem_sz);
+       memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
 
 #ifdef CONFIG_BLK_DEV_INITRD
-       /*------------- reserve initrd image -----------------------*/
        if (initrd_start)
                memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
 #endif
 
        memblock_dump_all();
 
-       /*-------------- node setup --------------------------------*/
+       /*----------------- node/zones setup --------------------------*/
        memset(zones_size, 0, sizeof(zones_size));
-       zones_size[ZONE_NORMAL] = max_mapnr;
+       memset(zones_holes, 0, sizeof(zones_holes));
+
+       zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
+       zones_holes[ZONE_NORMAL] = 0;
+
+#ifdef CONFIG_HIGHMEM
+       zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+
+       /* This handles the peripheral address space hole */
+       zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn;
+#endif
 
        /*
         * We can't use the helper free_area_init(zones[]) because it uses
@@ -122,9 +159,12 @@ void __init setup_arch_memory(void)
        free_area_init_node(0,                  /* node-id */
                            zones_size,         /* num pages per zone */
                            min_low_pfn,        /* first pfn of node */
-                           NULL);              /* NO holes */
+                           zones_holes);       /* holes */
 
-       high_memory = (void *)end_mem;
+#ifdef CONFIG_HIGHMEM
+       high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
+       kmap_init();
+#endif
 }
 
 /*
@@ -135,6 +175,14 @@ void __init setup_arch_memory(void)
  */
 void __init mem_init(void)
 {
+#ifdef CONFIG_HIGHMEM
+       unsigned long tmp;
+
+       reset_all_zones_managed_pages();
+       for (tmp = min_high_pfn; tmp < max_pfn; tmp++)
+               free_highmem_page(pfn_to_page(tmp));
+#endif
+
        free_all_bootmem();
        mem_init_print_info(NULL);
 }
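
The zone setup above counts every PFN from the end of low memory up to the last high-memory PFN as ZONE_HIGHMEM, and reports the non-existent span (the peripheral address space) as a hole. A minimal sketch of that arithmetic, using made-up PFN values rather than anything read from the device tree:

#include <stdio.h>

int main(void)
{
        unsigned long min_low_pfn  = 0x10000;  /* assumed start of low mem  */
        unsigned long max_low_pfn  = 0x20000;  /* assumed end of low mem    */
        unsigned long min_high_pfn = 0x80000;  /* assumed start of high mem */
        unsigned long max_pfn      = 0x90000;  /* assumed end of high mem   */

        unsigned long normal_pages  = max_low_pfn - min_low_pfn;
        unsigned long highmem_pages = max_pfn - max_low_pfn;
        /* PFNs between low and high mem that have no RAM behind them */
        unsigned long highmem_hole  = min_high_pfn - max_low_pfn;

        printf("ZONE_NORMAL:  %lu pages\n", normal_pages);
        printf("ZONE_HIGHMEM: %lu pages spanned (%lu of them a hole)\n",
               highmem_pages, highmem_hole);
        return 0;
}

free_area_init_node() then subtracts the hole from the spanned size, so only the pages actually backed by RAM end up managed.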
index 2c7ce8bb74758c127673582426f214a0ee0d0af7..0ee7398468476f57b301bde2fa7c7e13735bb3fb 100644 (file)
@@ -109,6 +109,10 @@ DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
 static inline void __tlb_entry_erase(void)
 {
        write_aux_reg(ARC_REG_TLBPD1, 0);
+
+       if (is_pae40_enabled())
+               write_aux_reg(ARC_REG_TLBPD1HI, 0);
+
        write_aux_reg(ARC_REG_TLBPD0, 0);
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
 }
@@ -182,7 +186,7 @@ static void utlb_invalidate(void)
 
 }
 
-static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
 {
        unsigned int idx;
 
@@ -225,10 +229,14 @@ static void tlb_entry_erase(unsigned int vaddr_n_asid)
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
 }
 
-static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
 {
        write_aux_reg(ARC_REG_TLBPD0, pd0);
        write_aux_reg(ARC_REG_TLBPD1, pd1);
+
+       if (is_pae40_enabled())
+               write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
+
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
 }
 
@@ -240,22 +248,39 @@ static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
 
 noinline void local_flush_tlb_all(void)
 {
+       struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
        unsigned long flags;
        unsigned int entry;
-       struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+       int num_tlb = mmu->sets * mmu->ways;
 
        local_irq_save(flags);
 
        /* Load PD0 and PD1 with template for a Blank Entry */
        write_aux_reg(ARC_REG_TLBPD1, 0);
+
+       if (is_pae40_enabled())
+               write_aux_reg(ARC_REG_TLBPD1HI, 0);
+
        write_aux_reg(ARC_REG_TLBPD0, 0);
 
-       for (entry = 0; entry < mmu->num_tlb; entry++) {
+       for (entry = 0; entry < num_tlb; entry++) {
                /* write this entry to the TLB */
                write_aux_reg(ARC_REG_TLBINDEX, entry);
                write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
        }
 
+       if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+               const int stlb_idx = 0x800;
+
+               /* Blank sTLB entry */
+               write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);
+
+               for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
+                       write_aux_reg(ARC_REG_TLBINDEX, entry);
+                       write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+               }
+       }
+
        utlb_invalidate();
 
        local_irq_restore(flags);
@@ -409,6 +434,15 @@ static inline void ipi_flush_tlb_range(void *arg)
        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ipi_flush_pmd_tlb_range(void *arg)
+{
+       struct tlb_args *ta = arg;
+
+       local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+#endif
+
 static inline void ipi_flush_tlb_kernel_range(void *arg)
 {
        struct tlb_args *ta = (struct tlb_args *)arg;
@@ -449,6 +483,20 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                        unsigned long end)
+{
+       struct tlb_args ta = {
+               .ta_vma = vma,
+               .ta_start = start,
+               .ta_end = end
+       };
+
+       on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
+}
+#endif
+
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
        struct tlb_args ta = {
@@ -463,11 +511,12 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 /*
  * Routine to create a TLB entry
  */
-void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
 {
        unsigned long flags;
        unsigned int asid_or_sasid, rwx;
-       unsigned long pd0, pd1;
+       unsigned long pd0;
+       pte_t pd1;
 
        /*
         * create_tlb() assumes that current->mm == vma->mm, since
@@ -499,9 +548,9 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 
        local_irq_save(flags);
 
-       tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);
+       tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);
 
-       address &= PAGE_MASK;
+       vaddr &= PAGE_MASK;
 
        /* update this PTE credentials */
        pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
@@ -511,7 +560,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
        /* ASID for this task */
        asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
 
-       pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
+       pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
 
        /*
         * ARC MMU provides fully orthogonal access bits for K/U mode,
@@ -547,7 +596,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
                      pte_t *ptep)
 {
        unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
-       unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
+       phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
        struct page *page = pfn_to_page(pte_pfn(*ptep));
 
        create_tlb(vma, vaddr, ptep);
@@ -580,6 +629,95 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
        }
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+/*
+ * MMUv4 in HS38x cores supports Super Pages, which are the basis for Linux THP
+ * support.
+ *
+ * Normal and Super pages can co-exist (of course not overlap) in the TLB, with
+ * a new bit "SZ" in the TLB page descriptor to distinguish between them.
+ * The Super Page size is configurable in hardware (4K to 16M), but is fixed
+ * once the RTL is built.
+ *
+ * The exact THP size a Linux configuration will support is a function of:
+ *  - MMU page size (typical 8K, RTL fixed)
+ *  - software page walker address split between PGD:PTE:PFN (typical
+ *    11:8:13, but can be changed with 1 line)
+ * So for the above defaults, the supported THP size is 8K * (2^8) = 2M
+ *
+ * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
+ * reduces to 1 level (as PTE is folded into PGD and canonically referred
+ * to as PMD).
+ * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
+ */
+
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+                                pmd_t *pmd)
+{
+       pte_t pte = __pte(pmd_val(*pmd));
+       update_mmu_cache(vma, addr, &pte);
+}
+
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+                               pgtable_t pgtable)
+{
+       struct list_head *lh = (struct list_head *) pgtable;
+
+       assert_spin_locked(&mm->page_table_lock);
+
+       /* FIFO */
+       if (!pmd_huge_pte(mm, pmdp))
+               INIT_LIST_HEAD(lh);
+       else
+               list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
+       pmd_huge_pte(mm, pmdp) = pgtable;
+}
+
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
+{
+       struct list_head *lh;
+       pgtable_t pgtable;
+
+       assert_spin_locked(&mm->page_table_lock);
+
+       pgtable = pmd_huge_pte(mm, pmdp);
+       lh = (struct list_head *) pgtable;
+       if (list_empty(lh))
+               pmd_huge_pte(mm, pmdp) = NULL;
+       else {
+               pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
+               list_del(lh);
+       }
+
+       pte_val(pgtable[0]) = 0;
+       pte_val(pgtable[1]) = 0;
+
+       return pgtable;
+}
+
+void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                              unsigned long end)
+{
+       unsigned int cpu;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       cpu = smp_processor_id();
+
+       if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
+               unsigned int asid = hw_pid(vma->vm_mm, cpu);
+
+               /* No need to loop here: this will always be for 1 Huge Page */
+               tlb_entry_erase(start | _PAGE_HW_SZ | asid);
+       }
+
+       local_irq_restore(flags);
+}
+
+#endif
+
 /* Read the Cache Build Configuration Registers, Decode them and save into
  * the cpuinfo structure for later use.
  * No Validation is done here, simply read/convert the BCRs
@@ -598,10 +736,10 @@ void read_decode_mmu_bcr(void)
 
        struct bcr_mmu_3 {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-       unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
+       unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
                     u_itlb:4, u_dtlb:4;
 #else
-       unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
+       unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
                     ways:4, ver:8;
 #endif
        } *mmu3;
@@ -622,7 +760,7 @@ void read_decode_mmu_bcr(void)
 
        if (mmu->ver <= 2) {
                mmu2 = (struct bcr_mmu_1_2 *)&tmp;
-               mmu->pg_sz_k = TO_KB(PAGE_SIZE);
+               mmu->pg_sz_k = TO_KB(0x2000);
                mmu->sets = 1 << mmu2->sets;
                mmu->ways = 1 << mmu2->ways;
                mmu->u_dtlb = mmu2->u_dtlb;
@@ -634,6 +772,7 @@ void read_decode_mmu_bcr(void)
                mmu->ways = 1 << mmu3->ways;
                mmu->u_dtlb = mmu3->u_dtlb;
                mmu->u_itlb = mmu3->u_itlb;
+               mmu->sasid = mmu3->sasid;
        } else {
                mmu4 = (struct bcr_mmu_4 *)&tmp;
                mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
@@ -642,9 +781,9 @@ void read_decode_mmu_bcr(void)
                mmu->ways = mmu4->n_ways * 2;
                mmu->u_dtlb = mmu4->u_dtlb * 4;
                mmu->u_itlb = mmu4->u_itlb * 4;
+               mmu->sasid = mmu4->sasid;
+               mmu->pae = mmu4->pae;
        }
-
-       mmu->num_tlb = mmu->sets * mmu->ways;
 }
 
 char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
@@ -655,14 +794,15 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
 
        if (p_mmu->s_pg_sz_m)
                scnprintf(super_pg, 64, "%dM Super Page%s, ",
-                         p_mmu->s_pg_sz_m, " (not used)");
+                         p_mmu->s_pg_sz_m,
+                         IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
 
        n += scnprintf(buf + n, len - n,
-                     "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n",
+                     "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
                       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
-                      p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
+                      p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
                       p_mmu->u_dtlb, p_mmu->u_itlb,
-                      IS_ENABLED(CONFIG_ARC_MMU_SASID) ? ",SASID" : "");
+                      IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));
 
        return buf;
 }
@@ -690,6 +830,14 @@ void arc_mmu_init(void)
        if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
                panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
 
+       if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+           mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
+               panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
+                     (unsigned long)TO_MB(HPAGE_PMD_SIZE));
+
+       if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
+               panic("Hardware doesn't support PAE40\n");
+
        /* Enable the MMU */
        write_aux_reg(ARC_REG_PID, MMU_ENABLE);
 
@@ -725,15 +873,15 @@ void arc_mmu_init(void)
  *      the duplicate one.
  * -Knob to be verbose abt it.(TODO: hook them up to debugfs)
  */
-volatile int dup_pd_verbose = 1;/* Be slient abt it or complain (default) */
+volatile int dup_pd_silent; /* Be silent about it or complain (default) */
 
 void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
                          struct pt_regs *regs)
 {
-       int set, way, n;
-       unsigned long flags, is_valid;
        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
-       unsigned int pd0[mmu->ways], pd1[mmu->ways];
+       unsigned int pd0[mmu->ways];
+       unsigned long flags;
+       int set;
 
        local_irq_save(flags);
 
@@ -743,14 +891,16 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
        /* loop thru all sets of TLB */
        for (set = 0; set < mmu->sets; set++) {
 
+               int is_valid, way;
+
                /* read out all the ways of current set */
                for (way = 0, is_valid = 0; way < mmu->ways; way++) {
                        write_aux_reg(ARC_REG_TLBINDEX,
                                          SET_WAY_TO_IDX(mmu, set, way));
                        write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
                        pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
-                       pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
                        is_valid |= pd0[way] & _PAGE_PRESENT;
+                       pd0[way] &= PAGE_MASK;
                }
 
                /* If all the WAYS in SET are empty, skip to next SET */
@@ -759,30 +909,28 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
 
                /* Scan the set for duplicate ways: needs a nested loop */
                for (way = 0; way < mmu->ways - 1; way++) {
+
+                       int n;
+
                        if (!pd0[way])
                                continue;
 
                        for (n = way + 1; n < mmu->ways; n++) {
-                               if ((pd0[way] & PAGE_MASK) ==
-                                   (pd0[n] & PAGE_MASK)) {
-
-                                       if (dup_pd_verbose) {
-                                               pr_info("Duplicate PD's @"
-                                                       "[%d:%d]/[%d:%d]\n",
-                                                    set, way, set, n);
-                                               pr_info("TLBPD0[%u]: %08x\n",
-                                                    way, pd0[way]);
-                                       }
-
-                                       /*
-                                        * clear entry @way and not @n. This is
-                                        * critical to our optimised loop
-                                        */
-                                       pd0[way] = pd1[way] = 0;
-                                       write_aux_reg(ARC_REG_TLBINDEX,
+                               if (pd0[way] != pd0[n])
+                                       continue;
+
+                               if (!dup_pd_silent)
+                                       pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
+                                               pd0[way], set, way, n);
+
+                               /*
+                                * clear entry @way and not @n.
+                                * This is critical to our optimised loop
+                                */
+                               pd0[way] = 0;
+                               write_aux_reg(ARC_REG_TLBINDEX,
                                                SET_WAY_TO_IDX(mmu, set, way));
-                                       __tlb_entry_erase();
-                               }
+                               __tlb_entry_erase();
                        }
                }
        }
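
The super-page comment earlier in this file states that the THP size follows from the MMU page size and the number of PTE index bits. A small stand-alone sketch of that calculation, assuming the typical 8K page and 11:8:13 split quoted there:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 8 * 1024; /* typical 8K ARC MMU page        */
        unsigned int  pte_bits  = 8;        /* PTE index bits in the 11:8:13 split */

        /* one PMD maps 2^pte_bits base pages, and that span is the THP size */
        unsigned long hpage_size = page_size << pte_bits;

        printf("THP size = %lu MB\n", hpage_size >> 20); /* prints 2 MB */
        return 0;
}

arc_mmu_init() above then panics if the hardware's configured super-page size doesn't match the HPAGE_PMD_SIZE Linux derives this way.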
index f6f4c3cb505d1341a8c24b1172a6a89f71e6fc26..63860adc4814083dd5365d83ffd790fa6d390ab4 100644 (file)
@@ -205,20 +205,38 @@ ex_saved_reg1:
 #endif
 
        lsr     r0, r2, PGDIR_SHIFT     ; Bits for indexing into PGD
-       ld.as   r1, [r1, r0]            ; PGD entry corresp to faulting addr
-       and.f   r1, r1, PAGE_MASK       ; Ignoring protection and other flags
-       ;   contains Ptr to Page Table
-       bz.d    do_slow_path_pf         ; if no Page Table, do page fault
+       ld.as   r3, [r1, r0]            ; PGD entry corresp to faulting addr
+       tst     r3, r3
+       bz      do_slow_path_pf         ; if no Page Table, do page fault
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       and.f   0, r3, _PAGE_HW_SZ      ; Is this Huge PMD (thp)
+       add2.nz r1, r1, r0
+       bnz.d   2f              ; YES: PGD == PMD has THP PTE: stop pgd walk
+       mov.nz  r0, r3
+
+#endif
+       and     r1, r3, PAGE_MASK
 
        ; Get the PTE entry: The idea is
        ; (1) x = addr >> PAGE_SHIFT    -> masks page-off bits from @fault-addr
        ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
-       ; (3) z = pgtbl[y]
-       ; To avoid the multiply by in end, we do the -2, <<2 below
+       ; (3) z = (pgtbl + y * 4)
+
+#ifdef CONFIG_ARC_HAS_PAE40
+#define PTE_SIZE_LOG   3       /* 8 == 2 ^ 3 */
+#else
+#define PTE_SIZE_LOG   2       /* 4 == 2 ^ 2 */
+#endif
+
+       ; the multiply in step (3) above is avoided by shifting less in step (1)
+       lsr     r0, r2, ( PAGE_SHIFT - PTE_SIZE_LOG )
+       and     r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG )
+       ld.aw   r0, [r1, r0]            ; r0: PTE (lower word only for PAE40)
+                                       ; r1: PTE ptr
+
+2:
 
-       lsr     r0, r2, (PAGE_SHIFT - 2)
-       and     r0, r0, ( (PTRS_PER_PTE - 1) << 2)
-       ld.aw   r0, [r1, r0]            ; get PTE and PTE ptr for fault addr
 #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
        and.f 0, r0, _PAGE_PRESENT
        bz   1f
@@ -233,18 +251,23 @@ ex_saved_reg1:
 ;-----------------------------------------------------------------
 ; Convert Linux PTE entry into TLB entry
 ; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
+;    (for PAE40: a two-word PTE, but a three-word TLB entry [PD0:PD1:PD1HI])
 ; IN: r0 = PTE, r1 = ptr to PTE
 
 .macro CONV_PTE_TO_TLB
-       and    r3, r0, PTE_BITS_RWX     ;       r w x
-       lsl    r2, r3, 3                ; r w x 0 0 0 (GLOBAL, kernel only)
+       and    r3, r0, PTE_BITS_RWX     ;          r  w  x
+       lsl    r2, r3, 3                ; Kr Kw Kx 0  0  0 (GLOBAL, kernel only)
        and.f  0,  r0, _PAGE_GLOBAL
-       or.z   r2, r2, r3               ; r w x r w x (!GLOBAL, user page)
+       or.z   r2, r2, r3               ; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)
 
        and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
        or  r3, r3, r2
 
-       sr  r3, [ARC_REG_TLBPD1]        ; these go in PD1
+       sr  r3, [ARC_REG_TLBPD1]        ; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C
+#ifdef CONFIG_ARC_HAS_PAE40
+       ld      r3, [r1, 4]             ; paddr[39..32]
+       sr      r3, [ARC_REG_TLBPD1HI]
+#endif
 
        and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
 
@@ -365,7 +388,7 @@ ENTRY(EV_TLBMissD)
        lr      r3, [ecr]
        or      r0, r0, _PAGE_ACCESSED        ; Accessed bit always
        btst_s  r3,  ECR_C_BIT_DTLB_ST_MISS   ; See if it was a Write Access ?
-       or.nz   r0, r0, _PAGE_MODIFIED        ; if Write, set Dirty bit as well
+       or.nz   r0, r0, _PAGE_DIRTY           ; if Write, set Dirty bit as well
        st_s    r0, [r1]                      ; Write back PTE
 
        CONV_PTE_TO_TLB
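
The PTE lookup in the miss handler above avoids a separate multiply by the PTE size: it right-shifts the faulting address by PAGE_SHIFT minus PTE_SIZE_LOG and masks with a pre-shifted index mask, yielding a byte offset directly. A userspace sketch of that equivalence, with assumed values for the typical ARC configuration (4-byte PTEs; PAE40 would use PTE_SIZE_LOG = 3):

#include <stdio.h>

#define EX_PAGE_SHIFT    13   /* 8K pages                  */
#define EX_PTRS_PER_PTE  256  /* 8 PTE index bits          */
#define EX_PTE_SIZE_LOG  2    /* 4-byte PTEs               */

int main(void)
{
        unsigned long vaddr = 0x12345678UL;

        /* straightforward form: compute the index, then scale by PTE size */
        unsigned long idx   = (vaddr >> EX_PAGE_SHIFT) & (EX_PTRS_PER_PTE - 1);
        unsigned long off_a = idx << EX_PTE_SIZE_LOG;

        /* handler form: shift less up front, mask with a pre-shifted mask */
        unsigned long off_b = (vaddr >> (EX_PAGE_SHIFT - EX_PTE_SIZE_LOG)) &
                              ((EX_PTRS_PER_PTE - 1) << EX_PTE_SIZE_LOG);

        printf("%#lx %#lx (always equal)\n", off_a, off_b);
        return 0;
}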
index 0a77b19e1df8db1d37af0346e36a4b53254ca272..1b0f0f458a2bde2438802f431f63748779245c82 100644 (file)
@@ -455,11 +455,6 @@ static void __init axs103_early_init(void)
        axs10x_print_board_ver(AXC003_CREG + 4088, "AXC003 CPU Card");
 
        axs10x_early_init();
-
-#ifdef CONFIG_ARC_MCIP
-       /* No Hardware init, but filling the smp ops callbacks */
-       mcip_init_early_smp();
-#endif
 }
 #endif
 
@@ -487,9 +482,6 @@ static const char *axs103_compat[] __initconst = {
 MACHINE_START(AXS103, "axs103")
        .dt_compat      = axs103_compat,
        .init_early     = axs103_early_init,
-#ifdef CONFIG_ARC_MCIP
-       .init_smp       = mcip_init_smp,
-#endif
 MACHINE_END
 
 /*
index d9e35b4a2f0861bd9e0bd9ad38360e959029d62c..dde692812bc16ac70bc3bfba24fc14ba71d158a2 100644 (file)
@@ -30,8 +30,4 @@ static const char *simulation_compat[] __initconst = {
 
 MACHINE_START(SIMULATION, "simulation")
        .dt_compat      = simulation_compat,
-#ifdef CONFIG_ARC_MCIP
-       .init_early     = mcip_init_early_smp,
-       .init_smp       = mcip_init_smp,
-#endif
 MACHINE_END
index 72ad724c67ae94cd6682ec15f3834966dd7028c0..f1ed1109f4889e006e9df4c6110be001841c0f82 100644 (file)
@@ -645,6 +645,7 @@ config ARCH_SHMOBILE_LEGACY
 
 config ARCH_RPC
        bool "RiscPC"
+       depends on MMU
        select ARCH_ACORN
        select ARCH_MAY_HAVE_PC_FDC
        select ARCH_SPARSEMEM_ENABLE
@@ -819,6 +820,7 @@ config ARCH_VIRT
        bool "Dummy Virtual Machine" if ARCH_MULTI_V7
        select ARM_AMBA
        select ARM_GIC
+       select ARM_GIC_V3
        select ARM_PSCI
        select HAVE_ARM_ARCH_TIMER
 
@@ -1410,7 +1412,6 @@ config HAVE_ARM_ARCH_TIMER
 
 config HAVE_ARM_TWD
        bool
-       depends on SMP
        select CLKSRC_OF if OF
        help
          This options enables support for the ARM timer and watchdog unit
@@ -1470,6 +1471,8 @@ choice
 
        config VMSPLIT_3G
                bool "3G/1G user/kernel split"
+       config VMSPLIT_3G_OPT
+               bool "3G/1G user/kernel split (for full 1G low memory)"
        config VMSPLIT_2G
                bool "2G/2G user/kernel split"
        config VMSPLIT_1G
@@ -1481,6 +1484,7 @@ config PAGE_OFFSET
        default PHYS_OFFSET if !MMU
        default 0x40000000 if VMSPLIT_1G
        default 0x80000000 if VMSPLIT_2G
+       default 0xB0000000 if VMSPLIT_3G_OPT
        default 0xC0000000
 
 config NR_CPUS
@@ -1695,8 +1699,9 @@ config HIGHMEM
          If unsure, say n.
 
 config HIGHPTE
-       bool "Allocate 2nd-level pagetables from highmem"
+       bool "Allocate 2nd-level pagetables from highmem" if EXPERT
        depends on HIGHMEM
+       default y
        help
          The VM uses one page of physical memory for each page table.
          For systems with a lot of processes, this can use a lot of
index 233159d2eaab3eac34d47c4d4a9308b0d1bdec60..bb8fa023d5741dff9b4f9033ed2cf6f6d7b69f2d 100644 (file)
@@ -578,7 +578,7 @@ dtb-$(CONFIG_MACH_SUN4I) += \
        sun4i-a10-hackberry.dtb \
        sun4i-a10-hyundai-a7hd.dtb \
        sun4i-a10-inet97fv2.dtb \
-       sun4i-a10-itead-iteaduino-plus.dts \
+       sun4i-a10-itead-iteaduino-plus.dtb \
        sun4i-a10-jesurun-q5.dtb \
        sun4i-a10-marsboard.dtb \
        sun4i-a10-mini-xplus.dtb \
index 568adf5efde059f8c9a5ee56d182ea8def05788e..d55e3ea89fda51ba1d6b45f69eeaa8849dad9487 100644 (file)
                                /* SMPS9 unused */
 
                                ldo1_reg: ldo1 {
-                                       /* VDD_SD  */
+                                       /* VDD_SD / VDDSHV8  */
                                        regulator-name = "ldo1";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <3300000>;
                                        regulator-boot-on;
+                                       regulator-always-on;
                                };
 
                                ldo2_reg: ldo2 {
index 89f5a95954ed9020c070491cae36cc7b2556eccf..4047621b137e6b107f875dc2f7c82292752bf448 100644 (file)
@@ -46,7 +46,7 @@
 
 / {
        model = "Marvell Armada 385 Access Point Development Board";
-       compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada38x";
+       compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada380";
 
        chosen {
                stdout-path = "serial1:115200n8";
index 63a48490e2f9653ff83f7f6202fd993d101b6ec9..d4dbd28d348c0b74ae4b23b5886b1dfb29dc3aa6 100644 (file)
                };
 
                usb_phy2: phy@a2f400 {
-                       compatible = "marvell,berlin2-usb-phy";
+                       compatible = "marvell,berlin2cd-usb-phy";
                        reg = <0xa2f400 0x128>;
                        #phy-cells = <0>;
                        resets = <&chip_rst 0x104 14>;
                };
 
                usb_phy0: phy@b74000 {
-                       compatible = "marvell,berlin2-usb-phy";
+                       compatible = "marvell,berlin2cd-usb-phy";
                        reg = <0xb74000 0x128>;
                        #phy-cells = <0>;
                        resets = <&chip_rst 0x104 12>;
                };
 
                usb_phy1: phy@b78000 {
-                       compatible = "marvell,berlin2-usb-phy";
+                       compatible = "marvell,berlin2cd-usb-phy";
                        reg = <0xb78000 0x128>;
                        #phy-cells = <0>;
                        resets = <&chip_rst 0x104 13>;
index 955c24ee4a8cbfea89248c2efcb93d9536c0221b..8c24975e8f9d66387b23d9eeb38ab96142609421 100644 (file)
 
                button@1 {
                        debounce_interval = <50>;
-                       wakeup = <1>;
+                       wakeup-source;
                        label = "DSW2-1";
                        linux,code = <KEY_1>;
                        gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
                };
                button@2 {
                        debounce_interval = <50>;
-                       wakeup = <1>;
+                       wakeup-source;
                        label = "DSW2-2";
                        linux,code = <KEY_2>;
                        gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>;
                };
                button@3 {
                        debounce_interval = <50>;
-                       wakeup = <1>;
+                       wakeup-source;
                        label = "DSW2-3";
                        linux,code = <KEY_3>;
                        gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
                };
                button@4 {
                        debounce_interval = <50>;
-                       wakeup = <1>;
+                       wakeup-source;
                        label = "DSW2-4";
                        linux,code = <KEY_4>;
                        gpios = <&gpio0 17 GPIO_ACTIVE_HIGH>;
index ca0e3c15977f13febd2ae550949ae704ecbf33cb..294cfe40388dd582d77d45eac441b15318ac1cde 100644 (file)
@@ -98,6 +98,7 @@
                        opp-hz = /bits/ 64 <800000000>;
                        opp-microvolt = <1000000>;
                        clock-latency-ns = <200000>;
+                       opp-suspend;
                };
                opp07 {
                        opp-hz = /bits/ 64 <900000000>;
index 15aea760c1dadee45c631d78c64366cea7739276..c625e71217aa94d74c640c113286a5292179b62a 100644 (file)
                                regulator-name = "P1.8V_LDO_OUT10";
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
+                               regulator-always-on;
                        };
 
                        ldo11_reg: LDO11 {
index 8f4d76c5e11c5821ef7e504f21aa87c6103c92de..1b95da79293c58a173ce833d2e72e995a7761cb0 100644 (file)
        };
 };
 
+&pmu_system_controller {
+       assigned-clocks = <&pmu_system_controller 0>;
+       assigned-clock-parents = <&clock CLK_FIN_PLL>;
+};
+
 &rtc {
        status = "okay";
        clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;
index df9aee92ecf4d71c714e763665013e7d2f6f4591..1b3d6c769a3cbb37f88fe55914707316abea023c 100644 (file)
                interrupt-parent = <&combiner>;
                interrupts = <3 0>;
                clock-names = "sysmmu", "master";
-               clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>;
+               clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>;
                power-domains = <&disp_pd>;
                #iommu-cells = <0>;
        };
index 79ffdfe712aa4a8ad193d4afd671962edfb73646..3b43e57845ae92bea4fc25b5c9af428c26ce2aa6 100644 (file)
         */
        pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>;
        pinctrl-names = "default";
-       samsung,pwm-outputs = <0>;
        status = "okay";
 };
 
index 7d5b386b5ae6aeb32aed5baf55a2d50ddf374b39..8f40c7e549bd5ef48d77c4ee5c9dacaaae65d820 100644 (file)
        };
 };
 
+&pmu_system_controller {
+       assigned-clocks = <&pmu_system_controller 0>;
+       assigned-clock-parents = <&clock CLK_FIN_PLL>;
+};
+
 &rtc {
        status = "okay";
        clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;
index 66e47de5e826b0b33aaa6d066aa3c6c3104ff796..96d7eede412e1343d5e4e0a982044cec3ac347d3 100644 (file)
@@ -36,7 +36,7 @@
                pinctrl-0 = <&pinctrl_pmic>;
                reg = <0x08>;
                interrupt-parent = <&gpio5>;
-               interrupts = <23 0x8>;
+               interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
                regulators {
                        sw1_reg: sw1a {
                                regulator-name = "SW1";
index c3e3ca9362fbb78b6b2ecb8ec0125b83abdb7352..cd170376eaca6be3bc6416363250271590b75153 100644 (file)
@@ -15,6 +15,7 @@
 #include <dt-bindings/clock/imx5-clock.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
        aliases {
index 3373fd958e95c72b098ed14ea2a3228ba7903ea6..a503562438888fe936de6c8b9b255a06a4455d26 100644 (file)
@@ -35,7 +35,6 @@
                        compatible = "regulator-fixed";
                        reg = <1>;
                        pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_usbh1>;
                        regulator-name = "usbh1_vbus";
                        regulator-min-microvolt = <5000000>;
                        regulator-max-microvolt = <5000000>;
@@ -47,7 +46,6 @@
                        compatible = "regulator-fixed";
                        reg = <2>;
                        pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_usbotg>;
                        regulator-name = "usb_otg_vbus";
                        regulator-min-microvolt = <5000000>;
                        regulator-max-microvolt = <5000000>;
index b738ce0f9d9bc31f5369b1c97284cf1ec7ff9bb7..6e444bb873f92ee28dd5cebec93d5947f3b8ea15 100644 (file)
                                status = "disabled";
                        };
 
-                       uart2: serial@30870000 {
+                       uart2: serial@30890000 {
                                compatible = "fsl,imx7d-uart",
                                             "fsl,imx6q-uart";
-                               reg = <0x30870000 0x10000>;
+                               reg = <0x30890000 0x10000>;
                                interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX7D_UART2_ROOT_CLK>,
                                        <&clks IMX7D_UART2_ROOT_CLK>;
index 36155b749d9fcbbf2bb579e0af5cccd066234867..d2d44df9c8c0b867242725f7c53d918e99379480 100644 (file)
                         clock-frequency = <32768>;
               };
        };
+
+       netxbig-leds {
+               blue-sata2 {
+                       label = "netxbig:blue:sata2";
+                       mode-addr = <5>;
+                       mode-val = <NETXBIG_LED_OFF 0
+                                   NETXBIG_LED_ON 7
+                                   NETXBIG_LED_SATA 1
+                                   NETXBIG_LED_TIMER1 3>;
+                       bright-addr = <2>;
+                       max-brightness = <7>;
+               };
+               red-sata2 {
+                       label = "netxbig:red:sata2";
+                       mode-addr = <5>;
+                       mode-val = <NETXBIG_LED_OFF 0
+                                   NETXBIG_LED_ON 2
+                                   NETXBIG_LED_TIMER1 4>;
+                       bright-addr = <2>;
+                       max-brightness = <7>;
+               };
+               blue-sata3 {
+                       label = "netxbig:blue:sata3";
+                       mode-addr = <6>;
+                       mode-val = <NETXBIG_LED_OFF 0
+                                   NETXBIG_LED_ON 7
+                                   NETXBIG_LED_SATA 1
+                                   NETXBIG_LED_TIMER1 3>;
+                       bright-addr = <2>;
+                       max-brightness = <7>;
+               };
+               red-sata3 {
+                       label = "netxbig:red:sata3";
+                       mode-addr = <6>;
+                       mode-val = <NETXBIG_LED_OFF 0
+                                   NETXBIG_LED_ON 2
+                                   NETXBIG_LED_TIMER1 4>;
+                       bright-addr = <2>;
+                       max-brightness = <7>;
+               };
+               blue-sata4 {
+                       label = "netxbig:blue:sata4";
+                       mode-addr = <7>;
+                       mode-val = <NETXBIG_LED_OFF 0
+                                   NETXBIG_LED_ON 7
+                                   NETXBIG_LED_SATA 1
+                                   NETXBIG_LED_TIMER1 3>;
+                       bright-addr = <2>;
+                       max-brightness = <7>;
+               };
+               red-sata4 {
+                       label = "netxbig:red:sata4";
+                       mode-addr = <7>;
+                       mode-val = <NETXBIG_LED_OFF 0
+                                   NETXBIG_LED_ON 2
+                                   NETXBIG_LED_TIMER1 4>;
+                       bright-addr = <2>;
+                       max-brightness = <7>;
+               };
+       };
 };
 
 &mdio {
index 1508b12147df35804837bf2ec9b8882a1681ef09..62515a8b99b995a4a7ae2f2e7d2a42149f99bec1 100644 (file)
@@ -13,6 +13,7 @@
  * warranty of any kind, whether express or implied.
 */
 
+#include <dt-bindings/leds/leds-netxbig.h>
 #include "kirkwood.dtsi"
 #include "kirkwood-6281.dtsi"
 
                        gpio = <&gpio0 16 GPIO_ACTIVE_HIGH>;
                };
        };
+
+       netxbig_gpio_ext: netxbig-gpio-ext {
+               compatible = "lacie,netxbig-gpio-ext";
+
+               addr-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH
+                             &gpio1 16 GPIO_ACTIVE_HIGH
+                             &gpio1 17 GPIO_ACTIVE_HIGH>;
+               data-gpios = <&gpio1 12 GPIO_ACTIVE_HIGH
+                             &gpio1 13 GPIO_ACTIVE_HIGH
+                             &gpio1 14 GPIO_ACTIVE_HIGH>;
+               enable-gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>;
+       };
+
+       netxbig-leds {
+               compatible = "lacie,netxbig-leds";
+
+               gpio-ext = <&netxbig_gpio_ext>;
+
+               timers = <NETXBIG_LED_TIMER1 500 500
+                         NETXBIG_LED_TIMER2 500 1000>;
+
+               blue-power {
+                       label = "netxbig:blue:power";
+                       mode-addr = <0>;
+                       mode-val = <NETXBIG_LED_OFF 0
+                                   NETXBIG_LED_ON 1
+                                   NETXBIG_LED_TIMER1 3
+                                   NETXBIG_LED_TIMER2 7>;
+                       bright-addr = <1>;
+                       max-brightness = <7>;
+               };
+               red-power {
+                       label = "netxbig:red:power";
+                       mode-addr = <0>;
+                       mode-val = <NETXBIG_LED_OFF 0
+                                   NETXBIG_LED_ON 2
+                                   NETXBIG_LED_TIMER1 4>;
+                       bright-addr = <1>;
+                       max-brightness = <7>;
+               };
+               blue-sata0 {
+                       label = "netxbig:blue:sata0";
+                       mode-addr = <3>;
+                       mode-val = <NETXBIG_LED_OFF 0
+                                   NETXBIG_LED_ON 7
+                                   NETXBIG_LED_SATA 1
+                                   NETXBIG_LED_TIMER1 3>;
+                       bright-addr = <2>;
+                       max-brightness = <7>;
+               };
+               red-sata0 {
+                       label = "netxbig:red:sata0";
+                       mode-addr = <3>;
+                       mode-val = <NETXBIG_LED_OFF 0
+                                   NETXBIG_LED_ON 2
+                                   NETXBIG_LED_TIMER1 4>;
+                       bright-addr = <2>;
+                       max-brightness = <7>;
+               };
+               blue-sata1 {
+                       label = "netxbig:blue:sata1";
+                       mode-addr = <4>;
+                       mode-val = <NETXBIG_LED_OFF 0
+                                   NETXBIG_LED_ON 7
+                                   NETXBIG_LED_SATA 1
+                                   NETXBIG_LED_TIMER1 3>;
+                       bright-addr = <2>;
+                       max-brightness = <7>;
+               };
+               red-sata1 {
+                       label = "netxbig:red:sata1";
+                       mode-addr = <4>;
+                       mode-val = <NETXBIG_LED_OFF 0
+                                   NETXBIG_LED_ON 2
+                                   NETXBIG_LED_TIMER1 4>;
+                       bright-addr = <2>;
+                       max-brightness = <7>;
+               };
+       };
 };
 
 &mdio {
index 91146c318798ff3422f5fecb87ec0d8f0c97df45..5b0430041ec6d1980cb47ba253ce410bcebd05d0 100644 (file)
@@ -12,7 +12,7 @@
 
 / {
        model = "LogicPD Zoom DM3730 Torpedo Development Kit";
-       compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap36xx";
+       compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3630", "ti,omap3";
 
        gpio_keys {
                compatible = "gpio-keys";
index 548441384d2a39488d7daffef79ed6c707576a86..8c77c87660cdf2d72e1df3f00d2c76a8e447293c 100644 (file)
@@ -67,7 +67,7 @@
 
        timer@c1109940 {
                compatible = "amlogic,meson6-timer";
-               reg = <0xc1109940 0x14>;
+               reg = <0xc1109940 0x18>;
                interrupts = <0 10 1>;
        };
 
                wdt: watchdog@c1109900 {
                        compatible = "amlogic,meson6-wdt";
                        reg = <0xc1109900 0x8>;
+                       interrupts = <0 0 1>;
                };
 
                uart_AO: serial@c81004c0 {
                        compatible = "amlogic,meson-uart";
-                       reg = <0xc81004c0 0x14>;
+                       reg = <0xc81004c0 0x18>;
                        interrupts = <0 90 1>;
                        clocks = <&clk81>;
                        status = "disabled";
                };
 
-               uart_A: serial@c81084c0 {
+               uart_A: serial@c11084c0 {
                        compatible = "amlogic,meson-uart";
-                       reg = <0xc81084c0 0x14>;
-                       interrupts = <0 90 1>;
+                       reg = <0xc11084c0 0x18>;
+                       interrupts = <0 26 1>;
                        clocks = <&clk81>;
                        status = "disabled";
                };
 
-               uart_B: serial@c81084dc {
+               uart_B: serial@c11084dc {
                        compatible = "amlogic,meson-uart";
-                       reg = <0xc81084dc 0x14>;
-                       interrupts = <0 90 1>;
+                       reg = <0xc11084dc 0x18>;
+                       interrupts = <0 75 1>;
                        clocks = <&clk81>;
                        status = "disabled";
                };
 
-               uart_C: serial@c8108700 {
+               uart_C: serial@c1108700 {
                        compatible = "amlogic,meson-uart";
-                       reg = <0xc8108700 0x14>;
-                       interrupts = <0 90 1>;
+                       reg = <0xc1108700 0x18>;
+                       interrupts = <0 93 1>;
                        clocks = <&clk81>;
                        status = "disabled";
                };
index 16e8ce350ddaae4f4bedc3d86f9d0418e4852f87..bb339d1648e071c4c456a3f627de68f805e1aa9b 100644 (file)
@@ -13,7 +13,7 @@
 
 / {
        model = "TI OMAP37XX EVM (TMDSEVM3730)";
-       compatible = "ti,omap3-evm-37xx", "ti,omap36xx";
+       compatible = "ti,omap3-evm-37xx", "ti,omap3630", "ti,omap3";
 
        memory {
                device_type = "memory";
index a0b2a79cbfbdf0b42286dea205fe6751fdaf57f6..4624d0f2a75425310a65b462221374eabde85210 100644 (file)
                                "mix.0", "mix.1",
                                "dvc.0", "dvc.1",
                                "clk_a", "clk_b", "clk_c", "clk_i";
+               power-domains = <&cpg_clocks>;
 
                status = "disabled";
 
index 831525dd39a60ad75b024e07b8d6153ff027e6b3..1666c8a6b1432e4f1c8a1a822fb59cb7d5de7375 100644 (file)
                                "mix.0", "mix.1",
                                "dvc.0", "dvc.1",
                                "clk_a", "clk_b", "clk_c", "clk_i";
+               power-domains = <&cpg_clocks>;
 
                status = "disabled";
 
index b5334ecff13cdf1b9d7bfd0649f6737c56ce09dd..fec076eb7aef8f766c939e8b91dca6ef72595bb3 100644 (file)
@@ -90,7 +90,7 @@
        regulators {
                vccio_sd: LDO_REG4 {
                        regulator-name = "vccio_sd";
-                       regulator-min-microvolt = <3300000>;
+                       regulator-min-microvolt = <1800000>;
                        regulator-max-microvolt = <3300000>;
                        regulator-state-mem {
                                regulator-off-in-suspend;
        cap-sd-highspeed;
        card-detect-delay = <200>;
        cd-gpios = <&gpio7 5 GPIO_ACTIVE_LOW>;
+       rockchip,default-sample-phase = <90>;
        num-slots = <1>;
+       sd-uhs-sdr12;
+       sd-uhs-sdr25;
+       sd-uhs-sdr50;
+       sd-uhs-sdr104;
        vmmc-supply = <&vcc33_sd>;
        vqmmc-supply = <&vccio_sd>;
 };
index 275c78ccc0f3e1e378983871f0c71df91ab1dd89..860cea0a7613166d64bfc5924a14af5a1dab8aeb 100644 (file)
        broken-cd;
        bus-width = <8>;
        cap-mmc-highspeed;
+       rockchip,default-sample-phase = <158>;
        disable-wp;
+       mmc-hs200-1_8v;
        mmc-pwrseq = <&emmc_pwrseq>;
        non-removable;
        num-slots = <1>;
        num-slots = <1>;
        pinctrl-names = "default";
        pinctrl-0 = <&sdio0_clk &sdio0_cmd &sdio0_bus4>;
+       sd-uhs-sdr12;
+       sd-uhs-sdr25;
+       sd-uhs-sdr50;
+       sd-uhs-sdr104;
        vmmc-supply = <&vcc33_sys>;
        vqmmc-supply = <&vcc18_wl>;
 };
index 906e938fb6bfc70d9ec7948171ad425f34ae11f3..4e7c6b7392afdb70974078c80154b10a052fb024 100644 (file)
        sdmmc: dwmmc@ff0c0000 {
                compatible = "rockchip,rk3288-dw-mshc";
                clock-freq-min-max = <400000 150000000>;
-               clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>;
-               clock-names = "biu", "ciu";
+               clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+                        <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                reg = <0xff0c0000 0x4000>;
        sdio0: dwmmc@ff0d0000 {
                compatible = "rockchip,rk3288-dw-mshc";
                clock-freq-min-max = <400000 150000000>;
-               clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>;
-               clock-names = "biu", "ciu";
+               clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>,
+                        <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>;
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
                reg = <0xff0d0000 0x4000>;
        sdio1: dwmmc@ff0e0000 {
                compatible = "rockchip,rk3288-dw-mshc";
                clock-freq-min-max = <400000 150000000>;
-               clocks = <&cru HCLK_SDIO1>, <&cru SCLK_SDIO1>;
-               clock-names = "biu", "ciu";
+               clocks = <&cru HCLK_SDIO1>, <&cru SCLK_SDIO1>,
+                        <&cru SCLK_SDIO1_DRV>, <&cru SCLK_SDIO1_SAMPLE>;
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
                reg = <0xff0e0000 0x4000>;
        emmc: dwmmc@ff0f0000 {
                compatible = "rockchip,rk3288-dw-mshc";
                clock-freq-min-max = <400000 150000000>;
-               clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>;
-               clock-names = "biu", "ciu";
+               clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
+                        <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
                reg = <0xff0f0000 0x4000>;
index 034cd48ae28b49b8f29153818abba1dc0187fe0a..cc05cde0f9a4145436f5b3807d78e056b140094e 100644 (file)
                                clocks = <&twi1_clk>;
                                status = "disabled";
                        };
+
+                       pioA: pinctrl@fc038000 {
+                               compatible = "atmel,sama5d2-pinctrl";
+                               reg = <0xfc038000 0x600>;
+                               interrupts = <18 IRQ_TYPE_LEVEL_HIGH 7>,
+                                            <68 IRQ_TYPE_LEVEL_HIGH 7>,
+                                            <69 IRQ_TYPE_LEVEL_HIGH 7>,
+                                            <70 IRQ_TYPE_LEVEL_HIGH 7>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               clocks = <&pioA_clk>;
+                       };
                };
        };
 };
index 810cda743b6d56ae19118260367f986dcee690af..9c2387b34d0c73c6942c4051d7f6ce72ee2a0aec 100644 (file)
@@ -56,7 +56,7 @@
                                        /* VMMCI level-shifter enable */
                                        default_hrefv60_cfg2 {
                                                pins = "GPIO169_D22";
-                                               ste,config = <&gpio_out_lo>;
+                                               ste,config = <&gpio_out_hi>;
                                        };
                                        /* VMMCI level-shifter voltage select */
                                        default_hrefv60_cfg3 {
index 32a5ccb14e7ebfbaec118ceed1f3bc14c2b646df..e80e42163883610005287282d9673c80377b6ce4 100644 (file)
 
                button@1 {
                        debounce_interval = <50>;
-                       wakeup = <1>;
+                       wakeup-source;
                        linux,code = <2>;
                        label = "userpb";
                        gpios = <&gpio1 0 0x4>;
                };
                button@2 {
                        debounce_interval = <50>;
-                       wakeup = <1>;
+                       wakeup-source;
                        linux,code = <3>;
                        label = "extkb1";
                        gpios = <&gpio4 23 0x4>;
                };
                button@3 {
                        debounce_interval = <50>;
-                       wakeup = <1>;
+                       wakeup-source;
                        linux,code = <4>;
                        label = "extkb2";
                        gpios = <&gpio4 24 0x4>;
                };
                button@4 {
                        debounce_interval = <50>;
-                       wakeup = <1>;
+                       wakeup-source;
                        linux,code = <5>;
                        label = "extkb3";
                        gpios = <&gpio5 1 0x4>;
                };
                button@5 {
                        debounce_interval = <50>;
-                       wakeup = <1>;
+                       wakeup-source;
                        linux,code = <6>;
                        label = "extkb4";
                        gpios = <&gpio5 2 0x4>;
index 2bebaa286f9a3a49c65908f8a4874d5e3091b32a..391230c3dc938fb1a0a4c92ea91e05a209319baa 100644 (file)
                                720000  1200000
                                528000  1100000
                                312000  1000000
-                               144000  900000
+                               144000  1000000
                                >;
                        #cooling-cells = <2>;
                        cooling-min-level = <0>;
index 9d4f86e9c50ada156a5ae785399799b23d67efba..d845bd1448b5459f9a6e4ab52c538c541b8832ef 100644 (file)
                gpio-controller;
                #interrupt-cells = <2>;
                interrupt-controller;
+               /*
                gpio-ranges = <&pinmux 0 0 246>;
+               */
        };
 
        apbmisc@70000800 {
index 1e204a6de12c3a4156821a289a2f7064bd346574..819e2ae2cabe28b09952807ca2f0249c1aa82b7f 100644 (file)
                gpio-controller;
                #interrupt-cells = <2>;
                interrupt-controller;
+               /*
                gpio-ranges = <&pinmux 0 0 251>;
+               */
        };
 
        apbdma: dma@0,60020000 {
index e058709e6d98c0db886bf4549366722605380985..969b828505ae4404846ff169ce17ad7ffd178ec2 100644 (file)
                gpio-controller;
                #interrupt-cells = <2>;
                interrupt-controller;
+               /*
                gpio-ranges = <&pinmux 0 0 224>;
+               */
        };
 
        apbmisc@70000800 {
index fe04fb5e155f4c7dd39509b8877d95eb7a6f8021..c6938ad1b543fb93cf22d297dda5b2d18795afe8 100644 (file)
                gpio-controller;
                #interrupt-cells = <2>;
                interrupt-controller;
+               /*
                gpio-ranges = <&pinmux 0 0 248>;
+               */
        };
 
        apbmisc@70000800 {
index 33963acd7e8f9227eaca31a60c240a37d9f6e754..f80f772d99fb5750ca4cb484a49bbea18b8ba61a 100644 (file)
@@ -85,7 +85,7 @@
 };
 
 &ethsc {
-       interrupts = <0 50 4>;
+       interrupts = <0 52 4>;
 };
 
 &serial0 {
index 1ff2bfa2e183f45087571875197888e81e3cc8ad..13ba48c4b03b0e0095cf5cc1a40bb9d26da77f92 100644 (file)
@@ -166,7 +166,6 @@ CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_S3C=y
 CONFIG_MMC_SDHCI_S3C_DMA=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MAX77686=y
index 5997dbc69822af26b1c7cbd86679ae2488529de8..b2e340b272eea44362f6a494cc23667a4537241a 100644 (file)
@@ -69,7 +69,6 @@ CONFIG_NOP_USB_XCEIV=y
 CONFIG_MMC=y
 CONFIG_RTC_CLASS=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
 CONFIG_MMC_DW_PLTFM=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_DMADEVICES=y
index 1c47f86c3970ae926b169e00c642fa853f9f365d..b7e8cdab51f97ab4689a46fee6f8ad82a827bf7d 100644 (file)
@@ -119,7 +119,6 @@ CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_MMC=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_PCA9532=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
new file mode 100644 (file)
index 0000000..6607d97
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+ * arch/arm/include/asm/arch_gicv3.h
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_ARCH_GICV3_H
+#define __ASM_ARCH_GICV3_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/io.h>
+
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2)      p15, Op1, %0, CRn, CRm, Op2
+#define __ACCESS_CP15_64(Op1, CRm)             p15, Op1, %Q0, %R0, CRm
+
+#define ICC_EOIR1                      __ACCESS_CP15(c12, 0, c12, 1)
+#define ICC_DIR                                __ACCESS_CP15(c12, 0, c11, 1)
+#define ICC_IAR1                       __ACCESS_CP15(c12, 0, c12, 0)
+#define ICC_SGI1R                      __ACCESS_CP15_64(0, c12)
+#define ICC_PMR                                __ACCESS_CP15(c4, 0, c6, 0)
+#define ICC_CTLR                       __ACCESS_CP15(c12, 0, c12, 4)
+#define ICC_SRE                                __ACCESS_CP15(c12, 0, c12, 5)
+#define ICC_IGRPEN1                    __ACCESS_CP15(c12, 0, c12, 7)
+
+#define ICC_HSRE                       __ACCESS_CP15(c12, 4, c9, 5)
+
+#define ICH_VSEIR                      __ACCESS_CP15(c12, 4, c9, 4)
+#define ICH_HCR                                __ACCESS_CP15(c12, 4, c11, 0)
+#define ICH_VTR                                __ACCESS_CP15(c12, 4, c11, 1)
+#define ICH_MISR                       __ACCESS_CP15(c12, 4, c11, 2)
+#define ICH_EISR                       __ACCESS_CP15(c12, 4, c11, 3)
+#define ICH_ELSR                       __ACCESS_CP15(c12, 4, c11, 5)
+#define ICH_VMCR                       __ACCESS_CP15(c12, 4, c11, 7)
+
+#define __LR0(x)                       __ACCESS_CP15(c12, 4, c12, x)
+#define __LR8(x)                       __ACCESS_CP15(c12, 4, c13, x)
+
+#define ICH_LR0                                __LR0(0)
+#define ICH_LR1                                __LR0(1)
+#define ICH_LR2                                __LR0(2)
+#define ICH_LR3                                __LR0(3)
+#define ICH_LR4                                __LR0(4)
+#define ICH_LR5                                __LR0(5)
+#define ICH_LR6                                __LR0(6)
+#define ICH_LR7                                __LR0(7)
+#define ICH_LR8                                __LR8(0)
+#define ICH_LR9                                __LR8(1)
+#define ICH_LR10                       __LR8(2)
+#define ICH_LR11                       __LR8(3)
+#define ICH_LR12                       __LR8(4)
+#define ICH_LR13                       __LR8(5)
+#define ICH_LR14                       __LR8(6)
+#define ICH_LR15                       __LR8(7)
+
+/* LR top half */
+#define __LRC0(x)                      __ACCESS_CP15(c12, 4, c14, x)
+#define __LRC8(x)                      __ACCESS_CP15(c12, 4, c15, x)
+
+#define ICH_LRC0                       __LRC0(0)
+#define ICH_LRC1                       __LRC0(1)
+#define ICH_LRC2                       __LRC0(2)
+#define ICH_LRC3                       __LRC0(3)
+#define ICH_LRC4                       __LRC0(4)
+#define ICH_LRC5                       __LRC0(5)
+#define ICH_LRC6                       __LRC0(6)
+#define ICH_LRC7                       __LRC0(7)
+#define ICH_LRC8                       __LRC8(0)
+#define ICH_LRC9                       __LRC8(1)
+#define ICH_LRC10                      __LRC8(2)
+#define ICH_LRC11                      __LRC8(3)
+#define ICH_LRC12                      __LRC8(4)
+#define ICH_LRC13                      __LRC8(5)
+#define ICH_LRC14                      __LRC8(6)
+#define ICH_LRC15                      __LRC8(7)
+
+#define __AP0Rx(x)                     __ACCESS_CP15(c12, 4, c8, x)
+#define ICH_AP0R0                      __AP0Rx(0)
+#define ICH_AP0R1                      __AP0Rx(1)
+#define ICH_AP0R2                      __AP0Rx(2)
+#define ICH_AP0R3                      __AP0Rx(3)
+
+#define __AP1Rx(x)                     __ACCESS_CP15(c12, 4, c9, x)
+#define ICH_AP1R0                      __AP1Rx(0)
+#define ICH_AP1R1                      __AP1Rx(1)
+#define ICH_AP1R2                      __AP1Rx(2)
+#define ICH_AP1R3                      __AP1Rx(3)
+
+/* Low-level accessors */
+
+static inline void gic_write_eoir(u32 irq)
+{
+       asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq));
+       isb();
+}
+
+static inline void gic_write_dir(u32 val)
+{
+       asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val));
+       isb();
+}
+
+static inline u32 gic_read_iar(void)
+{
+       u32 irqstat;
+
+       asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
+       return irqstat;
+}
+
+static inline void gic_write_pmr(u32 val)
+{
+       asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val));
+}
+
+static inline void gic_write_ctlr(u32 val)
+{
+       asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val));
+       isb();
+}
+
+static inline void gic_write_grpen1(u32 val)
+{
+       asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val));
+       isb();
+}
+
+static inline void gic_write_sgi1r(u64 val)
+{
+       asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val));
+}
+
+static inline u32 gic_read_sre(void)
+{
+       u32 val;
+
+       asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val));
+       return val;
+}
+
+static inline void gic_write_sre(u32 val)
+{
+       asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val));
+       isb();
+}
+
+/*
+ * Even in 32bit systems that use LPAE, there is no guarantee that the I/O
+ * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
+ * make much sense.
+ * Moreover, 64bit I/O emulation is extremely difficult to implement on
+ * AArch32, since the syndrome register doesn't provide any information for
+ * them.
+ * Consequently, the following IO helpers use 32bit accesses.
+ *
+ * There are only two registers that need 64bit accesses in this driver:
+ * - GICD_IROUTERn, contain the affinity values associated to each interrupt.
+ *   The upper-word (aff3) will always be 0, so there is no need for a lock.
+ * - GICR_TYPER is an ID register and doesn't need atomicity.
+ */
+static inline void gic_write_irouter(u64 val, volatile void __iomem *addr)
+{
+       writel_relaxed((u32)val, addr);
+       writel_relaxed((u32)(val >> 32), addr + 4);
+}
+
+static inline u64 gic_read_typer(const volatile void __iomem *addr)
+{
+       u64 val;
+
+       val = readl_relaxed(addr);
+       val |= (u64)readl_relaxed(addr + 4) << 32;
+       return val;
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif /* !__ASM_ARCH_GICV3_H */
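
The new header provides AArch32 counterparts of the arm64 GICv3 system-register accessors. As a rough illustration of how they fit together (a minimal, hypothetical sketch only; the actual consumer is the GICv3 irqchip driver under drivers/irqchip/), an acknowledge/EOI loop built on these helpers would look roughly like this:

/* Hypothetical acknowledge/EOI loop -- illustrative only, not part of the patch. */
static void example_gic_handle_irq(void)
{
        u32 irqnr;

        do {
                irqnr = gic_read_iar();         /* acknowledge, returns the INTID */

                if (irqnr == 1023)              /* spurious INTID: nothing pending */
                        break;

                /* ... look up and dispatch the handler for irqnr ... */

                gic_write_eoir(irqnr);          /* priority drop (and deactivate
                                                 * when EOImode == 0) */
        } while (1);
}

The 64-bit GICD_IROUTERn/GICR_TYPER helpers at the end deliberately issue two 32-bit MMIO accesses, for the reasons given in the comment above them.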
index fe3ef397f5a407073e6a737cc12fbbbcdbe714c1..9e10c4567eb4dd3c898486f5fc34777c3760adc9 100644 (file)
@@ -27,8 +27,8 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_set(v,i)        (((v)->counter) = (i))
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic_set(v,i)        WRITE_ONCE(((v)->counter), (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
@@ -210,8 +210,8 @@ ATOMIC_OP(xor, ^=, eor)
 
 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)    (atomic_add_return(1, v))
-#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+#define atomic_inc_return_relaxed(v)    (atomic_add_return_relaxed(1, v))
+#define atomic_dec_return_relaxed(v)    (atomic_sub_return_relaxed(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
@@ -442,11 +442,11 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 
 #define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v)                        atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)         atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
 #define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v)                        atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)         atomic64_sub_return(1LL, (v))
+#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
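
READ_ONCE()/WRITE_ONCE() give the same single-copy access guarantee as the old ACCESS_ONCE() while also being usable on non-scalar types, and the _relaxed inc/dec variants let the generic include/linux/atomic.h wrappers construct the fully ordered versions. A minimal sketch of why the accessor change matters (hypothetical helper, for illustration only):

/* Hypothetical polling helper: READ_ONCE() inside atomic_read() forces a
 * fresh load of v->counter on every pass, so the compiler cannot hoist the
 * load out of the loop and spin forever on a stale register value. */
static void wait_until_nonzero(atomic_t *v)
{
        while (atomic_read(v) == 0)
                cpu_relax();
}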
 
index 916a2744d5c66da7c442ed9fec56c01261ff2eb4..97882f9bad12938780b3455d5ae8f8b184d5f5be 100644 (file)
@@ -39,6 +39,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 
        switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
+#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
        case 1:
                asm volatile("@ __xchg1\n"
                "1:     ldrexb  %0, [%3]\n"
@@ -49,6 +50,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
+       case 2:
+               asm volatile("@ __xchg2\n"
+               "1:     ldrexh  %0, [%3]\n"
+               "       strexh  %1, %2, [%3]\n"
+               "       teq     %1, #0\n"
+               "       bne     1b"
+                       : "=&r" (ret), "=&r" (tmp)
+                       : "r" (x), "r" (ptr)
+                       : "memory", "cc");
+               break;
+#endif
        case 4:
                asm volatile("@ __xchg4\n"
                "1:     ldrex   %0, [%3]\n"
index 43908146a5cf05c473967d603247a792f73b5663..e6b70d9d084ea5d369c237f9a3f81c92e331a429 100644 (file)
@@ -54,6 +54,14 @@ static inline void arch_local_irq_disable(void)
 
 #define local_fiq_enable()  __asm__("cpsie f   @ __stf" : : : "memory", "cc")
 #define local_fiq_disable() __asm__("cpsid f   @ __clf" : : : "memory", "cc")
+
+#ifndef CONFIG_CPU_V7M
+#define local_abt_enable()  __asm__("cpsie a   @ __sta" : : : "memory", "cc")
+#define local_abt_disable() __asm__("cpsid a   @ __cla" : : : "memory", "cc")
+#else
+#define local_abt_enable()     do { } while (0)
+#define local_abt_disable()    do { } while (0)
+#endif
 #else
 
 /*
@@ -136,6 +144,8 @@ static inline void arch_local_irq_disable(void)
        : "memory", "cc");                                      \
        })
 
+#define local_abt_enable()     do { } while (0)
+#define local_abt_disable()    do { } while (0)
 #endif
 
 /*
index cb3a40717edd045042f97d1f1d7a81ba658dca3f..5c1ad11aa39264aee7e9210cbf6747adab443930 100644 (file)
@@ -47,7 +47,7 @@ struct machine_desc {
        unsigned                l2c_aux_val;    /* L2 cache aux value   */
        unsigned                l2c_aux_mask;   /* L2 cache aux mask    */
        void                    (*l2c_write_sec)(unsigned long, unsigned);
-       struct smp_operations   *smp;           /* SMP operations       */
+       const struct smp_operations     *smp;   /* SMP operations       */
        bool                    (*smp_init)(void);
        void                    (*fixup)(struct tag *, char **);
        void                    (*dt_fixup)(void);
index 98d58bb04ac57853910860ae5f124c1fd1cb14bf..c79b57bf71c40d1fc2083f67b198377936e0002f 100644 (file)
  */
 #define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))
 
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 /*
  * Allow 16MB-aligned ioremap pages
  */
 #define IOREMAP_MAX_ORDER      24
+#endif
 
 #else /* CONFIG_MMU */
 
index f40354198bad4078717ac8933966d511c6dc130b..348caabb7625ee7b12fc0ddd8e2546b22f338b1c 100644 (file)
@@ -43,7 +43,7 @@
  */
 #define VMALLOC_OFFSET         (8*1024*1024)
 #define VMALLOC_START          (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_END            0xff000000UL
+#define VMALLOC_END            0xff800000UL
 
 #define LIBRARY_TEXT_START     0x0c000000
 
index ef356659b4f43e6125443e7edd67aa1706bc8d2e..3d6dc8b460e4b4ea6b32d66f3ec0004378e53d9f 100644 (file)
@@ -112,7 +112,7 @@ struct smp_operations {
 
 struct of_cpu_method {
        const char *method;
-       struct smp_operations *ops;
+       const struct smp_operations *ops;
 };
 
 #define CPU_METHOD_OF_DECLARE(name, _method, _ops)                     \
@@ -122,6 +122,6 @@ struct of_cpu_method {
 /*
  * set platform specific SMP operations
  */
-extern void smp_set_ops(struct smp_operations *);
+extern void smp_set_ops(const struct smp_operations *);
 
 #endif /* ifndef __ASM_ARM_SMP_H */
index 7cba573c2cc9541a23ff2e0675a701117f7cf269..7b84657fba3577ea3d29ecf1b93e3267feecbb59 100644 (file)
  */
 #define __NR_syscalls  (392)
 
-/*
- * *NOTE*: This is a ghost syscall private to the kernel.  Only the
- * __kuser_cmpxchg code in entry-armv.S should be aware of its
- * existence.  Don't ever use this from user code.
- */
-#define __ARM_NR_cmpxchg               (__ARM_NR_BASE+0x00fff0)
-
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_PAUSE
index 11c54de9f8cfa1e6227ea38dcfddddf2f67ee403..65addcbf5b308acf5dc584419b90f6c65b28d343 100644 (file)
@@ -101,6 +101,7 @@ void __init arm_dt_init_cpu_maps(void)
                if (of_property_read_u32(cpu, "reg", &hwid)) {
                        pr_debug(" * %s missing reg property\n",
                                     cpu->full_name);
+                       of_node_put(cpu);
                        return;
                }
 
@@ -108,8 +109,10 @@ void __init arm_dt_init_cpu_maps(void)
                 * 8 MSBs must be set to 0 in the DT since the reg property
                 * defines the MPIDR[23:0].
                 */
-               if (hwid & ~MPIDR_HWID_BITMASK)
+               if (hwid & ~MPIDR_HWID_BITMASK) {
+                       of_node_put(cpu);
                        return;
+               }
 
                /*
                 * Duplicate MPIDRs are a recipe for disaster.
@@ -119,9 +122,11 @@ void __init arm_dt_init_cpu_maps(void)
                 * to avoid matching valid MPIDR[23:0] values.
                 */
                for (j = 0; j < cpuidx; j++)
-                       if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg "
-                                                    "properties in the DT\n"))
+                       if (WARN(tmp_map[j] == hwid,
+                                "Duplicate /cpu reg properties in the DT\n")) {
+                               of_node_put(cpu);
                                return;
+                       }
 
                /*
                 * Build a stashed array of MPIDR values. Numbering scheme
@@ -143,6 +148,7 @@ void __init arm_dt_init_cpu_maps(void)
                                               "max cores %u, capping them\n",
                                               cpuidx, nr_cpu_ids)) {
                        cpuidx = nr_cpu_ids;
+                       of_node_put(cpu);
                        break;
                }
 
index 3e1c26eb32b43e13a5fa3e70b2d09f91a07ebd4e..3ce377f7251f3429668c2e2b563fcd8062c991ae 100644 (file)
@@ -427,8 +427,7 @@ ENDPROC(__fiq_abt)
        .endm
 
        .macro  kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
-    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
@@ -859,20 +858,7 @@ __kuser_helper_start:
 
 __kuser_cmpxchg64:                             @ 0xffff0f60
 
-#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-
-       /*
-        * Poor you.  No fast solution possible...
-        * The kernel itself must perform the operation.
-        * A special ghost syscall is used for that (see traps.c).
-        */
-       stmfd   sp!, {r7, lr}
-       ldr     r7, 1f                  @ it's 20 bits
-       swi     __ARM_NR_cmpxchg64
-       ldmfd   sp!, {r7, pc}
-1:     .word   __ARM_NR_cmpxchg64
-
-#elif defined(CONFIG_CPU_32v6K)
+#if defined(CONFIG_CPU_32v6K)
 
        stmfd   sp!, {r4, r5, r6, r7}
        ldrd    r4, r5, [r0]                    @ load old val
@@ -948,20 +934,7 @@ __kuser_memory_barrier:                            @ 0xffff0fa0
 
 __kuser_cmpxchg:                               @ 0xffff0fc0
 
-#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-
-       /*
-        * Poor you.  No fast solution possible...
-        * The kernel itself must perform the operation.
-        * A special ghost syscall is used for that (see traps.c).
-        */
-       stmfd   sp!, {r7, lr}
-       ldr     r7, 1f                  @ it's 20 bits
-       swi     __ARM_NR_cmpxchg
-       ldmfd   sp!, {r7, pc}
-1:     .word   __ARM_NR_cmpxchg
-
-#elif __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6
 
 #ifdef CONFIG_MMU
 
index dc7d0a95bd3651c2949454027a3b6c47dd77863c..6284779d64ee6394dc11b38cc32e24cefac276b1 100644 (file)
@@ -35,7 +35,6 @@
 #include <asm/cputype.h>
 #include <asm/current.h>
 #include <asm/hw_breakpoint.h>
-#include <asm/kdebug.h>
 #include <asm/traps.h>
 
 /* Breakpoint currently in use for each BRP. */
index fd9eefce0a7b8d66e019b048e4ee24753da7acc0..9232caee70604c686e07b7aef4c21598c7196fa4 100644 (file)
@@ -74,7 +74,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
 void
 sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
 {
-       struct pt_regs *thread_regs;
+       struct thread_info *ti;
        int regno;
 
        /* Just making sure... */
@@ -86,24 +86,17 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
                gdb_regs[regno] = 0;
 
        /* Otherwise, we have only some registers from switch_to() */
-       thread_regs             = task_pt_regs(task);
-       gdb_regs[_R0]           = thread_regs->ARM_r0;
-       gdb_regs[_R1]           = thread_regs->ARM_r1;
-       gdb_regs[_R2]           = thread_regs->ARM_r2;
-       gdb_regs[_R3]           = thread_regs->ARM_r3;
-       gdb_regs[_R4]           = thread_regs->ARM_r4;
-       gdb_regs[_R5]           = thread_regs->ARM_r5;
-       gdb_regs[_R6]           = thread_regs->ARM_r6;
-       gdb_regs[_R7]           = thread_regs->ARM_r7;
-       gdb_regs[_R8]           = thread_regs->ARM_r8;
-       gdb_regs[_R9]           = thread_regs->ARM_r9;
-       gdb_regs[_R10]          = thread_regs->ARM_r10;
-       gdb_regs[_FP]           = thread_regs->ARM_fp;
-       gdb_regs[_IP]           = thread_regs->ARM_ip;
-       gdb_regs[_SPT]          = thread_regs->ARM_sp;
-       gdb_regs[_LR]           = thread_regs->ARM_lr;
-       gdb_regs[_PC]           = thread_regs->ARM_pc;
-       gdb_regs[_CPSR]         = thread_regs->ARM_cpsr;
+       ti                      = task_thread_info(task);
+       gdb_regs[_R4]           = ti->cpu_context.r4;
+       gdb_regs[_R5]           = ti->cpu_context.r5;
+       gdb_regs[_R6]           = ti->cpu_context.r6;
+       gdb_regs[_R7]           = ti->cpu_context.r7;
+       gdb_regs[_R8]           = ti->cpu_context.r8;
+       gdb_regs[_R9]           = ti->cpu_context.r9;
+       gdb_regs[_R10]          = ti->cpu_context.sl;
+       gdb_regs[_FP]           = ti->cpu_context.fp;
+       gdb_regs[_SPT]          = ti->cpu_context.sp;
+       gdb_regs[_PC]           = ti->cpu_context.pc;
 }
 
 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
index 48185a773852d4ec501702ae3a6471d38b03ce10..b26361355daeb39b61c238333c20dd2d68ec4a61 100644 (file)
@@ -80,7 +80,7 @@ static DECLARE_COMPLETION(cpu_running);
 
 static struct smp_operations smp_ops;
 
-void __init smp_set_ops(struct smp_operations *ops)
+void __init smp_set_ops(const struct smp_operations *ops)
 {
        if (ops)
                smp_ops = *ops;
@@ -400,6 +400,7 @@ asmlinkage void secondary_start_kernel(void)
 
        local_irq_enable();
        local_fiq_enable();
+       local_abt_enable();
 
        /*
         * OK, it's off to the idle thread for us
@@ -748,6 +749,15 @@ core_initcall(register_cpufreq_notifier);
 
 static void raise_nmi(cpumask_t *mask)
 {
+       /*
+        * Generate the backtrace directly if we are running in a calling
+        * context that is not preemptible by the backtrace IPI. Note
+        * that nmi_cpu_backtrace() automatically removes the current cpu
+        * from mask.
+        */
+       if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
+               nmi_cpu_backtrace(NULL);
+
        smp_cross_call(mask, IPI_CPU_BACKTRACE);
 }
 
index e9035cda148563e9231ccf184d141930990f6662..1bfa7a7f55336119bfb647eb1e419fe81c086a05 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 
-#include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 
 /* set up by the platform code */
@@ -34,6 +33,8 @@ static unsigned long twd_timer_rate;
 static DEFINE_PER_CPU(bool, percpu_setup_called);
 
 static struct clock_event_device __percpu *twd_evt;
+static unsigned int twd_features =
+               CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 static int twd_ppi;
 
 static int twd_shutdown(struct clock_event_device *clk)
@@ -294,8 +295,7 @@ static void twd_timer_setup(void)
        writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
 
        clk->name = "local_timer";
-       clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
-                       CLOCK_EVT_FEAT_C3STOP;
+       clk->features = twd_features;
        clk->rating = 350;
        clk->set_state_shutdown = twd_shutdown;
        clk->set_state_periodic = twd_set_periodic;
@@ -350,6 +350,8 @@ static int __init twd_local_timer_common_register(struct device_node *np)
                goto out_irq;
 
        twd_get_clock(np);
+       if (!of_property_read_bool(np, "always-on"))
+               twd_features |= CLOCK_EVT_FEAT_C3STOP;
 
        /*
         * Immediately configure the timer on the boot CPU, unless we need
@@ -392,9 +394,6 @@ static void __init twd_local_timer_of_register(struct device_node *np)
 {
        int err;
 
-       if (!is_smp() || !setup_max_cpus)
-               return;
-
        twd_ppi = irq_of_parse_and_map(np, 0);
        if (!twd_ppi) {
                err = -EINVAL;
index 969f9d9e665f4d49b2951ed65cfde9c279e1aa55..bc698383e82253a47427885359e07e22daa24179 100644 (file)
@@ -625,58 +625,6 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
                set_tls(regs->ARM_r0);
                return 0;
 
-#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
-       /*
-        * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
-        * Return zero in r0 if *MEM was changed or non-zero if no exchange
-        * happened.  Also set the user C flag accordingly.
-        * If access permissions have to be fixed up then non-zero is
-        * returned and the operation has to be re-attempted.
-        *
-        * *NOTE*: This is a ghost syscall private to the kernel.  Only the
-        * __kuser_cmpxchg code in entry-armv.S should be aware of its
-        * existence.  Don't ever use this from user code.
-        */
-       case NR(cmpxchg):
-       for (;;) {
-               extern void do_DataAbort(unsigned long addr, unsigned int fsr,
-                                        struct pt_regs *regs);
-               unsigned long val;
-               unsigned long addr = regs->ARM_r2;
-               struct mm_struct *mm = current->mm;
-               pgd_t *pgd; pmd_t *pmd; pte_t *pte;
-               spinlock_t *ptl;
-
-               regs->ARM_cpsr &= ~PSR_C_BIT;
-               down_read(&mm->mmap_sem);
-               pgd = pgd_offset(mm, addr);
-               if (!pgd_present(*pgd))
-                       goto bad_access;
-               pmd = pmd_offset(pgd, addr);
-               if (!pmd_present(*pmd))
-                       goto bad_access;
-               pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-               if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
-                       pte_unmap_unlock(pte, ptl);
-                       goto bad_access;
-               }
-               val = *(unsigned long *)addr;
-               val -= regs->ARM_r0;
-               if (val == 0) {
-                       *(unsigned long *)addr = regs->ARM_r1;
-                       regs->ARM_cpsr |= PSR_C_BIT;
-               }
-               pte_unmap_unlock(pte, ptl);
-               up_read(&mm->mmap_sem);
-               return val;
-
-               bad_access:
-               up_read(&mm->mmap_sem);
-               /* simulate a write access fault */
-               do_DataAbort(addr, 15 + (1 << 11), regs);
-       }
-#endif
-
        default:
                /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
                   if not implemented, rather than raising SIGILL.  This
index 210eccadb69a9770ba1b51beb077bb071f565261..356970f3b25e3d2f54b691c8c8b1bb9baca1d7b1 100644 (file)
@@ -21,6 +21,7 @@ config KVM
        depends on MMU && OF
        select PREEMPT_NOTIFIERS
        select ANON_INODES
+       select ARM_GIC
        select HAVE_KVM_CPU_RELAX_INTERCEPT
        select HAVE_KVM_ARCH_TLB_FLUSH_ALL
        select KVM_MMIO
index dc017adfddc8b83698fa8486e2b9b6dbc1e189a0..78b286994577183b8d9ef415ae3ba5f6a41113c4 100644 (file)
@@ -1080,7 +1080,7 @@ static int init_hyp_mode(void)
         */
        err = kvm_timer_hyp_init();
        if (err)
-               goto out_free_mappings;
+               goto out_free_context;
 
 #ifndef CONFIG_HOTPLUG_CPU
        free_boot_hyp_pgd();
index 970d6c0437743cda6a78620e1439eccb91398da2..e936352ccb0013e040fcd9b22bda1c583cfff361 100644 (file)
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/unwind.h>
 
                .text
 
@@ -20,6 +21,8 @@
  */
 ENTRY(__clear_user_std)
 WEAK(arm_clear_user)
+UNWIND(.fnstart)
+UNWIND(.save {r1, lr})
                stmfd   sp!, {r1, lr}
                mov     r2, #0
                cmp     r1, #4
@@ -44,6 +47,7 @@ WEAK(arm_clear_user)
 USER(          strnebt r2, [r0])
                mov     r0, #0
                ldmfd   sp!, {r1, pc}
+UNWIND(.fnend)
 ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)
 
index 9bdf54795f05de26283881729a4427bd781ea2eb..56978199c4798fa236394c232e50d58a61e4fd3d 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/cputype.h>
 #include <asm/cp15.h>
 #include <asm/mcpm.h>
+#include <asm/smp_plat.h>
 
 #include "regs-pmu.h"
 #include "common.h"
@@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
                cluster >= EXYNOS5420_NR_CLUSTERS)
                return -EINVAL;
 
-       exynos_cpu_power_up(cpunr);
+       if (!exynos_cpu_power_state(cpunr)) {
+               exynos_cpu_power_up(cpunr);
+
+               /*
+                * This assumes the cluster number of the big cores(Cortex A15)
+                * is 0 and the Little cores(Cortex A7) is 1.
+                * When the system was booted from the Little core,
+                * they should be reset during power up cpu.
+                */
+               if (cluster &&
+                   cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
+                       /*
+                        * Before we reset the Little cores, we should wait
+                        * the SPARE2 register is set to 1 because the init
+                        * codes of the iROM will set the register after
+                        * initialization.
+                        */
+                       while (!pmu_raw_readl(S5P_PMU_SPARE2))
+                               udelay(10);
+
+                       pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
+                                       EXYNOS_SWRESET);
+               }
+       }
+
        return 0;
 }
 
index 4a87e86dec45d1546153ca0ebb7310bbd5f82d93..7c21760f590ffd0d4cd47fcafcbcaffe64a85952 100644 (file)
@@ -200,15 +200,15 @@ no_clk:
                args.args_count = 0;
                child_domain = of_genpd_get_from_provider(&args);
                if (IS_ERR(child_domain))
-                       goto next_pd;
+                       continue;
 
                if (of_parse_phandle_with_args(np, "power-domains",
                                         "#power-domain-cells", 0, &args) != 0)
-                       goto next_pd;
+                       continue;
 
                parent_domain = of_genpd_get_from_provider(&args);
                if (IS_ERR(parent_domain))
-                       goto next_pd;
+                       continue;
 
                if (pm_genpd_add_subdomain(parent_domain, child_domain))
                        pr_warn("%s failed to add subdomain: %s\n",
@@ -216,8 +216,6 @@ no_clk:
                else
                        pr_info("%s has as child subdomain: %s.\n",
                                parent_domain->name, child_domain->name);
-next_pd:
-               of_node_put(np);
        }
 
        return 0;
index b7614333d2968befa767109f693bf7947528db4a..fba9068ed260de7f8211525e772ffc25d7d88f0a 100644 (file)
@@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr)
 #define SPREAD_ENABLE                                          0xF
 #define SPREAD_USE_STANDWFI                                    0xF
 
+#define EXYNOS5420_KFC_CORE_RESET0                             BIT(8)
+#define EXYNOS5420_KFC_ETM_RESET0                              BIT(20)
+
+#define EXYNOS5420_KFC_CORE_RESET(_nr)                         \
+       ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+
 #define EXYNOS5420_BB_CON1                                     0x0784
 #define EXYNOS5420_BB_SEL_EN                                   BIT(31)
 #define EXYNOS5420_BB_PMOS_EN                                  BIT(7)
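
For reference, the new reset macro simply shifts the combined core/ETM reset bits by the KFC core number; a worked expansion, derived from the definitions above:

/*
 * EXYNOS5420_KFC_CORE_RESET(0) == BIT(8)  | BIT(20)
 * EXYNOS5420_KFC_CORE_RESET(1) == BIT(9)  | BIT(21)
 * EXYNOS5420_KFC_CORE_RESET(2) == BIT(10) | BIT(22)
 * EXYNOS5420_KFC_CORE_RESET(3) == BIT(11) | BIT(23)
 *
 * i.e. the CPU core reset and ETM reset bits for KFC (Cortex-A7) core _nr
 * are asserted together when the value is written to EXYNOS_SWRESET.
 */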
index e00eb39453a41ff3cf0090a6db53ee80a2afc7dc..5a7e47ceec91f7a6486821cc0a4b93e7d51257a9 100644 (file)
@@ -177,54 +177,57 @@ static struct irq_chip exynos_pmu_chip = {
 #endif
 };
 
-static int exynos_pmu_domain_xlate(struct irq_domain *domain,
-                                  struct device_node *controller,
-                                  const u32 *intspec,
-                                  unsigned int intsize,
-                                  unsigned long *out_hwirq,
-                                  unsigned int *out_type)
+static int exynos_pmu_domain_translate(struct irq_domain *d,
+                                      struct irq_fwspec *fwspec,
+                                      unsigned long *hwirq,
+                                      unsigned int *type)
 {
-       if (domain->of_node != controller)
-               return -EINVAL; /* Shouldn't happen, really... */
-       if (intsize != 3)
-               return -EINVAL; /* Not GIC compliant */
-       if (intspec[0] != 0)
-               return -EINVAL; /* No PPI should point to this domain */
+       if (is_of_node(fwspec->fwnode)) {
+               if (fwspec->param_count != 3)
+                       return -EINVAL;
 
-       *out_hwirq = intspec[1];
-       *out_type = intspec[2];
-       return 0;
+               /* No PPI should point to this domain */
+               if (fwspec->param[0] != 0)
+                       return -EINVAL;
+
+               *hwirq = fwspec->param[1];
+               *type = fwspec->param[2];
+               return 0;
+       }
+
+       return -EINVAL;
 }
 
 static int exynos_pmu_domain_alloc(struct irq_domain *domain,
                                   unsigned int virq,
                                   unsigned int nr_irqs, void *data)
 {
-       struct of_phandle_args *args = data;
-       struct of_phandle_args parent_args;
+       struct irq_fwspec *fwspec = data;
+       struct irq_fwspec parent_fwspec;
        irq_hw_number_t hwirq;
        int i;
 
-       if (args->args_count != 3)
+       if (fwspec->param_count != 3)
                return -EINVAL; /* Not GIC compliant */
-       if (args->args[0] != 0)
+       if (fwspec->param[0] != 0)
                return -EINVAL; /* No PPI should point to this domain */
 
-       hwirq = args->args[1];
+       hwirq = fwspec->param[1];
 
        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                              &exynos_pmu_chip, NULL);
 
-       parent_args = *args;
-       parent_args.np = domain->parent->of_node;
-       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args);
+       parent_fwspec = *fwspec;
+       parent_fwspec.fwnode = domain->parent->fwnode;
+       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+                                           &parent_fwspec);
 }
 
 static const struct irq_domain_ops exynos_pmu_domain_ops = {
-       .xlate  = exynos_pmu_domain_xlate,
-       .alloc  = exynos_pmu_domain_alloc,
-       .free   = irq_domain_free_irqs_common,
+       .translate      = exynos_pmu_domain_translate,
+       .alloc          = exynos_pmu_domain_alloc,
+       .free           = irq_domain_free_irqs_common,
 };
 
 static int __init exynos_pmu_irq_init(struct device_node *node,
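
The same xlate-to-translate conversion is repeated below for the i.MX GPC and OMAP wakeupgen blocks; in all three cases the hook just unpacks a standard three-cell GIC specifier. A worked example with hypothetical values:

/*
 * For a consumer node with
 *         interrupts = <0 120 4>;       (SPI 120, IRQ_TYPE_LEVEL_HIGH)
 * the stacked domain's ->translate hook sees:
 *         fwspec->fwnode       = handle of the controller's DT node
 *         fwspec->param_count  = 3
 *         fwspec->param[0]     = 0      (0 = SPI; PPIs are rejected)
 *         fwspec->param[1]     = 120    -> *hwirq
 *         fwspec->param[2]     = 4      -> *type
 * and ->alloc then forwards the same specifier to the parent GIC domain via
 * parent_fwspec.fwnode = domain->parent->fwnode.
 */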
index 8c4467fad8370c73374ff104209b779e580852e1..10bf7159b27def3adf90403541fed2214b6bac4e 100644 (file)
@@ -181,40 +181,42 @@ static struct irq_chip imx_gpc_chip = {
 #endif
 };
 
-static int imx_gpc_domain_xlate(struct irq_domain *domain,
-                               struct device_node *controller,
-                               const u32 *intspec,
-                               unsigned int intsize,
-                               unsigned long *out_hwirq,
-                               unsigned int *out_type)
+static int imx_gpc_domain_translate(struct irq_domain *d,
+                                   struct irq_fwspec *fwspec,
+                                   unsigned long *hwirq,
+                                   unsigned int *type)
 {
-       if (domain->of_node != controller)
-               return -EINVAL; /* Shouldn't happen, really... */
-       if (intsize != 3)
-               return -EINVAL; /* Not GIC compliant */
-       if (intspec[0] != 0)
-               return -EINVAL; /* No PPI should point to this domain */
+       if (is_of_node(fwspec->fwnode)) {
+               if (fwspec->param_count != 3)
+                       return -EINVAL;
 
-       *out_hwirq = intspec[1];
-       *out_type = intspec[2];
-       return 0;
+               /* No PPI should point to this domain */
+               if (fwspec->param[0] != 0)
+                       return -EINVAL;
+
+               *hwirq = fwspec->param[1];
+               *type = fwspec->param[2];
+               return 0;
+       }
+
+       return -EINVAL;
 }
 
 static int imx_gpc_domain_alloc(struct irq_domain *domain,
                                  unsigned int irq,
                                  unsigned int nr_irqs, void *data)
 {
-       struct of_phandle_args *args = data;
-       struct of_phandle_args parent_args;
+       struct irq_fwspec *fwspec = data;
+       struct irq_fwspec parent_fwspec;
        irq_hw_number_t hwirq;
        int i;
 
-       if (args->args_count != 3)
+       if (fwspec->param_count != 3)
                return -EINVAL; /* Not GIC compliant */
-       if (args->args[0] != 0)
+       if (fwspec->param[0] != 0)
                return -EINVAL; /* No PPI should point to this domain */
 
-       hwirq = args->args[1];
+       hwirq = fwspec->param[1];
        if (hwirq >= GPC_MAX_IRQS)
                return -EINVAL; /* Can't deal with this */
 
@@ -222,15 +224,16 @@ static int imx_gpc_domain_alloc(struct irq_domain *domain,
                irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
                                              &imx_gpc_chip, NULL);
 
-       parent_args = *args;
-       parent_args.np = domain->parent->of_node;
-       return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args);
+       parent_fwspec = *fwspec;
+       parent_fwspec.fwnode = domain->parent->fwnode;
+       return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
+                                           &parent_fwspec);
 }
 
 static const struct irq_domain_ops imx_gpc_domain_ops = {
-       .xlate  = imx_gpc_domain_xlate,
-       .alloc  = imx_gpc_domain_alloc,
-       .free   = irq_domain_free_irqs_common,
+       .translate      = imx_gpc_domain_translate,
+       .alloc          = imx_gpc_domain_alloc,
+       .free           = irq_domain_free_irqs_common,
 };
 
 static int __init imx_gpc_init(struct device_node *node,
index c86a5a0aefac7bad66c45b94157cdb6c2b8e8b28..e20fc4178b15b029f098690f0bc9420be170c141 100644 (file)
@@ -117,11 +117,4 @@ config MACH_KIRKWOOD
          Say 'Y' here if you want your kernel to support boards based
          on the Marvell Kirkwood device tree.
 
-config MACH_NETXBIG
-       bool "LaCie 2Big and 5Big Network v2"
-       depends on MACH_KIRKWOOD
-       help
-         Say 'Y' here if you want your kernel to support the
-         LaCie 2Big and 5Big Network v2
-
 endif
index b4f01497ce0bba1db1bf865b8a850714feca3082..ecf9e0c3b107808752fe49af062b8758e1784986 100644 (file)
@@ -13,4 +13,3 @@ endif
 
 obj-$(CONFIG_MACH_DOVE)                 += dove.o
 obj-$(CONFIG_MACH_KIRKWOOD)     += kirkwood.o kirkwood-pm.o
-obj-$(CONFIG_MACH_NETXBIG)      += netxbig.o
diff --git a/arch/arm/mach-mvebu/board.h b/arch/arm/mach-mvebu/board.h
deleted file mode 100644 (file)
index 98e32cc..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Board functions for Marvell System On Chip
- *
- * Copyright (C) 2014
- *
- * Andrew Lunn <andrew@lunn.ch>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ARCH_MVEBU_BOARD_H
-#define __ARCH_MVEBU_BOARD_H
-
-#ifdef CONFIG_MACH_NETXBIG
-void netxbig_init(void);
-#else
-static inline void netxbig_init(void) {};
-#endif
-#endif
index 925f75f54268ee5d4b5b9093ad48b6b1e290ad0a..f9d8e1ea7183a510cb3920056d990407d3ffb0ac 100644 (file)
@@ -25,7 +25,6 @@
 #include "kirkwood.h"
 #include "kirkwood-pm.h"
 #include "common.h"
-#include "board.h"
 
 static struct resource kirkwood_cpufreq_resources[] = {
        [0] = {
@@ -180,9 +179,6 @@ static void __init kirkwood_dt_init(void)
        kirkwood_pm_init();
        kirkwood_dt_eth_fixup();
 
-       if (of_machine_is_compatible("lacie,netxbig"))
-               netxbig_init();
-
        of_platform_populate(NULL, of_default_bus_match_table, auxdata, NULL);
 }
 
diff --git a/arch/arm/mach-mvebu/netxbig.c b/arch/arm/mach-mvebu/netxbig.c
deleted file mode 100644 (file)
index 94b11b6..0000000
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * arch/arm/mach-mvbu/board-netxbig.c
- *
- * LaCie 2Big and 5Big Network v2 board setup
- *
- * Copyright (C) 2010 Simon Guinot <sguinot@lacie.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/leds-kirkwood-netxbig.h>
-#include "common.h"
-
-/*****************************************************************************
- * GPIO extension LEDs
- ****************************************************************************/
-
-/*
- * The LEDs are controlled by a CPLD and can be configured through a GPIO
- * extension bus:
- *
- * - address register : bit [0-2] -> GPIO [47-49]
- * - data register    : bit [0-2] -> GPIO [44-46]
- * - enable register  : GPIO 29
- */
-
-static int netxbig_v2_gpio_ext_addr[] = { 47, 48, 49 };
-static int netxbig_v2_gpio_ext_data[] = { 44, 45, 46 };
-
-static struct netxbig_gpio_ext netxbig_v2_gpio_ext = {
-       .addr           = netxbig_v2_gpio_ext_addr,
-       .num_addr       = ARRAY_SIZE(netxbig_v2_gpio_ext_addr),
-       .data           = netxbig_v2_gpio_ext_data,
-       .num_data       = ARRAY_SIZE(netxbig_v2_gpio_ext_data),
-       .enable         = 29,
-};
-
-/*
- * Address register selection:
- *
- * addr | register
- * ----------------------------
- *   0  | front LED
- *   1  | front LED brightness
- *   2  | SATA LED brightness
- *   3  | SATA0 LED
- *   4  | SATA1 LED
- *   5  | SATA2 LED
- *   6  | SATA3 LED
- *   7  | SATA4 LED
- *
- * Data register configuration:
- *
- * data | LED brightness
- * -------------------------------------------------
- *   0  | min (off)
- *   -  | -
- *   7  | max
- *
- * data | front LED mode
- * -------------------------------------------------
- *   0  | fix off
- *   1  | fix blue on
- *   2  | fix red on
- *   3  | blink blue on=1 sec and blue off=1 sec
- *   4  | blink red on=1 sec and red off=1 sec
- *   5  | blink blue on=2.5 sec and red on=0.5 sec
- *   6  | blink blue on=1 sec and red on=1 sec
- *   7  | blink blue on=0.5 sec and blue off=2.5 sec
- *
- * data | SATA LED mode
- * -------------------------------------------------
- *   0  | fix off
- *   1  | SATA activity blink
- *   2  | fix red on
- *   3  | blink blue on=1 sec and blue off=1 sec
- *   4  | blink red on=1 sec and red off=1 sec
- *   5  | blink blue on=2.5 sec and red on=0.5 sec
- *   6  | blink blue on=1 sec and red on=1 sec
- *   7  | fix blue on
- */
-
-static int netxbig_v2_red_mled[NETXBIG_LED_MODE_NUM] = {
-       [NETXBIG_LED_OFF]       = 0,
-       [NETXBIG_LED_ON]        = 2,
-       [NETXBIG_LED_SATA]      = NETXBIG_LED_INVALID_MODE,
-       [NETXBIG_LED_TIMER1]    = 4,
-       [NETXBIG_LED_TIMER2]    = NETXBIG_LED_INVALID_MODE,
-};
-
-static int netxbig_v2_blue_pwr_mled[NETXBIG_LED_MODE_NUM] = {
-       [NETXBIG_LED_OFF]       = 0,
-       [NETXBIG_LED_ON]        = 1,
-       [NETXBIG_LED_SATA]      = NETXBIG_LED_INVALID_MODE,
-       [NETXBIG_LED_TIMER1]    = 3,
-       [NETXBIG_LED_TIMER2]    = 7,
-};
-
-static int netxbig_v2_blue_sata_mled[NETXBIG_LED_MODE_NUM] = {
-       [NETXBIG_LED_OFF]       = 0,
-       [NETXBIG_LED_ON]        = 7,
-       [NETXBIG_LED_SATA]      = 1,
-       [NETXBIG_LED_TIMER1]    = 3,
-       [NETXBIG_LED_TIMER2]    = NETXBIG_LED_INVALID_MODE,
-};
-
-static struct netxbig_led_timer netxbig_v2_led_timer[] = {
-       [0] = {
-               .delay_on       = 500,
-               .delay_off      = 500,
-               .mode           = NETXBIG_LED_TIMER1,
-       },
-       [1] = {
-               .delay_on       = 500,
-               .delay_off      = 1000,
-               .mode           = NETXBIG_LED_TIMER2,
-       },
-};
-
-#define NETXBIG_LED(_name, maddr, mval, baddr)                 \
-       { .name         = _name,                                \
-         .mode_addr    = maddr,                                \
-         .mode_val     = mval,                                 \
-         .bright_addr  = baddr }
-
-static struct netxbig_led net2big_v2_leds_ctrl[] = {
-       NETXBIG_LED("net2big-v2:blue:power", 0, netxbig_v2_blue_pwr_mled,  1),
-       NETXBIG_LED("net2big-v2:red:power",  0, netxbig_v2_red_mled,       1),
-       NETXBIG_LED("net2big-v2:blue:sata0", 3, netxbig_v2_blue_sata_mled, 2),
-       NETXBIG_LED("net2big-v2:red:sata0",  3, netxbig_v2_red_mled,       2),
-       NETXBIG_LED("net2big-v2:blue:sata1", 4, netxbig_v2_blue_sata_mled, 2),
-       NETXBIG_LED("net2big-v2:red:sata1",  4, netxbig_v2_red_mled,       2),
-};
-
-static struct netxbig_led_platform_data net2big_v2_leds_data = {
-       .gpio_ext       = &netxbig_v2_gpio_ext,
-       .timer          = netxbig_v2_led_timer,
-       .num_timer      = ARRAY_SIZE(netxbig_v2_led_timer),
-       .leds           = net2big_v2_leds_ctrl,
-       .num_leds       = ARRAY_SIZE(net2big_v2_leds_ctrl),
-};
-
-static struct netxbig_led net5big_v2_leds_ctrl[] = {
-       NETXBIG_LED("net5big-v2:blue:power", 0, netxbig_v2_blue_pwr_mled,  1),
-       NETXBIG_LED("net5big-v2:red:power",  0, netxbig_v2_red_mled,       1),
-       NETXBIG_LED("net5big-v2:blue:sata0", 3, netxbig_v2_blue_sata_mled, 2),
-       NETXBIG_LED("net5big-v2:red:sata0",  3, netxbig_v2_red_mled,       2),
-       NETXBIG_LED("net5big-v2:blue:sata1", 4, netxbig_v2_blue_sata_mled, 2),
-       NETXBIG_LED("net5big-v2:red:sata1",  4, netxbig_v2_red_mled,       2),
-       NETXBIG_LED("net5big-v2:blue:sata2", 5, netxbig_v2_blue_sata_mled, 2),
-       NETXBIG_LED("net5big-v2:red:sata2",  5, netxbig_v2_red_mled,       2),
-       NETXBIG_LED("net5big-v2:blue:sata3", 6, netxbig_v2_blue_sata_mled, 2),
-       NETXBIG_LED("net5big-v2:red:sata3",  6, netxbig_v2_red_mled,       2),
-       NETXBIG_LED("net5big-v2:blue:sata4", 7, netxbig_v2_blue_sata_mled, 2),
-       NETXBIG_LED("net5big-v2:red:sata4",  7, netxbig_v2_red_mled,       2),
-};
-
-static struct netxbig_led_platform_data net5big_v2_leds_data = {
-       .gpio_ext       = &netxbig_v2_gpio_ext,
-       .timer          = netxbig_v2_led_timer,
-       .num_timer      = ARRAY_SIZE(netxbig_v2_led_timer),
-       .leds           = net5big_v2_leds_ctrl,
-       .num_leds       = ARRAY_SIZE(net5big_v2_leds_ctrl),
-};
-
-static struct platform_device netxbig_v2_leds = {
-       .name           = "leds-netxbig",
-       .id             = -1,
-       .dev            = {
-               .platform_data  = &net2big_v2_leds_data,
-       },
-};
-
-void __init netxbig_init(void)
-{
-
-       if (of_machine_is_compatible("lacie,net5big_v2"))
-               netxbig_v2_leds.dev.platform_data = &net5big_v2_leds_data;
-       platform_device_register(&netxbig_v2_leds);
-}
index b3a0dff67e3fc48bb34b13567778d0f178298834..33d1460a56391c397ef305d8ed8275fa94b15720 100644 (file)
@@ -49,6 +49,7 @@ config SOC_OMAP5
        select OMAP_INTERCONNECT
        select OMAP_INTERCONNECT_BARRIER
        select PM_OPP if PM
+       select ZONE_DMA if ARM_LPAE
 
 config SOC_AM33XX
        bool "TI AM33XX"
@@ -78,6 +79,7 @@ config SOC_DRA7XX
        select OMAP_INTERCONNECT
        select OMAP_INTERCONNECT_BARRIER
        select PM_OPP if PM
+       select ZONE_DMA if ARM_LPAE
 
 config ARCH_OMAP2PLUS
        bool
index 6133eaac685df545ec5a6665db0ae72051d8c2fa..fb219a30c10c60ff56f8b6c0ae754a861f886dc0 100644 (file)
@@ -106,6 +106,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
 MACHINE_END
 
 static const char *const omap36xx_boards_compat[] __initconst = {
+       "ti,omap3630",
        "ti,omap36xx",
        NULL,
 };
@@ -243,6 +244,9 @@ static const char *const omap5_boards_compat[] __initconst = {
 };
 
 DT_MACHINE_START(OMAP5_DT, "Generic OMAP5 (Flattened Device Tree)")
+#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
+       .dma_zone_size  = SZ_2G,
+#endif
        .reserve        = omap_reserve,
        .smp            = smp_ops(omap4_smp_ops),
        .map_io         = omap5_map_io,
@@ -288,6 +292,9 @@ static const char *const dra74x_boards_compat[] __initconst = {
 };
 
 DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)")
+#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
+       .dma_zone_size  = SZ_2G,
+#endif
        .reserve        = omap_reserve,
        .smp            = smp_ops(omap4_smp_ops),
        .map_io         = dra7xx_map_io,
@@ -308,6 +315,9 @@ static const char *const dra72x_boards_compat[] __initconst = {
 };
 
 DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)")
+#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
+       .dma_zone_size  = SZ_2G,
+#endif
        .reserve        = omap_reserve,
        .map_io         = dra7xx_map_io,
        .init_early     = dra7xx_init_early,
index e1d2e991d17a31fc15f1c44616c9b4b7f9de3a84..db7e0bab3587cb975ea1ecb5c89e10956fcc7214 100644 (file)
@@ -399,40 +399,42 @@ static struct irq_chip wakeupgen_chip = {
 #endif
 };
 
-static int wakeupgen_domain_xlate(struct irq_domain *domain,
-                                 struct device_node *controller,
-                                 const u32 *intspec,
-                                 unsigned int intsize,
-                                 unsigned long *out_hwirq,
-                                 unsigned int *out_type)
+static int wakeupgen_domain_translate(struct irq_domain *d,
+                                     struct irq_fwspec *fwspec,
+                                     unsigned long *hwirq,
+                                     unsigned int *type)
 {
-       if (domain->of_node != controller)
-               return -EINVAL; /* Shouldn't happen, really... */
-       if (intsize != 3)
-               return -EINVAL; /* Not GIC compliant */
-       if (intspec[0] != 0)
-               return -EINVAL; /* No PPI should point to this domain */
+       if (is_of_node(fwspec->fwnode)) {
+               if (fwspec->param_count != 3)
+                       return -EINVAL;
 
-       *out_hwirq = intspec[1];
-       *out_type = intspec[2];
-       return 0;
+               /* No PPI should point to this domain */
+               if (fwspec->param[0] != 0)
+                       return -EINVAL;
+
+               *hwirq = fwspec->param[1];
+               *type = fwspec->param[2];
+               return 0;
+       }
+
+       return -EINVAL;
 }
 
 static int wakeupgen_domain_alloc(struct irq_domain *domain,
                                  unsigned int virq,
                                  unsigned int nr_irqs, void *data)
 {
-       struct of_phandle_args *args = data;
-       struct of_phandle_args parent_args;
+       struct irq_fwspec *fwspec = data;
+       struct irq_fwspec parent_fwspec;
        irq_hw_number_t hwirq;
        int i;
 
-       if (args->args_count != 3)
+       if (fwspec->param_count != 3)
                return -EINVAL; /* Not GIC compliant */
-       if (args->args[0] != 0)
+       if (fwspec->param[0] != 0)
                return -EINVAL; /* No PPI should point to this domain */
 
-       hwirq = args->args[1];
+       hwirq = fwspec->param[1];
        if (hwirq >= MAX_IRQS)
                return -EINVAL; /* Can't deal with this */
 
@@ -440,15 +442,16 @@ static int wakeupgen_domain_alloc(struct irq_domain *domain,
                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                              &wakeupgen_chip, NULL);
 
-       parent_args = *args;
-       parent_args.np = domain->parent->of_node;
-       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args);
+       parent_fwspec = *fwspec;
+       parent_fwspec.fwnode = domain->parent->fwnode;
+       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+                                           &parent_fwspec);
 }
 
 static const struct irq_domain_ops wakeupgen_domain_ops = {
-       .xlate  = wakeupgen_domain_xlate,
-       .alloc  = wakeupgen_domain_alloc,
-       .free   = irq_domain_free_irqs_common,
+       .translate      = wakeupgen_domain_translate,
+       .alloc          = wakeupgen_domain_alloc,
+       .free           = irq_domain_free_irqs_common,
 };
 
 /*
index ea56397599c21b1f0c7d806ccfe9b1f4fb062f52..1dfe34654c43a353a34ac2cb9b941ff9f951b275 100644 (file)
@@ -559,7 +559,14 @@ static void pdata_quirks_check(struct pdata_init *quirks)
 
 void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table)
 {
-       omap_sdrc_init(NULL, NULL);
+       /*
+        * We still need this for omap2420 and omap3 PM to work, others are
+        * using drivers/misc/sram.c already.
+        */
+       if (of_machine_is_compatible("ti,omap2420") ||
+           of_machine_is_compatible("ti,omap3"))
+               omap_sdrc_init(NULL, NULL);
+
        pdata_quirks_check(auxdata_quirks);
        of_platform_populate(NULL, omap_dt_match_table,
                             omap_auxdata_lookup, NULL);
index 06005d3f2ba33523d1ee4f9e96499290a9908572..20ce2d386f172c849459e94d1bb88601f1fdd0e7 100644 (file)
 #define PECR_IS(n)     ((1 << ((n) * 2)) << 29)
 
 extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
-#ifdef CONFIG_PM
-
-#define ISRAM_START    0x5c000000
-#define ISRAM_SIZE     SZ_256K
 
 /*
  * NAND NFC: DFI bus arbitration subset
@@ -54,6 +50,11 @@ extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
 #define NDCR_ND_ARB_EN         (1 << 12)
 #define NDCR_ND_ARB_CNTL       (1 << 19)
 
+#ifdef CONFIG_PM
+
+#define ISRAM_START    0x5c000000
+#define ISRAM_SIZE     SZ_256K
+
 static void __iomem *sram;
 static unsigned long wakeup_src;
 
index df7537f12469a15669b89aba42bd96445e789305..c21941349b3ef761680d4b2c9b58ee7cf4008f96 100644 (file)
@@ -419,28 +419,24 @@ config CPU_THUMBONLY
 config CPU_32v3
        bool
        select CPU_USE_DOMAINS if MMU
-       select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
        select NEED_KUSER_HELPERS
        select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v4
        bool
        select CPU_USE_DOMAINS if MMU
-       select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
        select NEED_KUSER_HELPERS
        select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v4T
        bool
        select CPU_USE_DOMAINS if MMU
-       select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
        select NEED_KUSER_HELPERS
        select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v5
        bool
        select CPU_USE_DOMAINS if MMU
-       select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
        select NEED_KUSER_HELPERS
        select TLS_REG_EMUL if SMP || !MMU
 
@@ -805,14 +801,6 @@ config TLS_REG_EMUL
          a few prototypes like that in existence) and therefore access to
          that required register must be emulated.
 
-config NEEDS_SYSCALL_FOR_CMPXCHG
-       bool
-       select NEED_KUSER_HELPERS
-       help
-         SMP on a pre-ARMv6 processor?  Well OK then.
-         Forget about fast user space cmpxchg support.
-         It is just not possible.
-
 config NEED_KUSER_HELPERS
        bool
 
index 1a7815e5421b6b1d3ad774b28915b84b52726136..ad4eb2d26e1697fc6a16f47a8805e532e198a693 100644 (file)
@@ -1407,12 +1407,19 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+       unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long off = vma->vm_pgoff;
 
        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
 
        if (!pages)
                return -ENXIO;
 
+       if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+               return -ENXIO;
+
+       pages += off;
+
        do {
                int ret = vm_insert_page(vma, uaddr, *pages++);
                if (ret) {
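
The added check rejects mmap() requests whose offset or length would run past the pages actually backing the DMA buffer; a quick worked example with hypothetical sizes:

/*
 * Example: a buffer of size = 3 * PAGE_SIZE, so nr_pages = 3.
 *
 *   vm_pgoff = 0, usize = 3 pages  ->  0 < 3 and 3 <= 3 - 0  : allowed
 *   vm_pgoff = 2, usize = 1 page   ->  2 < 3 and 1 <= 3 - 2  : allowed
 *   vm_pgoff = 2, usize = 2 pages  ->  2 > 3 - 2             : -ENXIO
 *   vm_pgoff = 3, any usize        ->  off >= nr_pages       : -ENXIO
 *
 * pages += off then makes the vm_insert_page() loop start at the requested
 * page instead of always mapping from the beginning of the buffer.
 */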
index 0d629b8f973fc2ca63aacb59e5baaf718b194543..daafcf121ce082aa0a0fbb43be3e3712b2942f3e 100644 (file)
@@ -593,6 +593,28 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
        arm_notify_die("", regs, &info, ifsr, 0);
 }
 
+/*
+ * Abort handler to be used only during first unmasking of asynchronous aborts
+ * on the boot CPU. This makes sure that the machine will not die if the
+ * firmware/bootloader left an imprecise abort pending for us to trip over.
+ */
+static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
+                                     struct pt_regs *regs)
+{
+       pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
+               "first unmask, this is most likely caused by a "
+               "firmware/bootloader bug.\n", fsr);
+
+       return 0;
+}
+
+void __init early_abt_enable(void)
+{
+       fsr_info[22].fn = early_abort_handler;
+       local_abt_enable();
+       fsr_info[22].fn = do_bad;
+}
+
 #ifndef CONFIG_ARM_LPAE
 static int __init exceptions_init(void)
 {
index cf08bdfbe0d6b6168970e7962341dd407c2823fd..05ec5e0df32d7bad17f4564a74ad3fcbed88ee9c 100644 (file)
@@ -24,5 +24,6 @@ static inline int fsr_fs(unsigned int fsr)
 
 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 unsigned long search_exception_table(unsigned long addr);
+void early_abt_enable(void);
 
 #endif /* __ARCH_ARM_FAULT_H */
index 7cd15143a507740155ad6dbe01e3dbef371111fe..4867f5daf82c99bdf4ee64ebb6b61613cd792a3e 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/mach/pci.h>
 #include <asm/fixmap.h>
 
+#include "fault.h"
 #include "mm.h"
 #include "tcm.h"
 
@@ -1363,6 +1364,9 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
         */
        local_flush_tlb_all();
        flush_cache_all();
+
+       /* Enable asynchronous aborts */
+       early_abt_enable();
 }
 
 static void __init kmap_init(void)
index 876060bcceeb3ea989e24fe18b42910f3cce4058..b8efb8cd1f73ee1cb7de196ae88d9173203c4d2a 100644 (file)
@@ -614,6 +614,7 @@ load_common:
                case BPF_LD | BPF_B | BPF_IND:
                        load_order = 0;
 load_ind:
+                       update_on_xread(ctx);
                        OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
                        goto load_common;
                case BPF_LDX | BPF_IMM:
index 2235081a04eeac818f58b44923aa565026c0b746..8861c367d06114651920b05420e53ebde3099712 100644 (file)
@@ -495,7 +495,7 @@ void __init orion_ge00_switch_init(struct dsa_platform_data *d, int irq)
 
        d->netdev = &orion_ge00.dev;
        for (i = 0; i < d->nr_chips; i++)
-               d->chip[i].host_dev = &orion_ge00_shared.dev;
+               d->chip[i].host_dev = &orion_ge_mvmdio.dev;
        orion_switch_device.dev.platform_data = d;
 
        platform_device_register(&orion_switch_device);
index aedec81d11988181e3e6f0631113fd01e4737ac8..f6455273b2f8768a303db0ccc4a4e875df08eb71 100644 (file)
@@ -45,7 +45,6 @@
  * it does.
  */
 
-#include <byteswap.h>
 #include <elf.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <sys/types.h>
 #include <unistd.h>
 
+#define swab16(x) \
+       ((((x) & 0x00ff) << 8) | \
+        (((x) & 0xff00) >> 8))
+
+#define swab32(x) \
+       ((((x) & 0x000000ff) << 24) | \
+        (((x) & 0x0000ff00) <<  8) | \
+        (((x) & 0x00ff0000) >>  8) | \
+        (((x) & 0xff000000) >> 24))
+
 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define HOST_ORDER ELFDATA2LSB
 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
@@ -104,17 +113,17 @@ static void cleanup(void)
 
 static Elf32_Word read_elf_word(Elf32_Word word, bool swap)
 {
-       return swap ? bswap_32(word) : word;
+       return swap ? swab32(word) : word;
 }
 
 static Elf32_Half read_elf_half(Elf32_Half half, bool swap)
 {
-       return swap ? bswap_16(half) : half;
+       return swap ? swab16(half) : half;
 }
 
 static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap)
 {
-       *dst = swap ? bswap_32(val) : val;
+       *dst = swap ? swab32(val) : val;
 }
 
 int main(int argc, char **argv)
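
The open-coded swab16()/swab32() macros stand in for the bswap_16()/bswap_32() helpers from <byteswap.h>, which may not be available on non-glibc build hosts. A minimal host-side self-test of the macros (illustrative only, not part of the patch):

#include <assert.h>

#define swab16(x) \
	((((x) & 0x00ff) << 8) | \
	 (((x) & 0xff00) >> 8))

#define swab32(x) \
	((((x) & 0x000000ff) << 24) | \
	 (((x) & 0x0000ff00) <<  8) | \
	 (((x) & 0x00ff0000) >>  8) | \
	 (((x) & 0xff000000) >> 24))

int main(void)
{
	assert(swab16(0x1234) == 0x3412);
	assert(swab32(0x11223344UL) == 0x44332211UL);
	return 0;
}
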
index 07d1811aa03fcd1ecd5ee7c260688a59f7ab97e4..440d906429deab9c1daf0de497ead020378891af 100644 (file)
@@ -348,6 +348,33 @@ config ARM64_ERRATUM_843419
 
          If unsure, say Y.
 
+config CAVIUM_ERRATUM_22375
+       bool "Cavium erratum 22375, 24313"
+       default y
+       help
+         Enable workaround for erratum 22375, 24313.
+
+         This implements two gicv3-its errata workarounds for ThunderX. Both
+         with small impact affecting only ITS table allocation.
+
+           erratum 22375: only alloc 8MB table size
+           erratum 24313: ignore memory access type
+
+         The fixes are in ITS initialization and basically ignore memory access
+         type and table size provided by the TYPER and BASER registers.
+
+         If unsure, say Y.
+
+config CAVIUM_ERRATUM_23154
+       bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
+       default y
+       help
+         The gicv3 of ThunderX requires a modified version for
+         reading the IAR status to ensure data synchronization
+         (access to icc_iar1_el1 is not sync'ed before and after).
+
+         If unsure, say Y.
+
 endmenu
 
 
index f9914d7c1bb00b5c4cbe7a19c0f62c8eca54cf81..d10b5d483022f5374fa16c7783b7fb62014c3acb 100644 (file)
@@ -42,7 +42,7 @@ endif
 CHECKFLAGS     += -D__aarch64__
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
-CFLAGS_MODULE  += -mcmodel=large
+KBUILD_CFLAGS_MODULE   += -mcmodel=large
 endif
 
 # Default value
index d831bc2ac204b9ab19b98859f135c0386864db35..fac1720472f9f6ce4e9ae9bbf538f1bf6a036557 100644 (file)
                                reg = <0x0 0x7c600000 0x0 0x200000>;
                                pmd-controller = <3>;
                        };
+
+                       edacl3@7e600000 {
+                               compatible = "apm,xgene-edac-l3";
+                               reg = <0x0 0x7e600000 0x0 0x1000>;
+                       };
+
+                       edacsoc@7e930000 {
+                               compatible = "apm,xgene-edac-soc-v1";
+                               reg = <0x0 0x7e930000 0x0 0x1000>;
+                       };
                };
 
                pcie0: pcie@1f2b0000 {
index 637e046f0e367dd23fbe391dc1542dd97a315504..3c386680357ebc2af6b0b734e26cdb1165194020 100644 (file)
 
                                button@1 {
                                        debounce_interval = <50>;
-                                       wakeup = <1>;
+                                       wakeup-source;
                                        linux,code = <116>;
                                        label = "POWER";
                                        gpios = <&iofpga_gpio0 0 0x4>;
                                };
                                button@2 {
                                        debounce_interval = <50>;
-                                       wakeup = <1>;
+                                       wakeup-source;
                                        linux,code = <102>;
                                        label = "HOME";
                                        gpios = <&iofpga_gpio0 1 0x4>;
                                };
                                button@3 {
                                        debounce_interval = <50>;
-                                       wakeup = <1>;
+                                       wakeup-source;
                                        linux,code = <152>;
                                        label = "RLOCK";
                                        gpios = <&iofpga_gpio0 2 0x4>;
                                };
                                button@4 {
                                        debounce_interval = <50>;
-                                       wakeup = <1>;
+                                       wakeup-source;
                                        linux,code = <115>;
                                        label = "VOL+";
                                        gpios = <&iofpga_gpio0 3 0x4>;
                                };
                                button@5 {
                                        debounce_interval = <50>;
-                                       wakeup = <1>;
+                                       wakeup-source;
                                        linux,code = <114>;
                                        label = "VOL-";
                                        gpios = <&iofpga_gpio0 4 0x4>;
                                };
                                button@6 {
                                        debounce_interval = <50>;
-                                       wakeup = <1>;
+                                       wakeup-source;
                                        linux,code = <99>;
                                        label = "NMI";
                                        gpios = <&iofpga_gpio0 5 0x4>;
index 208cec08a74fa09973473adca3c69dfbcdb6111c..5f8a38dee2744d6655bb6eceb3cabf6917c80ad4 100644 (file)
@@ -92,4 +92,9 @@ static inline const char *acpi_get_enable_method(int cpu)
 {
        return acpi_psci_present() ? "psci" : NULL;
 }
+
+#ifdef CONFIG_ACPI_APEI
+pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
+#endif
+
 #endif /*_ASM_ACPI_H*/
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
new file mode 100644 (file)
index 0000000..030cdcb
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * arch/arm64/include/asm/arch_gicv3.h
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_ARCH_GICV3_H
+#define __ASM_ARCH_GICV3_H
+
+#include <asm/sysreg.h>
+
+#define ICC_EOIR1_EL1                  sys_reg(3, 0, 12, 12, 1)
+#define ICC_DIR_EL1                    sys_reg(3, 0, 12, 11, 1)
+#define ICC_IAR1_EL1                   sys_reg(3, 0, 12, 12, 0)
+#define ICC_SGI1R_EL1                  sys_reg(3, 0, 12, 11, 5)
+#define ICC_PMR_EL1                    sys_reg(3, 0, 4, 6, 0)
+#define ICC_CTLR_EL1                   sys_reg(3, 0, 12, 12, 4)
+#define ICC_SRE_EL1                    sys_reg(3, 0, 12, 12, 5)
+#define ICC_GRPEN1_EL1                 sys_reg(3, 0, 12, 12, 7)
+
+#define ICC_SRE_EL2                    sys_reg(3, 4, 12, 9, 5)
+
+/*
+ * System register definitions
+ */
+#define ICH_VSEIR_EL2                  sys_reg(3, 4, 12, 9, 4)
+#define ICH_HCR_EL2                    sys_reg(3, 4, 12, 11, 0)
+#define ICH_VTR_EL2                    sys_reg(3, 4, 12, 11, 1)
+#define ICH_MISR_EL2                   sys_reg(3, 4, 12, 11, 2)
+#define ICH_EISR_EL2                   sys_reg(3, 4, 12, 11, 3)
+#define ICH_ELSR_EL2                   sys_reg(3, 4, 12, 11, 5)
+#define ICH_VMCR_EL2                   sys_reg(3, 4, 12, 11, 7)
+
+#define __LR0_EL2(x)                   sys_reg(3, 4, 12, 12, x)
+#define __LR8_EL2(x)                   sys_reg(3, 4, 12, 13, x)
+
+#define ICH_LR0_EL2                    __LR0_EL2(0)
+#define ICH_LR1_EL2                    __LR0_EL2(1)
+#define ICH_LR2_EL2                    __LR0_EL2(2)
+#define ICH_LR3_EL2                    __LR0_EL2(3)
+#define ICH_LR4_EL2                    __LR0_EL2(4)
+#define ICH_LR5_EL2                    __LR0_EL2(5)
+#define ICH_LR6_EL2                    __LR0_EL2(6)
+#define ICH_LR7_EL2                    __LR0_EL2(7)
+#define ICH_LR8_EL2                    __LR8_EL2(0)
+#define ICH_LR9_EL2                    __LR8_EL2(1)
+#define ICH_LR10_EL2                   __LR8_EL2(2)
+#define ICH_LR11_EL2                   __LR8_EL2(3)
+#define ICH_LR12_EL2                   __LR8_EL2(4)
+#define ICH_LR13_EL2                   __LR8_EL2(5)
+#define ICH_LR14_EL2                   __LR8_EL2(6)
+#define ICH_LR15_EL2                   __LR8_EL2(7)
+
+#define __AP0Rx_EL2(x)                 sys_reg(3, 4, 12, 8, x)
+#define ICH_AP0R0_EL2                  __AP0Rx_EL2(0)
+#define ICH_AP0R1_EL2                  __AP0Rx_EL2(1)
+#define ICH_AP0R2_EL2                  __AP0Rx_EL2(2)
+#define ICH_AP0R3_EL2                  __AP0Rx_EL2(3)
+
+#define __AP1Rx_EL2(x)                 sys_reg(3, 4, 12, 9, x)
+#define ICH_AP1R0_EL2                  __AP1Rx_EL2(0)
+#define ICH_AP1R1_EL2                  __AP1Rx_EL2(1)
+#define ICH_AP1R2_EL2                  __AP1Rx_EL2(2)
+#define ICH_AP1R3_EL2                  __AP1Rx_EL2(3)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+/*
+ * Low-level accessors
+ *
+ * These system registers are 32 bits, but we make sure that the compiler
+ * sets the GP register's most significant bits to 0 with an explicit cast.
+ */
+
+static inline void gic_write_eoir(u32 irq)
+{
+       asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" ((u64)irq));
+       isb();
+}
+
+static inline void gic_write_dir(u32 irq)
+{
+       asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" ((u64)irq));
+       isb();
+}
+
+static inline u64 gic_read_iar_common(void)
+{
+       u64 irqstat;
+
+       asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+       return irqstat;
+}
+
+/*
+ * Cavium ThunderX erratum 23154
+ *
+ * The gicv3 of ThunderX requires a modified version for reading the
+ * IAR status to ensure data synchronization (access to icc_iar1_el1
+ * is not sync'ed before and after).
+ */
+static inline u64 gic_read_iar_cavium_thunderx(void)
+{
+       u64 irqstat;
+
+       asm volatile(
+               "nop;nop;nop;nop\n\t"
+               "nop;nop;nop;nop\n\t"
+               "mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t"
+               "nop;nop;nop;nop"
+               : "=r" (irqstat));
+       mb();
+
+       return irqstat;
+}
+
+static inline void gic_write_pmr(u32 val)
+{
+       asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val));
+}
+
+static inline void gic_write_ctlr(u32 val)
+{
+       asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" ((u64)val));
+       isb();
+}
+
+static inline void gic_write_grpen1(u32 val)
+{
+       asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" ((u64)val));
+       isb();
+}
+
+static inline void gic_write_sgi1r(u64 val)
+{
+       asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
+}
+
+static inline u32 gic_read_sre(void)
+{
+       u64 val;
+
+       asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
+       return val;
+}
+
+static inline void gic_write_sre(u32 val)
+{
+       asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" ((u64)val));
+       isb();
+}
+
+#define gic_read_typer(c)              readq_relaxed(c)
+#define gic_write_irouter(v, c)                writeq_relaxed(v, c)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_ARCH_GICV3_H */
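
Two IAR readers are provided so the GICv3 driver can pick one at run time. A sketch of how that selection might look on the driver side, keyed off the new capability bit (assumed usage, not shown in this diff):

/* gic_read_iar_common()/gic_read_iar_cavium_thunderx() come from the new
 * arch_gicv3.h above; cpus_have_cap() is the arm64 capability query. */
static u64 gic_read_iar(void)
{
	if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
		return gic_read_iar_cavium_thunderx();

	return gic_read_iar_common();
}
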
index 35a67783cfa088d4166de9ff7ed6f993b899c09b..1e247ac2601af41012823f0a639e2f9393af6d0b 100644 (file)
@@ -54,7 +54,7 @@
 #define ATOMIC_INIT(i) { (i) }
 
 #define atomic_read(v)                 READ_ONCE((v)->counter)
-#define atomic_set(v, i)               (((v)->counter) = (i))
+#define atomic_set(v, i)               WRITE_ONCE(((v)->counter), (i))
 #define atomic_xchg(v, new)            xchg(&((v)->counter), (new))
 #define atomic_cmpxchg(v, old, new)    cmpxchg(&((v)->counter), (old), (new))
 
index 171570702bb801431f141f46238eb0d9e1895fe5..dbc78d2b8cc6bbd43af2bf4781b6fffaa54a4fe1 100644 (file)
@@ -27,8 +27,9 @@
 #define ARM64_HAS_SYSREG_GIC_CPUIF             3
 #define ARM64_HAS_PAN                          4
 #define ARM64_HAS_LSE_ATOMICS                  5
+#define ARM64_WORKAROUND_CAVIUM_23154          6
 
-#define ARM64_NCAPS                            6
+#define ARM64_NCAPS                            7
 
 #ifndef __ASSEMBLY__
 
index ee6403df9fe4c1f32e9a7b30172a7a141056ec40..100a3d1b17c854d6c1a2c465b56ad8f101ab5efa 100644 (file)
        (0xf                    << MIDR_ARCHITECTURE_SHIFT) | \
        ((partnum)              << MIDR_PARTNUM_SHIFT))
 
-#define ARM_CPU_IMP_ARM                0x41
-#define ARM_CPU_IMP_APM                0x50
+#define ARM_CPU_IMP_ARM                        0x41
+#define ARM_CPU_IMP_APM                        0x50
+#define ARM_CPU_IMP_CAVIUM             0x43
 
-#define ARM_CPU_PART_AEM_V8    0xD0F
-#define ARM_CPU_PART_FOUNDATION        0xD00
-#define ARM_CPU_PART_CORTEX_A57        0xD07
-#define ARM_CPU_PART_CORTEX_A53        0xD03
+#define ARM_CPU_PART_AEM_V8            0xD0F
+#define ARM_CPU_PART_FOUNDATION                0xD00
+#define ARM_CPU_PART_CORTEX_A57                0xD07
+#define ARM_CPU_PART_CORTEX_A53                0xD03
 
-#define APM_CPU_PART_POTENZA   0x000
+#define APM_CPU_PART_POTENZA           0x000
+
+#define CAVIUM_CPU_PART_THUNDERX       0x0A1
 
 #define ID_AA64MMFR0_BIGENDEL0_SHIFT   16
 #define ID_AA64MMFR0_BIGENDEL0_MASK    (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT)
index 6b4c3ad75a2a99b0760a38436b7d159bfe3209c3..67027c611dbd51e66b72492943394ff317c3998a 100644 (file)
@@ -94,6 +94,7 @@
 #define MT_DEVICE_GRE          2
 #define MT_NORMAL_NC           3
 #define MT_NORMAL              4
+#define MT_NORMAL_WT           5
 
 /*
  * Memory types for Stage-2 translation
index b0329be95cb129f3b283f3d75e4dfeff64214bff..571ca0ed4f0565378131b25f317ef8b2f819c3b2 100644 (file)
@@ -60,8 +60,10 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PROT_DEFAULT           (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define PROT_SECT_DEFAULT      (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
+#define PROT_DEVICE_nGnRnE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 #define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
 #define PROT_NORMAL            (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
 
 #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
@@ -79,7 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PAGE_S2                        __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE         __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
-#define PAGE_NONE              __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE              __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
 #define PAGE_SHARED_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_COPY              __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
@@ -496,7 +498,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-                             PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+                             PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
        /* preserve the hardware dirty information */
        if (pte_hw_dirty(pte))
                pte = pte_mkdirty(pte);
index 3bc498c250dc08b04f81abdde71e83f886454147..41e58fe3c041e9adcade0f113064ea42e87045ba 100644 (file)
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls           388
+#define __NR_compat_syscalls           390
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
index cef934a90f17ecec303a1dcd12133a962f27b9d1..5b925b761a2a8857a62720110076e062edd4d7f3 100644 (file)
@@ -797,3 +797,12 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create)
 __SYSCALL(__NR_bpf, sys_bpf)
 #define __NR_execveat 387
 __SYSCALL(__NR_execveat, compat_sys_execveat)
+#define __NR_userfaultfd 388
+__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
+#define __NR_membarrier 389
+__SYSCALL(__NR_membarrier, sys_membarrier)
+
+/*
+ * Please add new compat syscalls above this comment and update
+ * __NR_compat_syscalls in asm/unistd.h.
+ */
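
userfaultfd (388) and membarrier (389) are wired up for compat tasks, and __NR_compat_syscalls in asm/unistd.h is bumped to 390 to match. A hypothetical build-time check (not in the kernel sources) that captures the invariant the new comment asks maintainers to preserve:

/* Hypothetical consistency check, for illustration only: */
_Static_assert(__NR_compat_syscalls == __NR_membarrier + 1,
	       "__NR_compat_syscalls must be one above the newest syscall");
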
index 8d1e7236431b428490cfb95cb6bfbeba24143854..991bf5db2ca19aa19b617e4752d6fd65280b164b 100644 (file)
@@ -19,6 +19,9 @@
 /* Required for AArch32 compatibility. */
 #define SA_RESTORER    0x04000000
 
+#define MINSIGSTKSZ 5120
+#define SIGSTKSZ    16384
+
 #include <asm-generic/signal.h>
 
 #endif
index 19de7537e7d32f407f3f75d5f295ab5f3c7c4291..137d537ddceb8001f15d9daa18c95631b561a085 100644 (file)
 #include <asm/cpu_ops.h>
 #include <asm/smp_plat.h>
 
+#ifdef CONFIG_ACPI_APEI
+# include <linux/efi.h>
+# include <asm/pgtable.h>
+#endif
+
 int acpi_noirq = 1;            /* skip ACPI IRQ initialization */
 int acpi_disabled = 1;
 EXPORT_SYMBOL(acpi_disabled);
@@ -230,3 +235,27 @@ void __init acpi_gic_init(void)
 
        early_acpi_os_unmap_memory((char *)table, tbl_size);
 }
+
+#ifdef CONFIG_ACPI_APEI
+pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
+{
+       /*
+        * According to "Table 8 Map: EFI memory types to AArch64 memory
+        * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
+        * mapped to a corresponding MAIR attribute encoding.
+        * The EFI memory attribute advises all possible capabilities
+        * of a memory region. We use the most efficient capability.
+        */
+
+       u64 attr;
+
+       attr = efi_mem_attributes(addr);
+       if (attr & EFI_MEMORY_WB)
+               return PAGE_KERNEL;
+       if (attr & EFI_MEMORY_WT)
+               return __pgprot(PROT_NORMAL_WT);
+       if (attr & EFI_MEMORY_WC)
+               return __pgprot(PROT_NORMAL_NC);
+       return __pgprot(PROT_DEVICE_nGnRnE);
+}
+#endif
index bcee7abac68ebb5bcbcde829039113e1e76d5a65..937f5e58a4d340a27234c76b5a84fedcf9aa6373 100644 (file)
@@ -284,21 +284,23 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
        __asm__ __volatile__(                                   \
        ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,    \
                    CONFIG_ARM64_PAN)                           \
-       "       mov             %w2, %w1\n"                     \
-       "0:     ldxr"B"         %w1, [%3]\n"                    \
-       "1:     stxr"B"         %w0, %w2, [%3]\n"               \
+       "0:     ldxr"B"         %w2, [%3]\n"                    \
+       "1:     stxr"B"         %w0, %w1, [%3]\n"               \
        "       cbz             %w0, 2f\n"                      \
        "       mov             %w0, %w4\n"                     \
+       "       b               3f\n"                           \
        "2:\n"                                                  \
+       "       mov             %w1, %w2\n"                     \
+       "3:\n"                                                  \
        "       .pushsection     .fixup,\"ax\"\n"               \
        "       .align          2\n"                            \
-       "3:     mov             %w0, %w5\n"                     \
-       "       b               2b\n"                           \
+       "4:     mov             %w0, %w5\n"                     \
+       "       b               3b\n"                           \
        "       .popsection"                                    \
        "       .pushsection     __ex_table,\"a\"\n"            \
        "       .align          3\n"                            \
-       "       .quad           0b, 3b\n"                       \
-       "       .quad           1b, 3b\n"                       \
+       "       .quad           0b, 4b\n"                       \
+       "       .quad           1b, 4b\n"                       \
        "       .popsection\n"                                  \
        ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,    \
                CONFIG_ARM64_PAN)                               \
index 6ffd914385609d0aab875813e4e9e8b690976421..574450c257a4d038e17d4eb8a47b954b20b2b9bb 100644 (file)
@@ -23,6 +23,7 @@
 
 #define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_THUNDERX  MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 
 #define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
                        MIDR_ARCHITECTURE_MASK)
@@ -81,6 +82,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .capability = ARM64_WORKAROUND_845719,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
        },
+#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23154
+       {
+       /* Cavium ThunderX, pass 1.x */
+               .desc = "Cavium erratum 23154",
+               .capability = ARM64_WORKAROUND_CAVIUM_23154,
+               MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
+       },
 #endif
        {
        }
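
With the values added above (implementor 0x43, part number 0x0A1), MIDR_THUNDERX works out to 0x430F0A10, and MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01) covers variant 0, revisions 0-1, i.e. the pass 1.x parts named in the comment. Worked out by hand (illustration only):

/* MIDR_CPU_PART(0x43, 0x0A1)
 *   = (0x43  << 24)	implementor (Cavium)
 *   | (0xf   << 16)	architecture
 *   | (0x0A1 <<  4)	part number (ThunderX)
 *   = 0x430F0A10
 */
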
index 3c9aed32f70b2113773f671053a0f6ac87632b4a..305f30dc9e633fe86947621e54d802744e59df52 100644 (file)
@@ -23,6 +23,8 @@
 #include <asm/cpufeature.h>
 #include <asm/processor.h>
 
+#include <linux/irqchip/arm-gic-v3.h>
+
 static bool
 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
 {
@@ -45,11 +47,26 @@ __ID_FEAT_CHK(id_aa64pfr0);
 __ID_FEAT_CHK(id_aa64mmfr1);
 __ID_FEAT_CHK(id_aa64isar0);
 
+static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
+{
+       bool has_sre;
+
+       if (!has_id_aa64pfr0_feature(entry))
+               return false;
+
+       has_sre = gic_enable_sre();
+       if (!has_sre)
+               pr_warn_once("%s present but disabled by higher exception level\n",
+                            entry->desc);
+
+       return has_sre;
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
                .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
-               .matches = has_id_aa64pfr0_feature,
+               .matches = has_useable_gicv3_cpuif,
                .field_pos = 24,
                .min_field_value = 1,
        },
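
has_useable_gicv3_cpuif() only reports the CPU interface as usable if gic_enable_sre() manages to set ICC_SRE_EL1.SRE; a higher exception level may keep the bit clear. A sketch of that helper as assumed here (it builds on the accessors from the new arch_gicv3.h and is expected to live alongside the GICv3 driver headers):

/* ICC_SRE_EL1_SRE is bit 0 of ICC_SRE_EL1 */
static inline bool gic_enable_sre(void)
{
	u32 val;

	val = gic_read_sre();
	if (val & ICC_SRE_EL1_SRE)
		return true;		/* already enabled */

	val |= ICC_SRE_EL1_SRE;
	gic_write_sre(val);
	val = gic_read_sre();

	/* the write is discarded if EL2/EL3 do not allow SRE */
	return !!(val & ICC_SRE_EL1_SRE);
}
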
index cebf78661a553775003bfee8ec89f65e33e3ec55..253021ef2769078e69793288a8cc067aebb76d34 100644 (file)
@@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook)
 }
 
 /*
- * Call registered single step handers
+ * Call registered single step handlers
  * There is no Syndrome info to check for determining the handler.
  * So we call all the registered handlers, until the right handler is
  * found which returns zero.
@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
  * Use reader/writer locks instead of plain spinlock.
  */
 static LIST_HEAD(break_hook);
-static DEFINE_RWLOCK(break_hook_lock);
+static DEFINE_SPINLOCK(break_hook_lock);
 
 void register_break_hook(struct break_hook *hook)
 {
-       write_lock(&break_hook_lock);
-       list_add(&hook->node, &break_hook);
-       write_unlock(&break_hook_lock);
+       spin_lock(&break_hook_lock);
+       list_add_rcu(&hook->node, &break_hook);
+       spin_unlock(&break_hook_lock);
 }
 
 void unregister_break_hook(struct break_hook *hook)
 {
-       write_lock(&break_hook_lock);
-       list_del(&hook->node);
-       write_unlock(&break_hook_lock);
+       spin_lock(&break_hook_lock);
+       list_del_rcu(&hook->node);
+       spin_unlock(&break_hook_lock);
+       synchronize_rcu();
 }
 
 static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
        struct break_hook *hook;
        int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
 
-       read_lock(&break_hook_lock);
-       list_for_each_entry(hook, &break_hook, node)
+       rcu_read_lock();
+       list_for_each_entry_rcu(hook, &break_hook, node)
                if ((esr & hook->esr_mask) == hook->esr_val)
                        fn = hook->fn;
-       read_unlock(&break_hook_lock);
+       rcu_read_unlock();
 
        return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
 }
index 816120ece6bcecfbcbc1c25be51324c5197b65cd..78dfbd34b6bffd2fa36312da89dc6ca43f036c3c 100644 (file)
@@ -25,10 +25,20 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
        unsigned long kernel_size, kernel_memsize = 0;
        unsigned long nr_pages;
        void *old_image_addr = (void *)*image_addr;
+       unsigned long preferred_offset;
+
+       /*
+        * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
+        * a 2 MB aligned base, which itself may be lower than dram_base, as
+        * long as the resulting offset equals or exceeds it.
+        */
+       preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
+       if (preferred_offset < dram_base)
+               preferred_offset += SZ_2M;
 
        /* Relocate the image, if required. */
        kernel_size = _edata - _text;
-       if (*image_addr != (dram_base + TEXT_OFFSET)) {
+       if (*image_addr != preferred_offset) {
                kernel_memsize = kernel_size + (_end - _edata);
 
                /*
@@ -42,7 +52,7 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
                 * Mustang), we can still place the kernel at the address
                 * 'dram_base + TEXT_OFFSET'.
                 */
-               *image_addr = *reserve_addr = dram_base + TEXT_OFFSET;
+               *image_addr = *reserve_addr = preferred_offset;
                nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
                           EFI_PAGE_SIZE;
                status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
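
The image may now sit below dram_base + TEXT_OFFSET, as long as the 2 MB-aligned base plus TEXT_OFFSET does not fall below dram_base itself. A small host-side illustration of the computation (a TEXT_OFFSET of 0x80000, the usual arm64 default, is assumed):

#include <stdio.h>

#define SZ_2M			0x200000UL
#define TEXT_OFFSET		0x80000UL	/* assumed default */
#define round_down(x, y)	((x) & ~((y) - 1))

int main(void)
{
	unsigned long dram_base = 0x80100000UL;	/* example value */
	unsigned long preferred;

	preferred = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
	if (preferred < dram_base)
		preferred += SZ_2M;	/* 0x80080000 -> 0x80280000 */

	printf("dram_base=%#lx preferred_offset=%#lx\n",
	       dram_base, preferred);
	return 0;
}
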
index e8ca6eaedd0252e2056530d71d519f423931d323..61eb1d17586a859a1fcc1d0d0fd653028d736b11 100644 (file)
@@ -51,15 +51,6 @@ static struct mm_struct efi_mm = {
        INIT_MM_CONTEXT(efi_mm)
 };
 
-static int uefi_debug __initdata;
-static int __init uefi_debug_setup(char *str)
-{
-       uefi_debug = 1;
-
-       return 0;
-}
-early_param("uefi_debug", uefi_debug_setup);
-
 static int __init is_normal_ram(efi_memory_desc_t *md)
 {
        if (md->attribute & EFI_MEMORY_WB)
@@ -171,14 +162,14 @@ static __init void reserve_regions(void)
        efi_memory_desc_t *md;
        u64 paddr, npages, size;
 
-       if (uefi_debug)
+       if (efi_enabled(EFI_DBG))
                pr_info("Processing EFI memory map:\n");
 
        for_each_efi_memory_desc(&memmap, md) {
                paddr = md->phys_addr;
                npages = md->num_pages;
 
-               if (uefi_debug) {
+               if (efi_enabled(EFI_DBG)) {
                        char buf[64];
 
                        pr_info("  0x%012llx-0x%012llx %s",
@@ -194,11 +185,11 @@ static __init void reserve_regions(void)
 
                if (is_reserve_region(md)) {
                        memblock_reserve(paddr, size);
-                       if (uefi_debug)
+                       if (efi_enabled(EFI_DBG))
                                pr_cont("*");
                }
 
-               if (uefi_debug)
+               if (efi_enabled(EFI_DBG))
                        pr_cont("\n");
        }
 
@@ -210,14 +201,14 @@ void __init efi_init(void)
        struct efi_fdt_params params;
 
        /* Grab UEFI information placed in FDT by stub */
-       if (!efi_get_fdt_params(&params, uefi_debug))
+       if (!efi_get_fdt_params(&params))
                return;
 
        efi_system_table = params.system_table;
 
        memblock_reserve(params.mmap & PAGE_MASK,
                         PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
-       memmap.phys_map = (void *)params.mmap;
+       memmap.phys_map = params.mmap;
        memmap.map = early_memremap(params.mmap, params.mmap_size);
        memmap.map_end = memmap.map + params.mmap_size;
        memmap.desc_size = params.desc_size;
@@ -258,7 +249,8 @@ static bool __init efi_virtmap_init(void)
                 */
                if (!is_normal_ram(md))
                        prot = __pgprot(PROT_DEVICE_nGnRE);
-               else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+               else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+                        !PAGE_ALIGNED(md->phys_addr))
                        prot = PAGE_KERNEL_EXEC;
                else
                        prot = PAGE_KERNEL;
@@ -290,7 +282,7 @@ static int __init arm64_enable_runtime_services(void)
        pr_info("Remapping and enabling EFI services.\n");
 
        mapsize = memmap.map_end - memmap.map;
-       memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
+       memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
                                                   mapsize);
        if (!memmap.map) {
                pr_err("Failed to remap EFI memory map\n");
index 08cafc518b9a57ad724530b9dbb144d50683c13f..0f03a8fe23144e777b3ead0a6ea18e038b5d1066 100644 (file)
@@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
 ENDPROC(ftrace_stub)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /* save return value regs*/
+       .macro save_return_regs
+       sub sp, sp, #64
+       stp x0, x1, [sp]
+       stp x2, x3, [sp, #16]
+       stp x4, x5, [sp, #32]
+       stp x6, x7, [sp, #48]
+       .endm
+
+       /* restore return value regs*/
+       .macro restore_return_regs
+       ldp x0, x1, [sp]
+       ldp x2, x3, [sp, #16]
+       ldp x4, x5, [sp, #32]
+       ldp x6, x7, [sp, #48]
+       add sp, sp, #64
+       .endm
+
 /*
  * void ftrace_graph_caller(void)
  *
@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
  * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
  */
 ENTRY(return_to_handler)
-       str     x0, [sp, #-16]!
+       save_return_regs
        mov     x0, x29                 //     parent's fp
        bl      ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
        mov     x30, x0                 // restore the original return address
-       ldr     x0, [sp], #16
+       restore_return_regs
        ret
 END(return_to_handler)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
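
return_to_handler runs between a traced function and its caller, so it has to preserve every register the AAPCS64 allows a return value to occupy, not just x0; x0-x7 are all parameter/result registers. A hypothetical case that would have been corrupted by saving x0 alone:

struct pair {
	long a;
	long b;
};

/* A 16-byte aggregate is returned in x0/x1 under the AAPCS64. */
static struct pair make_pair(long a, long b)
{
	struct pair p = { .a = a, .b = b };

	return p;	/* .a in x0, .b in x1 */
}
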
index 90d09eddd5b27368e358efd44ab552db8330d39c..351a4de1b1e26e07e3b1920d06b199e778042811 100644 (file)
@@ -498,6 +498,8 @@ CPU_LE(     bic     x0, x0, #(3 << 24)      )       // Clear the EE and E0E bits for EL1
        orr     x0, x0, #ICC_SRE_EL2_ENABLE     // Set ICC_SRE_EL2.Enable==1
        msr_s   ICC_SRE_EL2, x0
        isb                                     // Make sure SRE is now set
+       mrs_s   x0, ICC_SRE_EL2                 // Read SRE back,
+       tbz     x0, #0, 3f                      // and check that it sticks
        msr_s   ICH_HCR_EL2, xzr                // Reset ICC_HCR_EL2 to defaults
 
 3:
index f341866aa810340e47aa9b7283b6b472ca2eae84..c08b9ad6f42931e8766d0186daa51a6cce8dbe39 100644 (file)
@@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn)
                aarch64_insn_is_bcond(insn));
 }
 
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
@@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
        unsigned long flags = 0;
        int ret;
 
-       spin_lock_irqsave(&patch_lock, flags);
+       raw_spin_lock_irqsave(&patch_lock, flags);
        waddr = patch_map(addr, FIX_TEXT_POKE0);
 
        ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
 
        patch_unmap(FIX_TEXT_POKE0);
-       spin_unlock_irqrestore(&patch_lock, flags);
+       raw_spin_unlock_irqrestore(&patch_lock, flags);
 
        return ret;
 }
index 6bab21f84a9ff38402e70345016ed50ae8e95e30..232247945b1c215c25fbfd708573fe3def5c68c5 100644 (file)
@@ -364,6 +364,8 @@ static void __init relocate_initrd(void)
                to_free = ram_end - orig_start;
 
        size = orig_end - orig_start;
+       if (!size)
+               return;
 
        /* initrd needs to be relocated completely inside linear mapping */
        new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
index 407991bf79f5116eed6d5aa4aa2f1314ecabe0fc..ccb6078ed9f20fb55132deb48504df3a3134a784 100644 (file)
@@ -48,11 +48,7 @@ int notrace unwind_frame(struct stackframe *frame)
 
        frame->sp = fp + 0x10;
        frame->fp = *(unsigned long *)(fp);
-       /*
-        * -4 here because we care about the PC at time of bl,
-        * not where the return will go.
-        */
-       frame->pc = *(unsigned long *)(fp + 8) - 4;
+       frame->pc = *(unsigned long *)(fp + 8);
 
        return 0;
 }
index 8297d502217e13010bc891e7d68b5f7279ea62b9..44ca4143b013227ebf9eb40eafd721454ab45074 100644 (file)
@@ -80,17 +80,21 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        if (ret == 0) {
                /*
                 * We are resuming from reset with TTBR0_EL1 set to the
-                * idmap to enable the MMU; restore the active_mm mappings in
-                * TTBR0_EL1 unless the active_mm == &init_mm, in which case
-                * the thread entered cpu_suspend with TTBR0_EL1 set to
-                * reserved TTBR0 page tables and should be restored as such.
+                * idmap to enable the MMU; set the TTBR0 to the reserved
+                * page tables to prevent speculative TLB allocations, flush
+                * the local tlb and set the default tcr_el1.t0sz so that
+                * the TTBR0 address space set-up is properly restored.
+                * If the current active_mm != &init_mm we entered cpu_suspend
+                * with mappings in TTBR0 that must be restored, so we switch
+                * them back to complete the address space configuration
+                * restoration before returning.
                 */
-               if (mm == &init_mm)
-                       cpu_set_reserved_ttbr0();
-               else
-                       cpu_switch_mm(mm->pgd, mm);
-
+               cpu_set_reserved_ttbr0();
                flush_tlb_all();
+               cpu_set_default_tcr_t0sz();
+
+               if (mm != &init_mm)
+                       cpu_switch_mm(mm->pgd, mm);
 
                /*
                 * Restore per-cpu offset before any kernel
index 5c7e920e486132c5257255ff843d81a6616670b1..ff5292c6277c4764734a1a1769af06347ccb8c4b 100644 (file)
@@ -16,6 +16,9 @@ menuconfig VIRTUALIZATION
 
 if VIRTUALIZATION
 
+config KVM_ARM_VGIC_V3
+       bool
+
 config KVM
        bool "Kernel-based Virtual Machine (KVM) support"
        depends on OF
@@ -31,6 +34,7 @@ config KVM
        select KVM_VFIO
        select HAVE_KVM_EVENTFD
        select HAVE_KVM_IRQFD
+       select KVM_ARM_VGIC_V3
        ---help---
          Support hosting virtualized guest machines.
 
index aba9ead1384c036a0d6a441c92ced63cfd7ed4ae..9fadf6d7039b721b072379b5af51abce726f5b92 100644 (file)
@@ -287,6 +287,7 @@ retry:
                         * starvation.
                         */
                        mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       mm_flags |= FAULT_FLAG_TRIED;
                        goto retry;
                }
        }
index e4ee7bd8830aed60b9b0b493c4c4dc93863d419c..7783ff05f74cc262c643b1a96932a717e47445c5 100644 (file)
@@ -163,12 +163,14 @@ ENTRY(__cpu_setup)
         *   DEVICE_GRE         010     00001100
         *   NORMAL_NC          011     01000100
         *   NORMAL             100     11111111
+        *   NORMAL_WT          101     10111011
         */
        ldr     x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
                     MAIR(0x04, MT_DEVICE_nGnRE) | \
                     MAIR(0x0c, MT_DEVICE_GRE) | \
                     MAIR(0x44, MT_NORMAL_NC) | \
-                    MAIR(0xff, MT_NORMAL)
+                    MAIR(0xff, MT_NORMAL) | \
+                    MAIR(0xbb, MT_NORMAL_WT)
        msr     mair_el1, x5
        /*
         * Prepare SCTLR
index 91146b416cdba5b5d76946db679849b649545742..99b0a798495077a8aeddce9957f657581a9f92c4 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/leds_pwm.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
-#include <linux/atmel_serial.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 
index f61f2dd67464746c728474a7b5503dd7dfbdcb67..241b9b9729d821510fb2addde4b276e73e6185a7 100644 (file)
@@ -20,4 +20,5 @@ generic-y += sections.h
 generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 97c9bdf8340962209dfe09354175acb1eead01f8..d74fd8ce980aeb327b831fe8c7b4d862b6421524 100644 (file)
@@ -19,8 +19,8 @@
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#define atomic_read(v)         ACCESS_ONCE((v)->counter)
-#define atomic_set(v, i)       (((v)->counter) = i)
+#define atomic_read(v)         READ_ONCE((v)->counter)
+#define atomic_set(v, i)       WRITE_ONCE(((v)->counter), (i))
 
 #define ATOMIC_OP_RETURN(op, asm_op, asm_con)                          \
 static inline int __atomic_##op##_return(int i, atomic_t *v)           \
index 61cd1e786a142c440caa231a665349ed3d8f8e01..91d49c0a31185041055e627689ec09b2ab708bd1 100644 (file)
@@ -46,4 +46,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index f17c4dc6050c7d23ade635f749aa75eed8f0ac1d..945544ec603ee12408928c0abd4db19cfbc784d9 100644 (file)
@@ -59,4 +59,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index ddcb45d7dfa7dcb4f16408ec8eaca6eeae03566d..43afc03e41251d45c2ccd0f0a00656ad5e368fe9 100644 (file)
@@ -178,7 +178,7 @@ static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
 static void __init parse_priority_map(struct megamod_pic *pic,
                                      int *mapping, int size)
 {
-       struct device_node *np = pic->irqhost->of_node;
+       struct device_node *np = irq_domain_get_of_node(pic->irqhost);
        const __be32 *map;
        int i, maplen;
        u32 val;
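
Direct accesses to irq_domain->of_node are being converted to the irq_domain_get_of_node() accessor, which lets the field itself be reorganised later without touching callers. As assumed here, the accessor is a trivial wrapper (sketch, not part of this hunk):

/* assumed to live in include/linux/irqdomain.h */
static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
{
	return d->of_node;
}
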
index 8da5653bd8958605e1dbb2e3e0335aaa24abc9dc..e086f9e937280ec2b6676065a6e3b19bbfcc5ba1 100644 (file)
@@ -57,7 +57,6 @@ config CRIS
        select ARCH_WANT_IPC_PARSE_VERSION
        select GENERIC_IRQ_SHOW
        select GENERIC_IOMAP
-       select GENERIC_CMOS_UPDATE
        select MODULES_USE_ELF_RELA
        select CLONE_BACKWARDS2
        select OLD_SIGSUSPEND
index 4a146e1749c93ce2b2e9d35cec6c167bf4815232..a4877a4217560b5074ca1421377913286a646255 100644 (file)
@@ -354,63 +354,6 @@ no_command_line:
        blo     1b
        nop
 
-#ifdef CONFIG_BLK_DEV_ETRAXIDE
-       ;; disable ATA before enabling it in genconfig below
-       moveq   0,$r0
-       move.d  $r0,[R_ATA_CTRL_DATA]
-       move.d  $r0,[R_ATA_TRANSFER_CNT]
-       move.d  $r0,[R_ATA_CONFIG]
-#if 0
-       move.d  R_PORT_G_DATA, $r1
-       move.d  $r0, [$r1]; assert ATA bus-reset
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       move.d  0x08000000,$r0
-       move.d  $r0,[$r1]
-#endif
-#endif
-
-#ifdef CONFIG_JULIETTE
-       ;; configure external DMA channel 0 before enabling it in genconfig
-
-       moveq   0,$r0
-       move.d  $r0,[R_EXT_DMA_0_ADDR]
-       ; cnt enable, word size, output, stop, size 0
-       move.d    IO_STATE (R_EXT_DMA_0_CMD, cnt, enable)       \
-               | IO_STATE (R_EXT_DMA_0_CMD, rqpol, ahigh)      \
-               | IO_STATE (R_EXT_DMA_0_CMD, apol, ahigh)       \
-               | IO_STATE (R_EXT_DMA_0_CMD, rq_ack, burst)     \
-               | IO_STATE (R_EXT_DMA_0_CMD, wid, word)         \
-               | IO_STATE (R_EXT_DMA_0_CMD, dir, output)       \
-               | IO_STATE (R_EXT_DMA_0_CMD, run, stop)         \
-               | IO_FIELD (R_EXT_DMA_0_CMD, trf_count, 0),$r0
-       move.d  $r0,[R_EXT_DMA_0_CMD]
-
-       ;; reset dma4 and wait for completion
-
-       moveq   IO_STATE (R_DMA_CH4_CMD, cmd, reset),$r0
-       move.b  $r0,[R_DMA_CH4_CMD]
-1:     move.b  [R_DMA_CH4_CMD],$r0
-       and.b   IO_MASK (R_DMA_CH4_CMD, cmd),$r0
-       cmp.b   IO_STATE (R_DMA_CH4_CMD, cmd, reset),$r0
-       beq     1b
-       nop
-
-       ;; reset dma5 and wait for completion
-
-       moveq   IO_STATE (R_DMA_CH5_CMD, cmd, reset),$r0
-       move.b  $r0,[R_DMA_CH5_CMD]
-1:     move.b  [R_DMA_CH5_CMD],$r0
-       and.b   IO_MASK (R_DMA_CH5_CMD, cmd),$r0
-       cmp.b   IO_STATE (R_DMA_CH5_CMD, cmd, reset),$r0
-       beq     1b
-       nop
-#endif
-
        ;; Etrax product HW genconfig setup
 
        moveq   0,$r0
@@ -447,21 +390,6 @@ no_command_line:
                | IO_STATE (R_GEN_CONFIG, dma9, usb),$r0
 
 
-#if defined(CONFIG_ETRAX_DEF_R_PORT_G0_DIR_OUT)
-        or.d      IO_STATE (R_GEN_CONFIG, g0dir, out),$r0
-#endif
-
-#if defined(CONFIG_ETRAX_DEF_R_PORT_G8_15_DIR_OUT)
-        or.d      IO_STATE (R_GEN_CONFIG, g8_15dir, out),$r0
-#endif
-#if defined(CONFIG_ETRAX_DEF_R_PORT_G16_23_DIR_OUT)
-       or.d      IO_STATE (R_GEN_CONFIG, g16_23dir, out),$r0
-#endif
-
-#if defined(CONFIG_ETRAX_DEF_R_PORT_G24_DIR_OUT)
-       or.d      IO_STATE (R_GEN_CONFIG, g24dir, out),$r0
-#endif
-
        move.d  $r0,[genconfig_shadow] ; init a shadow register of R_GEN_CONFIG
 
        move.d  $r0,[R_GEN_CONFIG]
@@ -500,19 +428,9 @@ no_command_line:
        ;; including their shadow registers
 
        move.b  CONFIG_ETRAX_DEF_R_PORT_PA_DIR,$r0
-#if defined(CONFIG_BLUETOOTH) && defined(CONFIG_BLUETOOTH_RESET_PA7)
-       or.b    IO_STATE (R_PORT_PA_DIR, dir7, output),$r0
-#endif
        move.b  $r0,[port_pa_dir_shadow]
        move.b  $r0,[R_PORT_PA_DIR]
        move.b  CONFIG_ETRAX_DEF_R_PORT_PA_DATA,$r0
-#if defined(CONFIG_BLUETOOTH) && defined(CONFIG_BLUETOOTH_RESET_PA7)
-#if defined(CONFIG_BLUETOOTH_RESET_ACTIVE_HIGH)
-       and.b   ~(1 << 7),$r0
-#else
-       or.b    (1 << 7),$r0
-#endif
-#endif
        move.b  $r0,[port_pa_data_shadow]
        move.b  $r0,[R_PORT_PA_DATA]
 
@@ -520,19 +438,9 @@ no_command_line:
        move.b  $r0,[port_pb_config_shadow]
        move.b  $r0,[R_PORT_PB_CONFIG]
        move.b  CONFIG_ETRAX_DEF_R_PORT_PB_DIR,$r0
-#if defined(CONFIG_BLUETOOTH) && defined(CONFIG_BLUETOOTH_RESET_PB5)
-       or.b    IO_STATE (R_PORT_PB_DIR, dir5, output),$r0
-#endif
        move.b  $r0,[port_pb_dir_shadow]
        move.b  $r0,[R_PORT_PB_DIR]
        move.b  CONFIG_ETRAX_DEF_R_PORT_PB_DATA,$r0
-#if defined(CONFIG_BLUETOOTH) && defined(CONFIG_BLUETOOTH_RESET_PB5)
-#if defined(CONFIG_BLUETOOTH_RESET_ACTIVE_HIGH)
-       and.b   ~(1 << 5),$r0
-#else
-       or.b    (1 << 5),$r0
-#endif
-#endif
        move.b  $r0,[port_pb_data_shadow]
        move.b  $r0,[R_PORT_PB_DATA]
 
@@ -541,20 +449,6 @@ no_command_line:
        move.d  $r0, [R_PORT_PB_I2C]
 
        moveq   0,$r0
-#if defined(CONFIG_BLUETOOTH) && defined(CONFIG_BLUETOOTH_RESET_G10)
-#if defined(CONFIG_BLUETOOTH_RESET_ACTIVE_HIGH)
-       and.d   ~(1 << 10),$r0
-#else
-       or.d    (1 << 10),$r0
-#endif
-#endif
-#if defined(CONFIG_BLUETOOTH) && defined(CONFIG_BLUETOOTH_RESET_G11)
-#if defined(CONFIG_BLUETOOTH_RESET_ACTIVE_HIGH)
-       and.d   ~(1 << 11),$r0
-#else
-       or.d    (1 << 11),$r0
-#endif
-#endif
        move.d  $r0,[port_g_data_shadow]
        move.d  $r0,[R_PORT_G_DATA]
 
index 22d846bfc570c6add486d620a9c1430bbd11a46a..ed71ade93a73aea9a3be5a23b74ef87adcd9fa9d 100644 (file)
@@ -275,7 +275,7 @@ static char remcomOutBuffer[BUFMAX];
 /* Error and warning messages. */
 enum error_type
 {
-       SUCCESS, E01, E02, E03, E04, E05, E06, E07
+       SUCCESS, E01, E02, E03, E04, E05, E06, E07, E08
 };
 static char *error_message[] =
 {
@@ -286,7 +286,8 @@ static char *error_message[] =
        "E04 The command is not supported - [s,C,S,!,R,d,r] - internal error.",
        "E05 Change register content - P - the register is not implemented..",
        "E06 Change memory content - M - internal error.",
-       "E07 Change register content - P - the register is not stored on the stack"
+       "E07 Change register content - P - the register is not stored on the stack",
+       "E08 Invalid parameter"
 };
 /********************************* Register image ****************************/
 /* Use the order of registers as defined in "AXIS ETRAX CRIS Programmer's
@@ -351,7 +352,7 @@ char internal_stack[INTERNAL_STACK_SIZE];
    breakpoint to be handled. A static breakpoint uses the content of register
    BRP as it is whereas a dynamic breakpoint requires subtraction with 2
    in order to execute the instruction. The first breakpoint is static. */
-static unsigned char is_dyn_brkp = 0;
+static unsigned char __used is_dyn_brkp;
 
 /********************************* String library ****************************/
 /* Single-step over library functions creates trap loops. */
@@ -413,18 +414,6 @@ gdb_cris_strtol (const char *s, char **endptr, int base)
 }
 
 /********************************** Packet I/O ******************************/
-/* Returns the integer equivalent of a hexadecimal character. */
-static int
-hex (char ch)
-{
-       if ((ch >= 'a') && (ch <= 'f'))
-               return (ch - 'a' + 10);
-       if ((ch >= '0') && (ch <= '9'))
-               return (ch - '0');
-       if ((ch >= 'A') && (ch <= 'F'))
-               return (ch - 'A' + 10);
-       return (-1);
-}
 
 /* Convert the memory, pointed to by mem into hexadecimal representation.
    Put the result in buf, and return a pointer to the last character
@@ -455,22 +444,6 @@ mem2hex(char *buf, unsigned char *mem, int count)
        return (buf);
 }
 
-/* Convert the array, in hexadecimal representation, pointed to by buf into
-   binary representation. Put the result in mem, and return a pointer to
-   the character after the last byte written. */
-static unsigned char*
-hex2mem (unsigned char *mem, char *buf, int count)
-{
-       int i;
-       unsigned char ch;
-       for (i = 0; i < count; i++) {
-               ch = hex (*buf++) << 4;
-               ch = ch + hex (*buf++);
-               *mem++ = ch;
-       }
-       return (mem);
-}
-
 /* Put the content of the array, in binary representation, pointed to by buf
    into memory pointed to by mem, and return a pointer to the character after
    the last byte written.
@@ -524,8 +497,8 @@ getpacket (char *buffer)
                buffer[count] = '\0';
                
                if (ch == '#') {
-                       xmitcsum = hex (getDebugChar ()) << 4;
-                       xmitcsum += hex (getDebugChar ());
+                       xmitcsum = hex_to_bin(getDebugChar()) << 4;
+                       xmitcsum += hex_to_bin(getDebugChar());
                        if (checksum != xmitcsum) {
                                /* Wrong checksum */
                                putDebugChar ('-');
@@ -599,7 +572,7 @@ putDebugString (const unsigned char *str, int length)
 
 /********************************* Register image ****************************/
 /* Write a value to a specified register in the register image of the current
-   thread. Returns status code SUCCESS, E02 or E05. */
+   thread. Returns status code SUCCESS, E02, E05 or E08. */
 static int
 write_register (int regno, char *val)
 {
@@ -608,8 +581,9 @@ write_register (int regno, char *val)
 
         if (regno >= R0 && regno <= PC) {
                /* 32-bit register with simple offset. */
-               hex2mem ((unsigned char *)current_reg + regno * sizeof(unsigned int),
-                        val, sizeof(unsigned int));
+               if (hex2bin((unsigned char *)current_reg + regno * sizeof(unsigned int),
+                           val, sizeof(unsigned int)))
+                       status = E08;
        }
         else if (regno == P0 || regno == VR || regno == P4 || regno == P8) {
                /* Do not support read-only registers. */
@@ -618,13 +592,15 @@ write_register (int regno, char *val)
         else if (regno == CCR) {
                /* 16 bit register with complex offset. (P4 is read-only, P6 is not implemented, 
                    and P7 (MOF) is 32 bits in ETRAX 100LX. */
-               hex2mem ((unsigned char *)&(current_reg->ccr) + (regno-CCR) * sizeof(unsigned short),
-                        val, sizeof(unsigned short));
+               if (hex2bin((unsigned char *)&(current_reg->ccr) + (regno-CCR) * sizeof(unsigned short),
+                           val, sizeof(unsigned short)))
+                       status = E08;
        }
        else if (regno >= MOF && regno <= USP) {
                /* 32 bit register with complex offset.  (P8 has been taken care of.) */
-               hex2mem ((unsigned char *)&(current_reg->ibr) + (regno-IBR) * sizeof(unsigned int),
-                        val, sizeof(unsigned int));
+               if (hex2bin((unsigned char *)&(current_reg->ibr) + (regno-IBR) * sizeof(unsigned int),
+                           val, sizeof(unsigned int)))
+                       status = E08;
        } 
         else {
                /* Do not support nonexisting or unimplemented registers (P2, P3, and P6). */
@@ -759,9 +735,11 @@ handle_exception (int sigval)
                                /* Write registers. GXX..XX
                                   Each byte of register data  is described by two hex digits.
                                   Success: OK
-                                  Failure: void. */
-                               hex2mem((char *)&cris_reg, &remcomInBuffer[1], sizeof(registers));
-                               gdb_cris_strcpy (remcomOutBuffer, "OK");
+                                  Failure: E08. */
+                               if (hex2bin((char *)&cris_reg, &remcomInBuffer[1], sizeof(registers)))
+                                       gdb_cris_strcpy (remcomOutBuffer, error_message[E08]);
+                               else
+                                       gdb_cris_strcpy (remcomOutBuffer, "OK");
                                break;
                                
                        case 'P':
@@ -771,7 +749,7 @@ handle_exception (int sigval)
                                   for each byte in the register (target byte order). P1f=11223344 means
                                   set register 31 to 44332211.
                                   Success: OK
-                                  Failure: E02, E05 */
+                                  Failure: E02, E05, E08 */
                                {
                                        char *suffix;
                                        int regno = gdb_cris_strtol (&remcomInBuffer[1], &suffix, 16);
@@ -791,6 +769,10 @@ handle_exception (int sigval)
                                                        /* Do not support non-existing registers on the stack. */
                                                        gdb_cris_strcpy (remcomOutBuffer, error_message[E07]);
                                                        break;
+                                               case E08:
+                                                       /* Invalid parameter. */
+                                                       gdb_cris_strcpy (remcomOutBuffer, error_message[E08]);
+                                                       break;
                                                default:
                                                        /* Valid register number. */
                                                        gdb_cris_strcpy (remcomOutBuffer, "OK");
@@ -826,7 +808,7 @@ handle_exception (int sigval)
                                   AA..AA is the start address,  LLLL is the number of bytes, and
                                   XX..XX is the hexadecimal data.
                                   Success: OK
-                                  Failure: void. */
+                                  Failure: E08. */
                                {
                                        char *lenptr;
                                        char *dataptr;
@@ -835,14 +817,15 @@ handle_exception (int sigval)
                                        int length = gdb_cris_strtol(lenptr+1, &dataptr, 16);
                                        if (*lenptr == ',' && *dataptr == ':') {
                                                if (remcomInBuffer[0] == 'M') {
-                                                       hex2mem(addr, dataptr + 1, length);
-                                               }
-                                               else /* X */ {
+                                                       if (hex2bin(addr, dataptr + 1, length))
+                                                               gdb_cris_strcpy (remcomOutBuffer, error_message[E08]);
+                                                       else
+                                                               gdb_cris_strcpy (remcomOutBuffer, "OK");
+                                               } else /* X */ {
                                                        bin2mem(addr, dataptr + 1, length);
+                                                       gdb_cris_strcpy (remcomOutBuffer, "OK");
                                                }
-                                               gdb_cris_strcpy (remcomOutBuffer, "OK");
-                                       }
-                                       else {
+                                       } else {
                                                gdb_cris_strcpy (remcomOutBuffer, error_message[E06]);
                                        }
                                }
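'M' packets carry the payload as hex pairs and now go through hex2bin() with an E08 reply on bad input, while 'X' packets carry raw binary that bin2mem() unescapes. In the GDB remote protocol the bytes '$', '#' and '}' are escaped as '}' followed by the original byte XORed with 0x20; a sketch of such a decoder (illustrative only, not the stub's bin2mem()):

	/* Decode a GDB 'X' packet binary payload into memory: 0x7d ('}')
	 * introduces an escape, and the following byte is XORed with 0x20. */
	static unsigned char *bin2mem_sketch(unsigned char *mem,
					     const unsigned char *buf, int count)
	{
		while (count-- > 0) {
			if (*buf == 0x7d) {		/* escape character '}' */
				buf++;
				*mem++ = *buf++ ^ 0x20;
			} else {
				*mem++ = *buf++;
			}
		}
		return mem;
	}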
@@ -970,7 +953,7 @@ asm ("\n"
 "  move     $ibr,[cris_reg+0x4E]  ; P9,\n"
 "  move     $irp,[cris_reg+0x52]  ; P10,\n"
 "  move     $srp,[cris_reg+0x56]  ; P11,\n"
-"  move     $dtp0,[cris_reg+0x5A] ; P12, register BAR, assembler might not know BAR\n"
+"  move     $bar,[cris_reg+0x5A]  ; P12,\n"
 "                            ; P13, register DCCR already saved\n"
 ";; Due to the old assembler-versions BRP might not be recognized\n"
 "  .word 0xE670              ; move brp,r0\n"
@@ -1063,7 +1046,7 @@ asm ("\n"
 "  move     $ibr,[cris_reg+0x4E]  ; P9,\n"
 "  move     $irp,[cris_reg+0x52]  ; P10,\n"
 "  move     $srp,[cris_reg+0x56]  ; P11,\n"
-"  move     $dtp0,[cris_reg+0x5A] ; P12, register BAR, assembler might not know BAR\n"
+"  move     $bar,[cris_reg+0x5A]  ; P12,\n"
 "                            ; P13, register DCCR already saved\n"
 ";; Due to the old assembler-versions BRP might not be recognized\n"
 "  .word 0xE670              ; move brp,r0\n"
index e7f8066105aa39e9e81505b762393492261329cc..85e3f1b1f3aca848ed20b80270e915e79428e1a2 100644 (file)
@@ -68,14 +68,10 @@ paging_init(void)
 
        *R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) |  /* bootrom */
                        IO_STATE(R_MMU_KSEG, seg_e, page ) |
-                       IO_STATE(R_MMU_KSEG, seg_d, page ) | 
-                       IO_STATE(R_MMU_KSEG, seg_c, page ) |   
+                       IO_STATE(R_MMU_KSEG, seg_d, page ) |
+                       IO_STATE(R_MMU_KSEG, seg_c, page ) |
                        IO_STATE(R_MMU_KSEG, seg_b, seg  ) |  /* kernel reg area */
-#ifdef CONFIG_JULIETTE
-                       IO_STATE(R_MMU_KSEG, seg_a, seg  ) |  /* ARTPEC etc. */
-#else
                        IO_STATE(R_MMU_KSEG, seg_a, page ) |
-#endif
                        IO_STATE(R_MMU_KSEG, seg_9, seg  ) |  /* LED's on some boards */
                        IO_STATE(R_MMU_KSEG, seg_8, seg  ) |  /* CSE0/1, flash and I/O */
                        IO_STATE(R_MMU_KSEG, seg_7, page ) |  /* kernel vmalloc area */
@@ -92,14 +88,10 @@ paging_init(void)
                            IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
                            IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) |
                            IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
-#ifdef CONFIG_JULIETTE
-                           IO_FIELD(R_MMU_KBASE_HI, base_a, 0xa ) |
-#else
                            IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
-#endif
                            IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) |
                            IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) );
-       
+
        *R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
                            IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
                            IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
index 21bbd93be34f3732986d228d5139df43182058cb..17dbe03af5f4d1f61e4e60c24d48d284849a78f5 100644 (file)
@@ -10,95 +10,6 @@ config ETRAX_DRAM_VIRTUAL_BASE
        depends on ETRAX_ARCH_V32
        default "c0000000"
 
-choice
-       prompt "Nbr of Ethernet LED groups"
-       depends on ETRAX_ARCH_V32
-       default ETRAX_NBR_LED_GRP_ONE
-       help
-         Select how many Ethernet LED groups can be used. Usually one per Ethernet
-         interface is a good choice.
-
-config ETRAX_NBR_LED_GRP_ZERO
-       bool "Use zero LED groups"
-       help
-         Select this if you do not want any Ethernet LEDs.
-
-config ETRAX_NBR_LED_GRP_ONE
-       bool "Use one LED group"
-       help
-         Select this if you want one Ethernet LED group. This LED group
-         can be used for one or more Ethernet interfaces. However, it is
-         recommended that each Ethernet interface use a dedicated LED group.
-
-config ETRAX_NBR_LED_GRP_TWO
-       bool "Use two LED groups"
-       help
-         Select this if you want two Ethernet LED groups. This is the
-         best choice if you have more than one Ethernet interface and
-         would like to have separate LEDs for the interfaces.
-
-endchoice
-
-config ETRAX_LED_G_NET0
-       string "Ethernet LED group 0 green LED bit"
-       depends on ETRAX_ARCH_V32 && (ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO)
-       default "PA3"
-       help
-         Bit to use for the green LED in Ethernet LED group 0.
-
-config ETRAX_LED_R_NET0
-       string "Ethernet LED group 0 red LED bit"
-       depends on ETRAX_ARCH_V32 && (ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO)
-       default "PA4"
-       help
-         Bit to use for the red LED in Ethernet LED group 0.
-
-config ETRAX_LED_G_NET1
-       string "Ethernet group 1 green LED bit"
-       depends on ETRAX_ARCH_V32 && ETRAX_NBR_LED_GRP_TWO
-       default ""
-       help
-         Bit to use for the green LED in Ethernet LED group 1.
-
-config ETRAX_LED_R_NET1
-       string "Ethernet group 1 red LED bit"
-       depends on ETRAX_ARCH_V32 && ETRAX_NBR_LED_GRP_TWO
-       default ""
-       help
-         Bit to use for the red LED in Ethernet LED group 1.
-
-config ETRAX_V32_LED2G
-       string "Second green LED bit"
-       depends on ETRAX_ARCH_V32
-       default "PA5"
-       help
-         Bit to use for the first green LED (status LED).
-         Most Axis products use bit A5 here.
-
-config ETRAX_V32_LED2R
-       string "Second red LED bit"
-       depends on ETRAX_ARCH_V32
-       default "PA6"
-       help
-         Bit to use for the first red LED (network LED).
-         Most Axis products use bit A6 here.
-
-config ETRAX_V32_LED3G
-       string "Third green LED bit"
-       depends on ETRAX_ARCH_V32
-       default "PA7"
-       help
-         Bit to use for the first green LED (drive/power LED).
-         Most Axis products use bit A7 here.
-
-config ETRAX_V32_LED3R
-       string "Third red LED bit"
-       depends on ETRAX_ARCH_V32
-       default "PA7"
-       help
-         Bit to use for the first red LED (drive/power LED).
-         Most Axis products use bit A7 here.
-
 choice
        prompt "Kernel GDB port"
        depends on ETRAX_KGDB
index e6c523cc40bc824bd1c83735873b25426f0a46bc..2735eb7671a5ad9203251fe88726145b3a33a92a 100644 (file)
@@ -149,173 +149,6 @@ config ETRAX_NANDBOOT
          Say Y if your boot code, kernel and root file system is in
          NAND flash. Say N if they are in NOR flash.
 
-config ETRAX_I2C
-       bool "I2C driver"
-       depends on ETRAX_ARCH_V32
-       help
-         This option enables the I2C driver used by e.g. the RTC driver.
-
-config ETRAX_V32_I2C_DATA_PORT
-       string "I2C data pin"
-       depends on ETRAX_I2C
-       help
-         The pin to use for I2C data.
-
-config ETRAX_V32_I2C_CLK_PORT
-       string "I2C clock pin"
-       depends on ETRAX_I2C
-       help
-         The pin to use for I2C clock.
-
-config ETRAX_GPIO
-       bool "GPIO support"
-       depends on ETRAX_ARCH_V32
-       ---help---
-         Enables the ETRAX general port device (major 120, minors 0-4).
-         You can use this driver to access the general port bits. It supports
-         these ioctl's:
-         #include <linux/etraxgpio.h>
-         fd = open("/dev/gpioa", O_RDWR); // or /dev/gpiob
-         ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETBITS), bits_to_set);
-         ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_CLRBITS), bits_to_clear);
-         err = ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_READ_INBITS), &val);
-         Remember that you need to setup the port directions appropriately in
-         the General configuration.
-
-config ETRAX_VIRTUAL_GPIO
-       bool "Virtual GPIO support"
-       depends on ETRAX_GPIO
-       help
-         Enables the virtual Etrax general port device (major 120, minor 6).
-         It uses an I/O expander for the I2C-bus.
-
-config ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN
-       int "Virtual GPIO interrupt pin on PA pin"
-       range 0 7
-       depends on ETRAX_VIRTUAL_GPIO
-       help
-         The pin to use on PA for virtual gpio interrupt.
-
-config ETRAX_PA_CHANGEABLE_DIR
-       hex "PA user changeable dir mask"
-       depends on ETRAX_GPIO
-       default "0x00" if ETRAXFS
-       default "0x00000000" if !ETRAXFS
-       help
-         This is a bitmask with information of what bits in PA that a
-         user can change direction on using ioctl's.
-         Bit set = changeable.
-         You probably want 0 here, but it depends on your hardware.
-
-config ETRAX_PA_CHANGEABLE_BITS
-       hex "PA user changeable bits mask"
-       depends on ETRAX_GPIO
-       default "0x00" if ETRAXFS
-       default "0x00000000" if !ETRAXFS
-       help
-         This is a bitmask with information of what bits in PA
-         that a user can change the value on using ioctl's.
-         Bit set = changeable.
-
-config ETRAX_PB_CHANGEABLE_DIR
-       hex "PB user changeable dir mask"
-       depends on ETRAX_GPIO
-       default "0x00000" if ETRAXFS
-       default "0x00000000" if !ETRAXFS
-       help
-         This is a bitmask with information of what bits in PB
-         that a user can change direction on using ioctl's.
-         Bit set = changeable.
-         You probably want 0 here, but it depends on your hardware.
-
-config ETRAX_PB_CHANGEABLE_BITS
-       hex "PB user changeable bits mask"
-       depends on ETRAX_GPIO
-       default "0x00000" if ETRAXFS
-       default "0x00000000" if !ETRAXFS
-       help
-         This is a bitmask with information of what bits in PB
-         that a user can change the value on using ioctl's.
-         Bit set = changeable.
-
-config ETRAX_PC_CHANGEABLE_DIR
-       hex "PC user changeable dir mask"
-       depends on ETRAX_GPIO
-       default "0x00000" if ETRAXFS
-       default "0x00000000" if !ETRAXFS
-       help
-         This is a bitmask with information of what bits in PC
-         that a user can change direction on using ioctl's.
-         Bit set = changeable.
-         You probably want 0 here, but it depends on your hardware.
-
-config ETRAX_PC_CHANGEABLE_BITS
-       hex "PC user changeable bits mask"
-       depends on ETRAX_GPIO
-       default "0x00000" if ETRAXFS
-       default "0x00000000" if !ETRAXFS
-       help
-         This is a bitmask with information of what bits in PC
-         that a user can change the value on using ioctl's.
-         Bit set = changeable.
-
-config ETRAX_PD_CHANGEABLE_DIR
-       hex "PD user changeable dir mask"
-       depends on ETRAX_GPIO && ETRAXFS
-       default "0x00000"
-       help
-         This is a bitmask with information of what bits in PD
-         that a user can change direction on using ioctl's.
-         Bit set = changeable.
-         You probably want 0x00000 here, but it depends on your hardware.
-
-config ETRAX_PD_CHANGEABLE_BITS
-       hex "PD user changeable bits mask"
-       depends on ETRAX_GPIO && ETRAXFS
-       default "0x00000"
-       help
-         This is a bitmask (18 bits) with information of what bits in PD
-         that a user can change the value on using ioctl's.
-         Bit set = changeable.
-
-config ETRAX_PE_CHANGEABLE_DIR
-       hex "PE user changeable dir mask"
-       depends on ETRAX_GPIO && ETRAXFS
-       default "0x00000"
-       help
-         This is a bitmask (18 bits) with information of what bits in PE
-         that a user can change direction on using ioctl's.
-         Bit set = changeable.
-         You probably want 0x00000 here, but it depends on your hardware.
-
-config ETRAX_PE_CHANGEABLE_BITS
-       hex "PE user changeable bits mask"
-       depends on ETRAX_GPIO && ETRAXFS
-       default "0x00000"
-       help
-         This is a bitmask (18 bits) with information of what bits in PE
-         that a user can change the value on using ioctl's.
-         Bit set = changeable.
-
-config ETRAX_PV_CHANGEABLE_DIR
-       hex "PV user changeable dir mask"
-       depends on ETRAX_VIRTUAL_GPIO
-       default "0x0000"
-       help
-         This is a bitmask (16 bits) with information of what bits in PV
-         that a user can change direction on using ioctl's.
-         Bit set = changeable.
-         You probably want 0x0000 here, but it depends on your hardware.
-
-config ETRAX_PV_CHANGEABLE_BITS
-       hex "PV user changeable bits mask"
-       depends on ETRAX_VIRTUAL_GPIO
-       default "0x0000"
-       help
-         This is a bitmask (16 bits) with information of what bits in PV
-         that a user can change the value on using ioctl's.
-         Bit set = changeable.
-
 config ETRAX_CARDBUS
         bool "Cardbus support"
         depends on ETRAX_ARCH_V32
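The removed ETRAX_GPIO help text above documents a small character-device interface (major 120, /dev/gpioa or /dev/gpiob) driven entirely by ioctl(). A minimal userspace sketch of that interface, assuming the old <linux/etraxgpio.h> header and only the ioctl names quoted in the help text:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/etraxgpio.h>

	int main(void)
	{
		unsigned long val;
		int fd = open("/dev/gpioa", O_RDWR);	/* or /dev/gpiob */

		if (fd < 0)
			return 1;
		ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETBITS), 1UL << 3);  /* drive PA3 high */
		ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_CLRBITS), 1UL << 4);  /* drive PA4 low */
		if (ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_READ_INBITS), &val) == 0)
			printf("PA inputs: 0x%08lx\n", val);
		close(fd);
		return 0;
	}

The same device also supported alarm ioctls together with poll(); a sketch of that usage appears further down, next to the removed gpio driver itself.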
index 15fbfefced2c43f6998198162a069aa7070073c1..b5a75fdce77bef639d65ce62ec824059fc541af5 100644 (file)
@@ -7,6 +7,5 @@ obj-$(CONFIG_ETRAX_AXISFLASHMAP)        += axisflashmap.o
 obj-$(CONFIG_ETRAXFS)                   += mach-fs/
 obj-$(CONFIG_CRIS_MACH_ARTPEC3)         += mach-a3/
 obj-$(CONFIG_ETRAX_IOP_FW_LOAD)         += iop_fw_load.o
-obj-$(CONFIG_ETRAX_I2C)                        += i2c.o
 obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o
 obj-$(CONFIG_PCI)                      += pci/
index 5387424683ccce2b26e912474a0e2974e6abc558..c6309a182f467e3cdea78d5545f671bf3d91103a 100644 (file)
@@ -361,7 +361,7 @@ static int __init init_axis_flash(void)
 
 #if 0 /* Dump flash memory so we can see what is going on */
        if (main_mtd) {
-               int sectoraddr, i;
+               int sectoraddr;
                for (sectoraddr = 0; sectoraddr < 2*65536+4096;
                                sectoraddr += PAGESIZE) {
                        main_mtd->read(main_mtd, sectoraddr, PAGESIZE, &len,
@@ -369,21 +369,7 @@ static int __init init_axis_flash(void)
                        printk(KERN_INFO
                               "Sector at %d (length %d):\n",
                               sectoraddr, len);
-                       for (i = 0; i < PAGESIZE; i += 16) {
-                               printk(KERN_INFO
-                                      "%02x %02x %02x %02x "
-                                      "%02x %02x %02x %02x "
-                                      "%02x %02x %02x %02x "
-                                      "%02x %02x %02x %02x\n",
-                                      page[i] & 255, page[i+1] & 255,
-                                      page[i+2] & 255, page[i+3] & 255,
-                                      page[i+4] & 255, page[i+5] & 255,
-                                      page[i+6] & 255, page[i+7] & 255,
-                                      page[i+8] & 255, page[i+9] & 255,
-                                      page[i+10] & 255, page[i+11] & 255,
-                                      page[i+12] & 255, page[i+13] & 255,
-                                      page[i+14] & 255, page[i+15] & 255);
-                       }
+                       print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, page, PAGESIZE, false);
                }
        }
 #endif
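print_hex_dump() collapses the hand-rolled 16-bytes-per-line loop into one call: KERN_INFO level, no prefix, 16 bytes per row in 1-byte groups, no ASCII column. A self-contained sketch of the same call in a dummy module (module and buffer names are illustrative, not part of this patch):

	#include <linux/kernel.h>
	#include <linux/module.h>

	static int __init hexdump_demo_init(void)
	{
		static const u8 sample[32] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

		/* Same layout as the axisflashmap dump above, but with an
		 * offset prefix on each row for readability. */
		print_hex_dump(KERN_INFO, "sample: ", DUMP_PREFIX_OFFSET, 16, 1,
			       sample, sizeof(sample), false);
		return 0;
	}
	module_init(hexdump_demo_init);
	MODULE_LICENSE("GPL");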
@@ -417,25 +403,11 @@ static int __init init_axis_flash(void)
 
 #if 0 /* Dump partition table so we can see what is going on */
                printk(KERN_INFO
-                      "axisflashmap: flash read %d bytes at 0x%08x, data: "
-                      "%02x %02x %02x %02x %02x %02x %02x %02x\n",
-                      len, CONFIG_ETRAX_PTABLE_SECTOR,
-                      page[0] & 255, page[1] & 255,
-                      page[2] & 255, page[3] & 255,
-                      page[4] & 255, page[5] & 255,
-                      page[6] & 255, page[7] & 255);
+                      "axisflashmap: flash read %d bytes at 0x%08x, data: %8ph\n",
+                      len, CONFIG_ETRAX_PTABLE_SECTOR, page);
                printk(KERN_INFO
-                      "axisflashmap: partition table offset %d, data: "
-                      "%02x %02x %02x %02x %02x %02x %02x %02x\n",
-                      PARTITION_TABLE_OFFSET,
-                      page[PARTITION_TABLE_OFFSET+0] & 255,
-                      page[PARTITION_TABLE_OFFSET+1] & 255,
-                      page[PARTITION_TABLE_OFFSET+2] & 255,
-                      page[PARTITION_TABLE_OFFSET+3] & 255,
-                      page[PARTITION_TABLE_OFFSET+4] & 255,
-                      page[PARTITION_TABLE_OFFSET+5] & 255,
-                      page[PARTITION_TABLE_OFFSET+6] & 255,
-                      page[PARTITION_TABLE_OFFSET+7] & 255);
+                      "axisflashmap: partition table offset %d, data: %8ph\n",
+                      PARTITION_TABLE_OFFSET, page + PARTITION_TABLE_OFFSET);
 #endif
        }
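%*ph is the printk extension used above: the field width gives the number of bytes (at most 64) to dump as space-separated hex, so each printk() no longer needs eight explicit "page[i] & 255" arguments. An illustrative use (the helper name is made up for this example):

	#include <linux/printk.h>
	#include <linux/types.h>

	static void show_flash_id(const u8 *id)
	{
		pr_info("flash id: %8ph\n", id);	/* first 8 bytes of id */
		pr_info("flash id: %*ph\n", 8, id);	/* same, width from an argument */
	}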
 
diff --git a/arch/cris/arch-v32/drivers/i2c.c b/arch/cris/arch-v32/drivers/i2c.c
deleted file mode 100644 (file)
index 3b2c82c..0000000
+++ /dev/null
@@ -1,751 +0,0 @@
-/*!***************************************************************************
-*!
-*! FILE NAME  : i2c.c
-*!
-*! DESCRIPTION: implements an interface for IIC/I2C, both directly from other
-*!              kernel modules (i2c_writereg/readreg) and from userspace using
-*!              ioctl()'s
-*!
-*! Nov 30 1998  Torbjorn Eliasson  Initial version.
-*!              Bjorn Wesen        Elinux kernel version.
-*! Jan 14 2000  Johan Adolfsson    Fixed PB shadow register stuff -
-*!                                 don't use PB_I2C if DS1302 uses same bits,
-*!                                 use PB.
-*| June 23 2003 Pieter Grimmerink  Added 'i2c_sendnack'. i2c_readreg now
-*|                                 generates nack on last received byte,
-*|                                 instead of ack.
-*|                                 i2c_getack changed data level while clock
-*|                                 was high, causing DS75 to see  a stop condition
-*!
-*! ---------------------------------------------------------------------------
-*!
-*! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN
-*!
-*!***************************************************************************/
-
-/****************** INCLUDE FILES SECTION ***********************************/
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-
-#include <asm/etraxi2c.h>
-
-#include <asm/io.h>
-#include <asm/delay.h>
-
-#include "i2c.h"
-
-/****************** I2C DEFINITION SECTION *************************/
-
-#define D(x)
-
-#define I2C_MAJOR 123  /* LOCAL/EXPERIMENTAL */
-static DEFINE_MUTEX(i2c_mutex);
-static const char i2c_name[] = "i2c";
-
-#define CLOCK_LOW_TIME            8
-#define CLOCK_HIGH_TIME           8
-#define START_CONDITION_HOLD_TIME 8
-#define STOP_CONDITION_HOLD_TIME  8
-#define ENABLE_OUTPUT 0x01
-#define ENABLE_INPUT 0x00
-#define I2C_CLOCK_HIGH 1
-#define I2C_CLOCK_LOW 0
-#define I2C_DATA_HIGH 1
-#define I2C_DATA_LOW 0
-
-#define i2c_enable()
-#define i2c_disable()
-
-/* enable or disable output-enable, to select output or input on the i2c bus */
-
-#define i2c_dir_out() crisv32_io_set_dir(&cris_i2c_data, crisv32_io_dir_out)
-#define i2c_dir_in() crisv32_io_set_dir(&cris_i2c_data, crisv32_io_dir_in)
-
-/* control the i2c clock and data signals */
-
-#define i2c_clk(x) crisv32_io_set(&cris_i2c_clk, x)
-#define i2c_data(x) crisv32_io_set(&cris_i2c_data, x)
-
-/* read a bit from the i2c interface */
-
-#define i2c_getbit() crisv32_io_rd(&cris_i2c_data)
-
-#define i2c_delay(usecs) udelay(usecs)
-
-static DEFINE_SPINLOCK(i2c_lock); /* Protect directions etc */
-
-/****************** VARIABLE SECTION ************************************/
-
-static struct crisv32_iopin cris_i2c_clk;
-static struct crisv32_iopin cris_i2c_data;
-
-/****************** FUNCTION DEFINITION SECTION *************************/
-
-
-/* generate i2c start condition */
-
-void
-i2c_start(void)
-{
-       /*
-        * SCL=1 SDA=1
-        */
-       i2c_dir_out();
-       i2c_delay(CLOCK_HIGH_TIME/6);
-       i2c_data(I2C_DATA_HIGH);
-       i2c_clk(I2C_CLOCK_HIGH);
-       i2c_delay(CLOCK_HIGH_TIME);
-       /*
-        * SCL=1 SDA=0
-        */
-       i2c_data(I2C_DATA_LOW);
-       i2c_delay(START_CONDITION_HOLD_TIME);
-       /*
-        * SCL=0 SDA=0
-        */
-       i2c_clk(I2C_CLOCK_LOW);
-       i2c_delay(CLOCK_LOW_TIME);
-}
-
-/* generate i2c stop condition */
-
-void
-i2c_stop(void)
-{
-       i2c_dir_out();
-
-       /*
-        * SCL=0 SDA=0
-        */
-       i2c_clk(I2C_CLOCK_LOW);
-       i2c_data(I2C_DATA_LOW);
-       i2c_delay(CLOCK_LOW_TIME*2);
-       /*
-        * SCL=1 SDA=0
-        */
-       i2c_clk(I2C_CLOCK_HIGH);
-       i2c_delay(CLOCK_HIGH_TIME*2);
-       /*
-        * SCL=1 SDA=1
-        */
-       i2c_data(I2C_DATA_HIGH);
-       i2c_delay(STOP_CONDITION_HOLD_TIME);
-
-       i2c_dir_in();
-}
-
-/* write a byte to the i2c interface */
-
-void
-i2c_outbyte(unsigned char x)
-{
-       int i;
-
-       i2c_dir_out();
-
-       for (i = 0; i < 8; i++) {
-               if (x & 0x80) {
-                       i2c_data(I2C_DATA_HIGH);
-               } else {
-                       i2c_data(I2C_DATA_LOW);
-               }
-
-               i2c_delay(CLOCK_LOW_TIME/2);
-               i2c_clk(I2C_CLOCK_HIGH);
-               i2c_delay(CLOCK_HIGH_TIME);
-               i2c_clk(I2C_CLOCK_LOW);
-               i2c_delay(CLOCK_LOW_TIME/2);
-               x <<= 1;
-       }
-       i2c_data(I2C_DATA_LOW);
-       i2c_delay(CLOCK_LOW_TIME/2);
-
-       /*
-        * enable input
-        */
-       i2c_dir_in();
-}
-
-/* read a byte from the i2c interface */
-
-unsigned char
-i2c_inbyte(void)
-{
-       unsigned char aBitByte = 0;
-       int i;
-
-       /* Switch off I2C to get bit */
-       i2c_disable();
-       i2c_dir_in();
-       i2c_delay(CLOCK_HIGH_TIME/2);
-
-       /* Get bit */
-       aBitByte |= i2c_getbit();
-
-       /* Enable I2C */
-       i2c_enable();
-       i2c_delay(CLOCK_LOW_TIME/2);
-
-       for (i = 1; i < 8; i++) {
-               aBitByte <<= 1;
-               /* Clock pulse */
-               i2c_clk(I2C_CLOCK_HIGH);
-               i2c_delay(CLOCK_HIGH_TIME);
-               i2c_clk(I2C_CLOCK_LOW);
-               i2c_delay(CLOCK_LOW_TIME);
-
-               /* Switch off I2C to get bit */
-               i2c_disable();
-               i2c_dir_in();
-               i2c_delay(CLOCK_HIGH_TIME/2);
-
-               /* Get bit */
-               aBitByte |= i2c_getbit();
-
-               /* Enable I2C */
-               i2c_enable();
-               i2c_delay(CLOCK_LOW_TIME/2);
-       }
-       i2c_clk(I2C_CLOCK_HIGH);
-       i2c_delay(CLOCK_HIGH_TIME);
-
-       /*
-        * we leave the clock low, getbyte is usually followed
-        * by sendack/nack, they assume the clock to be low
-        */
-       i2c_clk(I2C_CLOCK_LOW);
-       return aBitByte;
-}
-
-/*#---------------------------------------------------------------------------
-*#
-*# FUNCTION NAME: i2c_getack
-*#
-*# DESCRIPTION  : checks if ack was received from i2c
-*#
-*#--------------------------------------------------------------------------*/
-
-int
-i2c_getack(void)
-{
-       int ack = 1;
-       /*
-        * enable output
-        */
-       i2c_dir_out();
-       /*
-        * Release data bus by setting
-        * data high
-        */
-       i2c_data(I2C_DATA_HIGH);
-       /*
-        * enable input
-        */
-       i2c_dir_in();
-       i2c_delay(CLOCK_HIGH_TIME/4);
-       /*
-        * generate ACK clock pulse
-        */
-       i2c_clk(I2C_CLOCK_HIGH);
-#if 0
-       /*
-        * Use PORT PB instead of I2C
-        * for input. (I2C not working)
-        */
-       i2c_clk(1);
-       i2c_data(1);
-       /*
-        * switch off I2C
-        */
-       i2c_data(1);
-       i2c_disable();
-       i2c_dir_in();
-#endif
-
-       /*
-        * now wait for ack
-        */
-       i2c_delay(CLOCK_HIGH_TIME/2);
-       /*
-        * check for ack
-        */
-       if (i2c_getbit())
-               ack = 0;
-       i2c_delay(CLOCK_HIGH_TIME/2);
-       if (!ack) {
-               if (!i2c_getbit()) /* receiver pulled SDA low */
-                       ack = 1;
-               i2c_delay(CLOCK_HIGH_TIME/2);
-       }
-
-   /*
-    * our clock is high now, make sure data is low
-    * before we enable our output. If we keep data high
-    * and enable output, we would generate a stop condition.
-    */
-#if 0
-   i2c_data(I2C_DATA_LOW);
-
-       /*
-        * end clock pulse
-        */
-       i2c_enable();
-       i2c_dir_out();
-#endif
-       i2c_clk(I2C_CLOCK_LOW);
-       i2c_delay(CLOCK_HIGH_TIME/4);
-       /*
-        * enable output
-        */
-       i2c_dir_out();
-       /*
-        * remove ACK clock pulse
-        */
-       i2c_data(I2C_DATA_HIGH);
-       i2c_delay(CLOCK_LOW_TIME/2);
-       return ack;
-}
-
-/*#---------------------------------------------------------------------------
-*#
-*# FUNCTION NAME: I2C::sendAck
-*#
-*# DESCRIPTION  : Send ACK on received data
-*#
-*#--------------------------------------------------------------------------*/
-void
-i2c_sendack(void)
-{
-       /*
-        * enable output
-        */
-       i2c_delay(CLOCK_LOW_TIME);
-       i2c_dir_out();
-       /*
-        * set ack pulse high
-        */
-       i2c_data(I2C_DATA_LOW);
-       /*
-        * generate clock pulse
-        */
-       i2c_delay(CLOCK_HIGH_TIME/6);
-       i2c_clk(I2C_CLOCK_HIGH);
-       i2c_delay(CLOCK_HIGH_TIME);
-       i2c_clk(I2C_CLOCK_LOW);
-       i2c_delay(CLOCK_LOW_TIME/6);
-       /*
-        * reset data out
-        */
-       i2c_data(I2C_DATA_HIGH);
-       i2c_delay(CLOCK_LOW_TIME);
-
-       i2c_dir_in();
-}
-
-/*#---------------------------------------------------------------------------
-*#
-*# FUNCTION NAME: i2c_sendnack
-*#
-*# DESCRIPTION  : Sends NACK on received data
-*#
-*#--------------------------------------------------------------------------*/
-void
-i2c_sendnack(void)
-{
-       /*
-        * enable output
-        */
-       i2c_delay(CLOCK_LOW_TIME);
-       i2c_dir_out();
-       /*
-        * set data high
-        */
-       i2c_data(I2C_DATA_HIGH);
-       /*
-        * generate clock pulse
-        */
-       i2c_delay(CLOCK_HIGH_TIME/6);
-       i2c_clk(I2C_CLOCK_HIGH);
-       i2c_delay(CLOCK_HIGH_TIME);
-       i2c_clk(I2C_CLOCK_LOW);
-       i2c_delay(CLOCK_LOW_TIME);
-
-       i2c_dir_in();
-}
-
-/*#---------------------------------------------------------------------------
-*#
-*# FUNCTION NAME: i2c_write
-*#
-*# DESCRIPTION  : Writes a value to an I2C device
-*#
-*#--------------------------------------------------------------------------*/
-int
-i2c_write(unsigned char theSlave, void *data, size_t nbytes)
-{
-       int error, cntr = 3;
-       unsigned char bytes_wrote = 0;
-       unsigned char value;
-       unsigned long flags;
-
-       spin_lock_irqsave(&i2c_lock, flags);
-
-       do {
-               error = 0;
-
-               i2c_start();
-               /*
-                * send slave address
-                */
-               i2c_outbyte((theSlave & 0xfe));
-               /*
-                * wait for ack
-                */
-               if (!i2c_getack())
-                       error = 1;
-               /*
-                * send data
-                */
-               for (bytes_wrote = 0; bytes_wrote < nbytes; bytes_wrote++) {
-                       memcpy(&value, data + bytes_wrote, sizeof value);
-                       i2c_outbyte(value);
-                       /*
-                        * now it's time to wait for ack
-                        */
-                       if (!i2c_getack())
-                               error |= 4;
-               }
-               /*
-                * end byte stream
-                */
-               i2c_stop();
-
-       } while (error && cntr--);
-
-       i2c_delay(CLOCK_LOW_TIME);
-
-       spin_unlock_irqrestore(&i2c_lock, flags);
-
-       return -error;
-}
-
-/*#---------------------------------------------------------------------------
-*#
-*# FUNCTION NAME: i2c_read
-*#
-*# DESCRIPTION  : Reads a value from an I2C device
-*#
-*#--------------------------------------------------------------------------*/
-int
-i2c_read(unsigned char theSlave, void *data, size_t nbytes)
-{
-       unsigned char b = 0;
-       unsigned char bytes_read = 0;
-       int error, cntr = 3;
-       unsigned long flags;
-
-       spin_lock_irqsave(&i2c_lock, flags);
-
-       do {
-               error = 0;
-               memset(data, 0, nbytes);
-               /*
-                * generate start condition
-                */
-               i2c_start();
-               /*
-                * send slave address
-                */
-               i2c_outbyte((theSlave | 0x01));
-               /*
-                * wait for ack
-                */
-               if (!i2c_getack())
-                       error = 1;
-               /*
-                * fetch data
-                */
-               for (bytes_read = 0; bytes_read < nbytes; bytes_read++) {
-                       b = i2c_inbyte();
-                       memcpy(data + bytes_read, &b, sizeof b);
-
-                       if (bytes_read < (nbytes - 1))
-                               i2c_sendack();
-               }
-               /*
-                * last received byte needs to be nacked
-                * instead of acked
-                */
-               i2c_sendnack();
-               /*
-                * end sequence
-                */
-               i2c_stop();
-       } while (error && cntr--);
-
-       spin_unlock_irqrestore(&i2c_lock, flags);
-
-       return -error;
-}
-
-/*#---------------------------------------------------------------------------
-*#
-*# FUNCTION NAME: i2c_writereg
-*#
-*# DESCRIPTION  : Writes a value to an I2C device
-*#
-*#--------------------------------------------------------------------------*/
-int
-i2c_writereg(unsigned char theSlave, unsigned char theReg,
-            unsigned char theValue)
-{
-       int error, cntr = 3;
-       unsigned long flags;
-
-       spin_lock_irqsave(&i2c_lock, flags);
-
-       do {
-               error = 0;
-
-               i2c_start();
-               /*
-                * send slave address
-                */
-               i2c_outbyte((theSlave & 0xfe));
-               /*
-                * wait for ack
-                */
-               if(!i2c_getack())
-                       error = 1;
-               /*
-                * now select register
-                */
-               i2c_dir_out();
-               i2c_outbyte(theReg);
-               /*
-                * now it's time to wait for ack
-                */
-               if(!i2c_getack())
-                       error |= 2;
-               /*
-                * send register data
-                */
-               i2c_outbyte(theValue);
-               /*
-                * now it's time to wait for ack
-                */
-               if(!i2c_getack())
-                       error |= 4;
-               /*
-                * end byte stream
-                */
-               i2c_stop();
-       } while(error && cntr--);
-
-       i2c_delay(CLOCK_LOW_TIME);
-
-       spin_unlock_irqrestore(&i2c_lock, flags);
-
-       return -error;
-}
-
-/*#---------------------------------------------------------------------------
-*#
-*# FUNCTION NAME: i2c_readreg
-*#
-*# DESCRIPTION  : Reads a value from the decoder registers.
-*#
-*#--------------------------------------------------------------------------*/
-unsigned char
-i2c_readreg(unsigned char theSlave, unsigned char theReg)
-{
-       unsigned char b = 0;
-       int error, cntr = 3;
-       unsigned long flags;
-
-       spin_lock_irqsave(&i2c_lock, flags);
-
-       do {
-               error = 0;
-               /*
-                * generate start condition
-                */
-               i2c_start();
-
-               /*
-                * send slave address
-                */
-               i2c_outbyte((theSlave & 0xfe));
-               /*
-                * wait for ack
-                */
-               if(!i2c_getack())
-                       error = 1;
-               /*
-                * now select register
-                */
-               i2c_dir_out();
-               i2c_outbyte(theReg);
-               /*
-                * now it's time to wait for ack
-                */
-               if(!i2c_getack())
-                       error |= 2;
-               /*
-                * repeat start condition
-                */
-               i2c_delay(CLOCK_LOW_TIME);
-               i2c_start();
-               /*
-                * send slave address
-                */
-               i2c_outbyte(theSlave | 0x01);
-               /*
-                * wait for ack
-                */
-               if(!i2c_getack())
-                       error |= 4;
-               /*
-                * fetch register
-                */
-               b = i2c_inbyte();
-               /*
-                * last received byte needs to be nacked
-                * instead of acked
-                */
-               i2c_sendnack();
-               /*
-                * end sequence
-                */
-               i2c_stop();
-
-       } while(error && cntr--);
-
-       spin_unlock_irqrestore(&i2c_lock, flags);
-
-       return b;
-}
-
-static int
-i2c_open(struct inode *inode, struct file *filp)
-{
-       return 0;
-}
-
-static int
-i2c_release(struct inode *inode, struct file *filp)
-{
-       return 0;
-}
-
-/* Main device API. ioctl's to write or read to/from i2c registers.
- */
-
-static long
-i2c_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       int ret;
-       if(_IOC_TYPE(cmd) != ETRAXI2C_IOCTYPE) {
-               return -ENOTTY;
-       }
-
-       switch (_IOC_NR(cmd)) {
-               case I2C_WRITEREG:
-                       /* write to an i2c slave */
-                       D(printk("i2cw %d %d %d\n",
-                                I2C_ARGSLAVE(arg),
-                                I2C_ARGREG(arg),
-                                I2C_ARGVALUE(arg)));
-
-                       mutex_lock(&i2c_mutex);
-                       ret = i2c_writereg(I2C_ARGSLAVE(arg),
-                                           I2C_ARGREG(arg),
-                                           I2C_ARGVALUE(arg));
-                       mutex_unlock(&i2c_mutex);
-                       return ret;
-
-               case I2C_READREG:
-               {
-                       unsigned char val;
-                       /* read from an i2c slave */
-                       D(printk("i2cr %d %d ",
-                               I2C_ARGSLAVE(arg),
-                               I2C_ARGREG(arg)));
-                       mutex_lock(&i2c_mutex);
-                       val = i2c_readreg(I2C_ARGSLAVE(arg), I2C_ARGREG(arg));
-                       mutex_unlock(&i2c_mutex);
-                       D(printk("= %d\n", val));
-                       return val;
-               }
-               default:
-                       return -EINVAL;
-
-       }
-
-       return 0;
-}
-
-static const struct file_operations i2c_fops = {
-       .owner          = THIS_MODULE,
-       .unlocked_ioctl = i2c_ioctl,
-       .open           = i2c_open,
-       .release        = i2c_release,
-       .llseek         = noop_llseek,
-};
-
-static int __init i2c_init(void)
-{
-       static int res;
-       static int first = 1;
-
-       if (!first)
-               return res;
-
-       first = 0;
-
-       /* Setup and enable the DATA and CLK pins */
-
-       res = crisv32_io_get_name(&cris_i2c_data,
-               CONFIG_ETRAX_V32_I2C_DATA_PORT);
-       if (res < 0)
-               return res;
-
-       res = crisv32_io_get_name(&cris_i2c_clk, CONFIG_ETRAX_V32_I2C_CLK_PORT);
-       crisv32_io_set_dir(&cris_i2c_clk, crisv32_io_dir_out);
-
-       return res;
-}
-
-
-static int __init i2c_register(void)
-{
-       int res;
-
-       res = i2c_init();
-       if (res < 0)
-               return res;
-
-       /* register char device */
-
-       res = register_chrdev(I2C_MAJOR, i2c_name, &i2c_fops);
-       if (res < 0) {
-               printk(KERN_ERR "i2c: couldn't get a major number.\n");
-               return res;
-       }
-
-       printk(KERN_INFO
-               "I2C driver v2.2, (c) 1999-2007 Axis Communications AB\n");
-
-       return 0;
-}
-/* this makes sure that i2c_init is called during boot */
-module_init(i2c_register);
-
-/****************** END OF FILE i2c.c ********************************/
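The deleted i2c.c bit-banged the bus with the low-level helpers declared in i2c.h below. Stripped of the retry loop and locking, the register-write transaction its i2c_writereg() performed reduces to the following sketch, built from the driver's own primitives (a summary for reference, not a drop-in replacement):

	static int i2c_writereg_sketch(unsigned char slave, unsigned char reg,
				       unsigned char value)
	{
		int error = 0;

		i2c_start();			/* SDA falls while SCL is high */
		i2c_outbyte(slave & 0xfe);	/* slave address + write bit (0) */
		if (!i2c_getack())
			error |= 1;
		i2c_outbyte(reg);		/* register index */
		if (!i2c_getack())
			error |= 2;
		i2c_outbyte(value);		/* register data */
		if (!i2c_getack())
			error |= 4;
		i2c_stop();			/* SDA rises while SCL is high */

		return -error;			/* 0 on success, negative if any ACK was missing */
	}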
diff --git a/arch/cris/arch-v32/drivers/i2c.h b/arch/cris/arch-v32/drivers/i2c.h
deleted file mode 100644 (file)
index d9cc856..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-
-#include <linux/init.h>
-
-/* High level I2C actions */
-int i2c_write(unsigned char theSlave, void *data, size_t nbytes);
-int i2c_read(unsigned char theSlave, void *data, size_t nbytes);
-int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue);
-unsigned char i2c_readreg(unsigned char theSlave, unsigned char theReg);
-
-/* Low level I2C */
-void i2c_start(void);
-void i2c_stop(void);
-void i2c_outbyte(unsigned char x);
-unsigned char i2c_inbyte(void);
-int i2c_getack(void);
-void i2c_sendack(void);
index 5c6d2a2a080ee4bbbbeca7c6bef44899389179c5..59028d0b981c094dd67a3570ed583d6bfa12ed75 100644 (file)
@@ -3,4 +3,3 @@
 #
 
 obj-$(CONFIG_ETRAX_NANDFLASH)   += nandflash.o
-obj-$(CONFIG_ETRAX_GPIO)        += gpio.o
diff --git a/arch/cris/arch-v32/drivers/mach-a3/gpio.c b/arch/cris/arch-v32/drivers/mach-a3/gpio.c
deleted file mode 100644 (file)
index c92e1da..0000000
+++ /dev/null
@@ -1,999 +0,0 @@
-/*
- * Artec-3 general port I/O device
- *
- * Copyright (c) 2007 Axis Communications AB
- *
- * Authors:    Bjorn Wesen      (initial version)
- *             Ola Knutsson     (LED handling)
- *             Johan Adolfsson  (read/set directions, write, port G,
- *                               port to ETRAX FS.
- *             Ricard Wanderlof (PWM for Artpec-3)
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-
-#include <asm/etraxgpio.h>
-#include <hwregs/reg_map.h>
-#include <hwregs/reg_rdwr.h>
-#include <hwregs/gio_defs.h>
-#include <hwregs/intr_vect_defs.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <mach/pinmux.h>
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-#include "../i2c.h"
-
-#define VIRT_I2C_ADDR 0x40
-#endif
-
-/* The following gio ports on ARTPEC-3 are available:
- * pa 32 bits
- * pb 32 bits
- * pc 16 bits
- * each port has a rw_px_dout, r_px_din and rw_px_oe register.
- */
-
-#define GPIO_MAJOR 120  /* experimental MAJOR number */
-
-#define I2C_INTERRUPT_BITS 0x300 /* i2c0_done and i2c1_done bits */
-
-#define D(x)
-
-#if 0
-static int dp_cnt;
-#define DP(x) \
-       do { \
-               dp_cnt++; \
-               if (dp_cnt % 1000 == 0) \
-                       x; \
-       } while (0)
-#else
-#define DP(x)
-#endif
-
-static DEFINE_MUTEX(gpio_mutex);
-static char gpio_name[] = "etrax gpio";
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-static int virtual_gpio_ioctl(struct file *file, unsigned int cmd,
-                             unsigned long arg);
-#endif
-static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-static ssize_t gpio_write(struct file *file, const char __user *buf,
-       size_t count, loff_t *off);
-static int gpio_open(struct inode *inode, struct file *filp);
-static int gpio_release(struct inode *inode, struct file *filp);
-static unsigned int gpio_poll(struct file *filp,
-       struct poll_table_struct *wait);
-
-/* private data per open() of this driver */
-
-struct gpio_private {
-       struct gpio_private *next;
-       /* The IO_CFG_WRITE_MODE_VALUE only support 8 bits: */
-       unsigned char clk_mask;
-       unsigned char data_mask;
-       unsigned char write_msb;
-       unsigned char pad1;
-       /* These fields are generic */
-       unsigned long highalarm, lowalarm;
-       wait_queue_head_t alarm_wq;
-       int minor;
-};
-
-static void gpio_set_alarm(struct gpio_private *priv);
-static int gpio_leds_ioctl(unsigned int cmd, unsigned long arg);
-static int gpio_pwm_ioctl(struct gpio_private *priv, unsigned int cmd,
-       unsigned long arg);
-
-
-/* linked list of alarms to check for */
-
-static struct gpio_private *alarmlist;
-
-static int wanted_interrupts;
-
-static DEFINE_SPINLOCK(gpio_lock);
-
-#define NUM_PORTS (GPIO_MINOR_LAST+1)
-#define GIO_REG_RD_ADDR(reg) \
-       (unsigned long *)(regi_gio + REG_RD_ADDR_gio_##reg)
-#define GIO_REG_WR_ADDR(reg) \
-       (unsigned long *)(regi_gio + REG_WR_ADDR_gio_##reg)
-static unsigned long led_dummy;
-static unsigned long port_d_dummy;     /* Only input on Artpec-3 */
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-static unsigned long port_e_dummy;     /* Non existent on Artpec-3 */
-static unsigned long virtual_dummy;
-static unsigned long virtual_rw_pv_oe = CONFIG_ETRAX_DEF_GIO_PV_OE;
-static unsigned short cached_virtual_gpio_read;
-#endif
-
-static unsigned long *data_out[NUM_PORTS] = {
-       GIO_REG_WR_ADDR(rw_pa_dout),
-       GIO_REG_WR_ADDR(rw_pb_dout),
-       &led_dummy,
-       GIO_REG_WR_ADDR(rw_pc_dout),
-       &port_d_dummy,
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       &port_e_dummy,
-       &virtual_dummy,
-#endif
-};
-
-static unsigned long *data_in[NUM_PORTS] = {
-       GIO_REG_RD_ADDR(r_pa_din),
-       GIO_REG_RD_ADDR(r_pb_din),
-       &led_dummy,
-       GIO_REG_RD_ADDR(r_pc_din),
-       GIO_REG_RD_ADDR(r_pd_din),
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       &port_e_dummy,
-       &virtual_dummy,
-#endif
-};
-
-static unsigned long changeable_dir[NUM_PORTS] = {
-       CONFIG_ETRAX_PA_CHANGEABLE_DIR,
-       CONFIG_ETRAX_PB_CHANGEABLE_DIR,
-       0,
-       CONFIG_ETRAX_PC_CHANGEABLE_DIR,
-       0,
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       0,
-       CONFIG_ETRAX_PV_CHANGEABLE_DIR,
-#endif
-};
-
-static unsigned long changeable_bits[NUM_PORTS] = {
-       CONFIG_ETRAX_PA_CHANGEABLE_BITS,
-       CONFIG_ETRAX_PB_CHANGEABLE_BITS,
-       0,
-       CONFIG_ETRAX_PC_CHANGEABLE_BITS,
-       0,
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       0,
-       CONFIG_ETRAX_PV_CHANGEABLE_BITS,
-#endif
-};
-
-static unsigned long *dir_oe[NUM_PORTS] = {
-       GIO_REG_WR_ADDR(rw_pa_oe),
-       GIO_REG_WR_ADDR(rw_pb_oe),
-       &led_dummy,
-       GIO_REG_WR_ADDR(rw_pc_oe),
-       &port_d_dummy,
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       &port_e_dummy,
-       &virtual_rw_pv_oe,
-#endif
-};
-
-static void gpio_set_alarm(struct gpio_private *priv)
-{
-       int bit;
-       int intr_cfg;
-       int mask;
-       int pins;
-       unsigned long flags;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-       intr_cfg = REG_RD_INT(gio, regi_gio, rw_intr_cfg);
-       pins = REG_RD_INT(gio, regi_gio, rw_intr_pins);
-       mask = REG_RD_INT(gio, regi_gio, rw_intr_mask) & I2C_INTERRUPT_BITS;
-
-       for (bit = 0; bit < 32; bit++) {
-               int intr = bit % 8;
-               int pin = bit / 8;
-               if (priv->minor < GPIO_MINOR_LEDS)
-                       pin += priv->minor * 4;
-               else
-                       pin += (priv->minor - 1) * 4;
-
-               if (priv->highalarm & (1<<bit)) {
-                       intr_cfg |= (regk_gio_hi << (intr * 3));
-                       mask |= 1 << intr;
-                       wanted_interrupts = mask & 0xff;
-                       pins |= pin << (intr * 4);
-               } else if (priv->lowalarm & (1<<bit)) {
-                       intr_cfg |= (regk_gio_lo << (intr * 3));
-                       mask |= 1 << intr;
-                       wanted_interrupts = mask & 0xff;
-                       pins |= pin << (intr * 4);
-               }
-       }
-
-       REG_WR_INT(gio, regi_gio, rw_intr_cfg, intr_cfg);
-       REG_WR_INT(gio, regi_gio, rw_intr_pins, pins);
-       REG_WR_INT(gio, regi_gio, rw_intr_mask, mask);
-
-       spin_unlock_irqrestore(&gpio_lock, flags);
-}
-
-static unsigned int gpio_poll(struct file *file, struct poll_table_struct *wait)
-{
-       unsigned int mask = 0;
-       struct gpio_private *priv = file->private_data;
-       unsigned long data;
-       unsigned long tmp;
-
-       if (priv->minor >= GPIO_MINOR_PWM0 &&
-           priv->minor <= GPIO_MINOR_LAST_PWM)
-               return 0;
-
-       poll_wait(file, &priv->alarm_wq, wait);
-       if (priv->minor <= GPIO_MINOR_D) {
-               data = readl(data_in[priv->minor]);
-               REG_WR_INT(gio, regi_gio, rw_ack_intr, wanted_interrupts);
-               tmp = REG_RD_INT(gio, regi_gio, rw_intr_mask);
-               tmp &= I2C_INTERRUPT_BITS;
-               tmp |= wanted_interrupts;
-               REG_WR_INT(gio, regi_gio, rw_intr_mask, tmp);
-       } else
-               return 0;
-
-       if ((data & priv->highalarm) || (~data & priv->lowalarm))
-               mask = POLLIN|POLLRDNORM;
-
-       DP(printk(KERN_DEBUG "gpio_poll ready: mask 0x%08X\n", mask));
-       return mask;
-}
-
-static irqreturn_t gpio_interrupt(int irq, void *dev_id)
-{
-       reg_gio_rw_intr_mask intr_mask;
-       reg_gio_r_masked_intr masked_intr;
-       reg_gio_rw_ack_intr ack_intr;
-       unsigned long flags;
-       unsigned long tmp;
-       unsigned long tmp2;
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       unsigned char enable_gpiov_ack = 0;
-#endif
-
-       /* Find what PA interrupts are active */
-       masked_intr = REG_RD(gio, regi_gio, r_masked_intr);
-       tmp = REG_TYPE_CONV(unsigned long, reg_gio_r_masked_intr, masked_intr);
-
-       /* Find those that we have enabled */
-       spin_lock_irqsave(&gpio_lock, flags);
-       tmp &= wanted_interrupts;
-       spin_unlock_irqrestore(&gpio_lock, flags);
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       /* Something changed on virtual GPIO. Interrupt is acked by
-        * reading the device.
-        */
-       if (tmp & (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN)) {
-               i2c_read(VIRT_I2C_ADDR, (void *)&cached_virtual_gpio_read,
-                       sizeof(cached_virtual_gpio_read));
-               enable_gpiov_ack = 1;
-       }
-#endif
-
-       /* Ack them */
-       ack_intr = REG_TYPE_CONV(reg_gio_rw_ack_intr, unsigned long, tmp);
-       REG_WR(gio, regi_gio, rw_ack_intr, ack_intr);
-
-       /* Disable those interrupts.. */
-       intr_mask = REG_RD(gio, regi_gio, rw_intr_mask);
-       tmp2 = REG_TYPE_CONV(unsigned long, reg_gio_rw_intr_mask, intr_mask);
-       tmp2 &= ~tmp;
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       /* Do not disable interrupt on virtual GPIO. Changes on virtual
-        * pins are only noticed by an interrupt.
-        */
-       if (enable_gpiov_ack)
-               tmp2 |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
-#endif
-       intr_mask = REG_TYPE_CONV(reg_gio_rw_intr_mask, unsigned long, tmp2);
-       REG_WR(gio, regi_gio, rw_intr_mask, intr_mask);
-
-       return IRQ_RETVAL(tmp);
-}
-
-static void gpio_write_bit(unsigned long *port, unsigned char data, int bit,
-       unsigned char clk_mask, unsigned char data_mask)
-{
-       unsigned long shadow = readl(port) & ~clk_mask;
-       writel(shadow, port);
-       if (data & 1 << bit)
-               shadow |= data_mask;
-       else
-               shadow &= ~data_mask;
-       writel(shadow, port);
-       /* For FPGA: min 5.0ns (DCC) before CCLK high */
-       shadow |= clk_mask;
-       writel(shadow, port);
-}
-
-static void gpio_write_byte(struct gpio_private *priv, unsigned long *port,
-               unsigned char data)
-{
-       int i;
-
-       if (priv->write_msb)
-               for (i = 7; i >= 0; i--)
-                       gpio_write_bit(port, data, i, priv->clk_mask,
-                               priv->data_mask);
-       else
-               for (i = 0; i <= 7; i++)
-                       gpio_write_bit(port, data, i, priv->clk_mask,
-                               priv->data_mask);
-}
-
-
-static ssize_t gpio_write(struct file *file, const char __user *buf,
-       size_t count, loff_t *off)
-{
-       struct gpio_private *priv = file->private_data;
-       unsigned long flags;
-       ssize_t retval = count;
-       /* Only bits 0-7 may be used for write operations but allow all
-          devices except leds... */
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       if (priv->minor == GPIO_MINOR_V)
-               return -EFAULT;
-#endif
-       if (priv->minor == GPIO_MINOR_LEDS)
-               return -EFAULT;
-
-       if (priv->minor >= GPIO_MINOR_PWM0 &&
-           priv->minor <= GPIO_MINOR_LAST_PWM)
-               return -EFAULT;
-
-       if (!access_ok(VERIFY_READ, buf, count))
-               return -EFAULT;
-
-       /* It must have been configured using the IO_CFG_WRITE_MODE */
-       /* Perhaps a better error code? */
-       if (priv->clk_mask == 0 || priv->data_mask == 0)
-               return -EPERM;
-
-       D(printk(KERN_DEBUG "gpio_write: %lu to data 0x%02X clk 0x%02X "
-               "msb: %i\n",
-               count, priv->data_mask, priv->clk_mask, priv->write_msb));
-
-       spin_lock_irqsave(&gpio_lock, flags);
-
-       while (count--)
-               gpio_write_byte(priv, data_out[priv->minor], *buf++);
-
-       spin_unlock_irqrestore(&gpio_lock, flags);
-       return retval;
-}
-
-static int gpio_open(struct inode *inode, struct file *filp)
-{
-       struct gpio_private *priv;
-       int p = iminor(inode);
-
-       if (p > GPIO_MINOR_LAST_PWM ||
-           (p > GPIO_MINOR_LAST && p < GPIO_MINOR_PWM0))
-               return -EINVAL;
-
-       priv = kmalloc(sizeof(struct gpio_private), GFP_KERNEL);
-
-       if (!priv)
-               return -ENOMEM;
-
-       mutex_lock(&gpio_mutex);
-       memset(priv, 0, sizeof(*priv));
-
-       priv->minor = p;
-       filp->private_data = priv;
-
-       /* initialize the io/alarm struct, not for PWM ports though  */
-       if (p <= GPIO_MINOR_LAST) {
-
-               priv->clk_mask = 0;
-               priv->data_mask = 0;
-               priv->highalarm = 0;
-               priv->lowalarm = 0;
-
-               init_waitqueue_head(&priv->alarm_wq);
-
-               /* link it into our alarmlist */
-               spin_lock_irq(&gpio_lock);
-               priv->next = alarmlist;
-               alarmlist = priv;
-               spin_unlock_irq(&gpio_lock);
-       }
-
-       mutex_unlock(&gpio_mutex);
-       return 0;
-}
-
-static int gpio_release(struct inode *inode, struct file *filp)
-{
-       struct gpio_private *p;
-       struct gpio_private *todel;
-       /* local copies while updating them: */
-       unsigned long a_high, a_low;
-
-       /* prepare to free private structure */
-       todel = filp->private_data;
-
-       /* unlink from alarmlist - only for non-PWM ports though */
-       if (todel->minor <= GPIO_MINOR_LAST) {
-               spin_lock_irq(&gpio_lock);
-               p = alarmlist;
-
-               if (p == todel)
-                       alarmlist = todel->next;
-                else {
-                       while (p->next != todel)
-                               p = p->next;
-                       p->next = todel->next;
-               }
-
-               /* Check if there are still any alarms set */
-               p = alarmlist;
-               a_high = 0;
-               a_low = 0;
-               while (p) {
-                       if (p->minor == GPIO_MINOR_A) {
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-                               p->lowalarm |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
-#endif
-                               a_high |= p->highalarm;
-                               a_low |= p->lowalarm;
-                       }
-
-                       p = p->next;
-               }
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       /* Variable 'a_low' needs to be set here again
-        * to ensure that interrupt for virtual GPIO is handled.
-        */
-               a_low |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
-#endif
-
-               spin_unlock_irq(&gpio_lock);
-       }
-       kfree(todel);
-
-       return 0;
-}
-
-/* Main device API. ioctl's to read/set/clear bits, as well as to
- * set alarms to wait for using a subsequent select().
- */
-
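The comment above describes the other half of the removed interface: alarms armed via ioctl() and waited for with select()/poll(), as implemented by gpio_set_alarm() and gpio_poll() in this file. A hedged userspace sketch of that usage, again assuming the old <linux/etraxgpio.h> definitions:

	#include <fcntl.h>
	#include <poll.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/etraxgpio.h>

	/* Illustrative helper, not part of the driver: block until PA3 goes high. */
	int wait_for_pa3_high(void)
	{
		struct pollfd pfd;
		unsigned long val = 0;
		int fd = open("/dev/gpioa", O_RDWR);

		if (fd < 0)
			return -1;
		ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_HIGHALARM), 1UL << 3);
		pfd.fd = fd;
		pfd.events = POLLIN;
		poll(&pfd, 1, -1);	/* gpio_poll() reports POLLIN|POLLRDNORM */
		ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_READ_INBITS), &val);
		close(fd);
		return (int)(val >> 3) & 1;
	}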
-inline unsigned long setget_input(struct gpio_private *priv, unsigned long arg)
-{
-       /* Set direction 0=unchanged 1=input,
-        * return mask with 1=input
-        */
-       unsigned long flags;
-       unsigned long dir_shadow;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-
-       dir_shadow = readl(dir_oe[priv->minor]) &
-               ~(arg & changeable_dir[priv->minor]);
-       writel(dir_shadow, dir_oe[priv->minor]);
-
-       spin_unlock_irqrestore(&gpio_lock, flags);
-
-       if (priv->minor == GPIO_MINOR_C)
-               dir_shadow ^= 0xFFFF;           /* Only 16 bits */
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       else if (priv->minor == GPIO_MINOR_V)
-               dir_shadow ^= 0xFFFF;           /* Only 16 bits */
-#endif
-       else
-               dir_shadow ^= 0xFFFFFFFF;       /* PA, PB and PD 32 bits */
-
-       return dir_shadow;
-
-} /* setget_input */
-
-static inline unsigned long setget_output(struct gpio_private *priv,
-       unsigned long arg)
-{
-       unsigned long flags;
-       unsigned long dir_shadow;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-
-       dir_shadow = readl(dir_oe[priv->minor]) |
-               (arg & changeable_dir[priv->minor]);
-       writel(dir_shadow, dir_oe[priv->minor]);
-
-       spin_unlock_irqrestore(&gpio_lock, flags);
-       return dir_shadow;
-} /* setget_output */
-
-static long gpio_ioctl_unlocked(struct file *file,
-       unsigned int cmd, unsigned long arg)
-{
-       unsigned long flags;
-       unsigned long val;
-       unsigned long shadow;
-       struct gpio_private *priv = file->private_data;
-
-       if (_IOC_TYPE(cmd) != ETRAXGPIO_IOCTYPE)
-               return -ENOTTY;
-
-       /* Check for special ioctl handlers first */
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       if (priv->minor == GPIO_MINOR_V)
-               return virtual_gpio_ioctl(file, cmd, arg);
-#endif
-
-       if (priv->minor == GPIO_MINOR_LEDS)
-               return gpio_leds_ioctl(cmd, arg);
-
-       if (priv->minor >= GPIO_MINOR_PWM0 &&
-           priv->minor <= GPIO_MINOR_LAST_PWM)
-               return gpio_pwm_ioctl(priv, cmd, arg);
-
-       switch (_IOC_NR(cmd)) {
-       case IO_READBITS: /* Use IO_READ_INBITS and IO_READ_OUTBITS instead */
-               /* Read the port. */
-               return readl(data_in[priv->minor]);
-       case IO_SETBITS:
-               spin_lock_irqsave(&gpio_lock, flags);
-               /* Set changeable bits with a 1 in arg. */
-               shadow = readl(data_out[priv->minor]) |
-                       (arg & changeable_bits[priv->minor]);
-               writel(shadow, data_out[priv->minor]);
-               spin_unlock_irqrestore(&gpio_lock, flags);
-               break;
-       case IO_CLRBITS:
-               spin_lock_irqsave(&gpio_lock, flags);
-               /* Clear changeable bits with a 1 in arg. */
-               shadow = readl(data_out[priv->minor]) &
-                       ~(arg & changeable_bits[priv->minor]);
-               writel(shadow, data_out[priv->minor]);
-               spin_unlock_irqrestore(&gpio_lock, flags);
-               break;
-       case IO_HIGHALARM:
-               /* Set alarm when bits with 1 in arg go high. */
-               priv->highalarm |= arg;
-               gpio_set_alarm(priv);
-               break;
-       case IO_LOWALARM:
-               /* Set alarm when bits with 1 in arg go low. */
-               priv->lowalarm |= arg;
-               gpio_set_alarm(priv);
-               break;
-       case IO_CLRALARM:
-               /* Clear alarm for bits with 1 in arg. */
-               priv->highalarm &= ~arg;
-               priv->lowalarm  &= ~arg;
-               gpio_set_alarm(priv);
-               break;
-       case IO_READDIR: /* Use IO_SETGET_INPUT/OUTPUT instead! */
-               /* Read direction 0=input 1=output */
-               return readl(dir_oe[priv->minor]);
-
-       case IO_SETINPUT: /* Use IO_SETGET_INPUT instead! */
-               /* Set direction 0=unchanged 1=input,
-                * return mask with 1=input
-                */
-               return setget_input(priv, arg);
-
-       case IO_SETOUTPUT: /* Use IO_SETGET_OUTPUT instead! */
-               /* Set direction 0=unchanged 1=output,
-                * return mask with 1=output
-                */
-               return setget_output(priv, arg);
-
-       case IO_CFG_WRITE_MODE:
-       {
-               int res = -EPERM;
-               unsigned long dir_shadow, clk_mask, data_mask, write_msb;
-
-               clk_mask = arg & 0xFF;
-               data_mask = (arg >> 8) & 0xFF;
-               write_msb = (arg >> 16) & 0x01;
-
-               /* Check if we're allowed to change the bits and
-                * the direction is correct
-                */
-               spin_lock_irqsave(&gpio_lock, flags);
-               dir_shadow = readl(dir_oe[priv->minor]);
-               if ((clk_mask & changeable_bits[priv->minor]) &&
-                   (data_mask & changeable_bits[priv->minor]) &&
-                   (clk_mask & dir_shadow) &&
-                   (data_mask & dir_shadow)) {
-                       priv->clk_mask = clk_mask;
-                       priv->data_mask = data_mask;
-                       priv->write_msb = write_msb;
-                       res = 0;
-               }
-               spin_unlock_irqrestore(&gpio_lock, flags);
-
-               return res;
-       }
-       case IO_READ_INBITS:
-               /* *arg is result of reading the input pins */
-               val = readl(data_in[priv->minor]);
-               if (copy_to_user((void __user *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               return 0;
-       case IO_READ_OUTBITS:
-                /* *arg is result of reading the output shadow */
-               val = *data_out[priv->minor];
-               if (copy_to_user((void __user *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               break;
-       case IO_SETGET_INPUT:
-               /* bits set in *arg are set to input,
-                * *arg updated with current input pins.
-                */
-               if (copy_from_user(&val, (void __user *)arg, sizeof(val)))
-                       return -EFAULT;
-               val = setget_input(priv, val);
-               if (copy_to_user((void __user *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               break;
-       case IO_SETGET_OUTPUT:
-               /* bits set in *arg are set to output,
-                * *arg updated with current output pins.
-                */
-               if (copy_from_user(&val, (void __user *)arg, sizeof(val)))
-                       return -EFAULT;
-               val = setget_output(priv, val);
-               if (copy_to_user((void __user *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               break;
-       default:
-               return -EINVAL;
-       } /* switch */
-
-       return 0;
-}
-
-static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       long ret;
-
-       mutex_lock(&gpio_mutex);
-       ret = gpio_ioctl_unlocked(file, cmd, arg);
-       mutex_unlock(&gpio_mutex);
-
-       return ret;
-}
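
The IO_CFG_WRITE_MODE case above packs three fields into its single unsigned long argument: the clock mask in bits 0-7, the data mask in bits 8-15 and an MSB-first flag in bit 16; the handler then refuses the configuration unless both masks are changeable and currently configured as outputs. A stand-alone illustration of just the packing and unpacking (hypothetical mask values, no ioctl or hardware involved):

#include <stdio.h>

/* IO_CFG_WRITE_MODE argument layout used by the removed handler:
 * clock mask in bits 0-7, data mask in bits 8-15, MSB-first flag in bit 16. */
static unsigned long pack_write_mode(unsigned char clk_mask,
                                     unsigned char data_mask,
                                     unsigned char write_msb)
{
        return clk_mask | ((unsigned long)data_mask << 8) |
               ((unsigned long)(write_msb & 1) << 16);
}

int main(void)
{
        /* hypothetical masks: clock on bit 0, data on bit 1, MSB first */
        unsigned long arg = pack_write_mode(0x01, 0x02, 1);

        printf("arg       = 0x%05lx\n", arg);                 /* 0x10201 */
        printf("clk_mask  = 0x%02lx\n", arg & 0xFF);
        printf("data_mask = 0x%02lx\n", (arg >> 8) & 0xFF);
        printf("write_msb = %lu\n", (arg >> 16) & 0x01);
        return 0;
}
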
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-static int virtual_gpio_ioctl(struct file *file, unsigned int cmd,
-       unsigned long arg)
-{
-       unsigned long flags;
-       unsigned short val;
-       unsigned short shadow;
-       struct gpio_private *priv = file->private_data;
-
-       switch (_IOC_NR(cmd)) {
-       case IO_SETBITS:
-               spin_lock_irqsave(&gpio_lock, flags);
-               /* Set changeable bits with a 1 in arg. */
-               i2c_read(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
-               shadow |= ~readl(dir_oe[priv->minor]) |
-                       (arg & changeable_bits[priv->minor]);
-               i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
-               spin_unlock_irqrestore(&gpio_lock, flags);
-               break;
-       case IO_CLRBITS:
-               spin_lock_irqsave(&gpio_lock, flags);
-               /* Clear changeable bits with a 1 in arg. */
-               i2c_read(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
-               shadow |= ~readl(dir_oe[priv->minor]) &
-                       ~(arg & changeable_bits[priv->minor]);
-               i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
-               spin_unlock_irqrestore(&gpio_lock, flags);
-               break;
-       case IO_HIGHALARM:
-               /* Set alarm when bits with 1 in arg go high. */
-               priv->highalarm |= arg;
-               break;
-       case IO_LOWALARM:
-               /* Set alarm when bits with 1 in arg go low. */
-               priv->lowalarm |= arg;
-               break;
-       case IO_CLRALARM:
-               /* Clear alarm for bits with 1 in arg. */
-               priv->highalarm &= ~arg;
-               priv->lowalarm  &= ~arg;
-               break;
-       case IO_CFG_WRITE_MODE:
-       {
-               unsigned long dir_shadow;
-               dir_shadow = readl(dir_oe[priv->minor]);
-
-               priv->clk_mask = arg & 0xFF;
-               priv->data_mask = (arg >> 8) & 0xFF;
-               priv->write_msb = (arg >> 16) & 0x01;
-               /* Check if we're allowed to change the bits and
-                * the direction is correct
-                */
-               if (!((priv->clk_mask & changeable_bits[priv->minor]) &&
-                     (priv->data_mask & changeable_bits[priv->minor]) &&
-                     (priv->clk_mask & dir_shadow) &&
-                     (priv->data_mask & dir_shadow))) {
-                       priv->clk_mask = 0;
-                       priv->data_mask = 0;
-                       return -EPERM;
-               }
-               break;
-       }
-       case IO_READ_INBITS:
-               /* *arg is result of reading the input pins */
-               val = cached_virtual_gpio_read & ~readl(dir_oe[priv->minor]);
-               if (copy_to_user((void __user *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               return 0;
-
-       case IO_READ_OUTBITS:
-                /* *arg is result of reading the output shadow */
-               i2c_read(VIRT_I2C_ADDR, (void *)&val, sizeof(val));
-               val &= readl(dir_oe[priv->minor]);
-               if (copy_to_user((void __user *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               break;
-       case IO_SETGET_INPUT:
-       {
-               /* bits set in *arg are set to input,
-                * *arg updated with current input pins.
-                */
-               unsigned short input_mask = ~readl(dir_oe[priv->minor]);
-               if (copy_from_user(&val, (void __user *)arg, sizeof(val)))
-                       return -EFAULT;
-               val = setget_input(priv, val);
-               if (copy_to_user((void __user *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               if ((input_mask & val) != input_mask) {
-                       /* Input pins changed. All ports desired as input
-                        * should be set to logic 1.
-                        */
-                       unsigned short change = input_mask ^ val;
-                       i2c_read(VIRT_I2C_ADDR, (void *)&shadow,
-                               sizeof(shadow));
-                       shadow &= ~change;
-                       shadow |= val;
-                       i2c_write(VIRT_I2C_ADDR, (void *)&shadow,
-                               sizeof(shadow));
-               }
-               break;
-       }
-       case IO_SETGET_OUTPUT:
-               /* bits set in *arg are set to output,
-                * *arg updated with current output pins.
-                */
-               if (copy_from_user(&val, (void __user *)arg, sizeof(val)))
-                       return -EFAULT;
-               val = setget_output(priv, val);
-               if (copy_to_user((void __user *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               break;
-       default:
-               return -EINVAL;
-       } /* switch */
-       return 0;
-}
-#endif /* CONFIG_ETRAX_VIRTUAL_GPIO */
-
-static int gpio_leds_ioctl(unsigned int cmd, unsigned long arg)
-{
-       unsigned char green;
-       unsigned char red;
-
-       switch (_IOC_NR(cmd)) {
-       case IO_LEDACTIVE_SET:
-               green = ((unsigned char) arg) & 1;
-               red   = (((unsigned char) arg) >> 1) & 1;
-               CRIS_LED_ACTIVE_SET_G(green);
-               CRIS_LED_ACTIVE_SET_R(red);
-               break;
-
-       default:
-               return -EINVAL;
-       } /* switch */
-
-       return 0;
-}
-
-static int gpio_pwm_set_mode(unsigned long arg, int pwm_port)
-{
-       int pinmux_pwm = pinmux_pwm0 + pwm_port;
-       int mode;
-       reg_gio_rw_pwm0_ctrl rw_pwm_ctrl = {
-               .ccd_val = 0,
-               .ccd_override = regk_gio_no,
-               .mode = regk_gio_no
-       };
-       int allocstatus;
-
-       if (get_user(mode, &((struct io_pwm_set_mode *) arg)->mode))
-               return -EFAULT;
-       rw_pwm_ctrl.mode = mode;
-       if (mode != PWM_OFF)
-               allocstatus = crisv32_pinmux_alloc_fixed(pinmux_pwm);
-       else
-               allocstatus = crisv32_pinmux_dealloc_fixed(pinmux_pwm);
-       if (allocstatus)
-               return allocstatus;
-       REG_WRITE(reg_gio_rw_pwm0_ctrl, REG_ADDR(gio, regi_gio, rw_pwm0_ctrl) +
-               12 * pwm_port, rw_pwm_ctrl);
-       return 0;
-}
-
-static int gpio_pwm_set_period(unsigned long arg, int pwm_port)
-{
-       struct io_pwm_set_period periods;
-       reg_gio_rw_pwm0_var rw_pwm_widths;
-
-       if (copy_from_user(&periods, (void __user *)arg, sizeof(periods)))
-               return -EFAULT;
-       if (periods.lo > 8191 || periods.hi > 8191)
-               return -EINVAL;
-       rw_pwm_widths.lo = periods.lo;
-       rw_pwm_widths.hi = periods.hi;
-       REG_WRITE(reg_gio_rw_pwm0_var, REG_ADDR(gio, regi_gio, rw_pwm0_var) +
-               12 * pwm_port, rw_pwm_widths);
-       return 0;
-}
-
-static int gpio_pwm_set_duty(unsigned long arg, int pwm_port)
-{
-       unsigned int duty;
-       reg_gio_rw_pwm0_data rw_pwm_duty;
-
-       if (get_user(duty, &((struct io_pwm_set_duty *) arg)->duty))
-               return -EFAULT;
-       if (duty > 255)
-               return -EINVAL;
-       rw_pwm_duty.data = duty;
-       REG_WRITE(reg_gio_rw_pwm0_data, REG_ADDR(gio, regi_gio, rw_pwm0_data) +
-               12 * pwm_port, rw_pwm_duty);
-       return 0;
-}
-
-static int gpio_pwm_ioctl(struct gpio_private *priv, unsigned int cmd,
-       unsigned long arg)
-{
-       int pwm_port = priv->minor - GPIO_MINOR_PWM0;
-
-       switch (_IOC_NR(cmd)) {
-       case IO_PWM_SET_MODE:
-               return gpio_pwm_set_mode(arg, pwm_port);
-       case IO_PWM_SET_PERIOD:
-               return gpio_pwm_set_period(arg, pwm_port);
-       case IO_PWM_SET_DUTY:
-               return gpio_pwm_set_duty(arg, pwm_port);
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
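
gpio_pwm_set_period() and gpio_pwm_set_duty() above reject out-of-range values before touching the hardware (period halves above 8191 and duty cycles above 255 return -EINVAL), and each PWM port's registers are addressed at a fixed 12-byte stride from the pwm0 block. A small sketch of those checks and the stride arithmetic, with a made-up base address:

#include <stdio.h>

#define PWM_REG_STRIDE  12              /* per-port register spacing used above */
#define PWM0_VAR_BASE   0xb0000000u     /* hypothetical address of rw_pwm0_var  */

/* Mirror of the range checks in gpio_pwm_set_period()/gpio_pwm_set_duty(). */
static int pwm_args_valid(unsigned int lo, unsigned int hi, unsigned int duty)
{
        if (lo > 8191 || hi > 8191)     /* 13-bit period fields */
                return 0;
        if (duty > 255)                 /* 8-bit duty field     */
                return 0;
        return 1;
}

int main(void)
{
        int port;

        printf("4000/4095/128 valid: %d\n", pwm_args_valid(4000, 4095, 128)); /* 1 */
        printf("9000/0/0      valid: %d\n", pwm_args_valid(9000, 0, 0));      /* 0 */

        for (port = 0; port < 3; port++)
                printf("pwm%d var register at 0x%08x\n",
                       port, PWM0_VAR_BASE + PWM_REG_STRIDE * port);
        return 0;
}
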
-
-static const struct file_operations gpio_fops = {
-       .owner          = THIS_MODULE,
-       .poll           = gpio_poll,
-       .unlocked_ioctl = gpio_ioctl,
-       .write          = gpio_write,
-       .open           = gpio_open,
-       .release        = gpio_release,
-       .llseek         = noop_llseek,
-};
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-static void __init virtual_gpio_init(void)
-{
-       reg_gio_rw_intr_cfg intr_cfg;
-       reg_gio_rw_intr_mask intr_mask;
-       unsigned short shadow;
-
-       shadow = ~virtual_rw_pv_oe; /* Input ports should be set to logic 1 */
-       shadow |= CONFIG_ETRAX_DEF_GIO_PV_OUT;
-       i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
-
-       /* Set the interrupt mask and the state on which the interrupt shall trigger.
-        * For virtual gpio the interrupt shall trigger on logic '0'.
-        */
-       intr_cfg = REG_RD(gio, regi_gio, rw_intr_cfg);
-       intr_mask = REG_RD(gio, regi_gio, rw_intr_mask);
-
-       switch (CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN) {
-       case 0:
-               intr_cfg.pa0 = regk_gio_lo;
-               intr_mask.pa0 = regk_gio_yes;
-               break;
-       case 1:
-               intr_cfg.pa1 = regk_gio_lo;
-               intr_mask.pa1 = regk_gio_yes;
-               break;
-       case 2:
-               intr_cfg.pa2 = regk_gio_lo;
-               intr_mask.pa2 = regk_gio_yes;
-               break;
-       case 3:
-               intr_cfg.pa3 = regk_gio_lo;
-               intr_mask.pa3 = regk_gio_yes;
-               break;
-       case 4:
-               intr_cfg.pa4 = regk_gio_lo;
-               intr_mask.pa4 = regk_gio_yes;
-               break;
-       case 5:
-               intr_cfg.pa5 = regk_gio_lo;
-               intr_mask.pa5 = regk_gio_yes;
-               break;
-       case 6:
-               intr_cfg.pa6 = regk_gio_lo;
-               intr_mask.pa6 = regk_gio_yes;
-               break;
-       case 7:
-               intr_cfg.pa7 = regk_gio_lo;
-               intr_mask.pa7 = regk_gio_yes;
-               break;
-       }
-
-       REG_WR(gio, regi_gio, rw_intr_cfg, intr_cfg);
-       REG_WR(gio, regi_gio, rw_intr_mask, intr_mask);
-}
-#endif
-
-/* main driver initialization routine, called from mem.c */
-
-static int __init gpio_init(void)
-{
-       int res, res2;
-
-       printk(KERN_INFO "ETRAX FS GPIO driver v2.7, (c) 2003-2008 "
-               "Axis Communications AB\n");
-
-       /* do the formalities */
-
-       res = register_chrdev(GPIO_MAJOR, gpio_name, &gpio_fops);
-       if (res < 0) {
-               printk(KERN_ERR "gpio: couldn't get a major number.\n");
-               return res;
-       }
-
-       /* Clear all leds */
-       CRIS_LED_NETWORK_GRP0_SET(0);
-       CRIS_LED_NETWORK_GRP1_SET(0);
-       CRIS_LED_ACTIVE_SET(0);
-       CRIS_LED_DISK_READ(0);
-       CRIS_LED_DISK_WRITE(0);
-
-       res2 = request_irq(GIO_INTR_VECT, gpio_interrupt,
-               IRQF_SHARED, "gpio", &alarmlist);
-       if (res2) {
-               printk(KERN_ERR "err: irq for gpio\n");
-               return res2;
-       }
-
-       /* No IRQs by default. */
-       REG_WR_INT(gio, regi_gio, rw_intr_pins, 0);
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       virtual_gpio_init();
-#endif
-
-       return res;
-}
-
-/* this makes sure that gpio_init is called during kernel boot */
-
-module_init(gpio_init);
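
Throughout the driver deleted above, IO_SETBITS and IO_CLRBITS are a read-modify-write of the data-out register under gpio_lock, with the per-port changeable_bits mask deciding which bits a user may actually touch. The mask arithmetic on its own, as a runnable user-space sketch with hypothetical values (no locking or hardware access):

#include <stdio.h>

/* Hypothetical stand-ins for one port's output shadow and its
 * changeable_bits mask; the real driver keeps these per minor. */
static unsigned long data_out_shadow = 0x00F0;
static const unsigned long changeable = 0x00FF; /* user may touch bits 0-7 only */

static unsigned long do_setbits(unsigned long arg)
{
        /* IO_SETBITS: set changeable bits that have a 1 in arg. */
        data_out_shadow |= (arg & changeable);
        return data_out_shadow;
}

static unsigned long do_clrbits(unsigned long arg)
{
        /* IO_CLRBITS: clear changeable bits that have a 1 in arg. */
        data_out_shadow &= ~(arg & changeable);
        return data_out_shadow;
}

int main(void)
{
        printf("after set 0x0F0F: 0x%04lx\n", do_setbits(0x0F0F)); /* 0x00ff */
        printf("after clr 0x00F0: 0x%04lx\n", do_clrbits(0x00F0)); /* 0x000f */
        return 0;
}
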
index 5c6d2a2a080ee4bbbbeca7c6bef44899389179c5..59028d0b981c094dd67a3570ed583d6bfa12ed75 100644 (file)
@@ -3,4 +3,3 @@
 #
 
 obj-$(CONFIG_ETRAX_NANDFLASH)   += nandflash.o
-obj-$(CONFIG_ETRAX_GPIO)        += gpio.o
diff --git a/arch/cris/arch-v32/drivers/mach-fs/gpio.c b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
deleted file mode 100644 (file)
index 72968fb..0000000
+++ /dev/null
@@ -1,978 +0,0 @@
-/*
- * ETRAX CRISv32 general port I/O device
- *
- * Copyright (c) 1999-2006 Axis Communications AB
- *
- * Authors:    Bjorn Wesen      (initial version)
- *             Ola Knutsson     (LED handling)
- *             Johan Adolfsson  (read/set directions, write, port G,
- *                               port to ETRAX FS).
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-
-#include <asm/etraxgpio.h>
-#include <hwregs/reg_map.h>
-#include <hwregs/reg_rdwr.h>
-#include <hwregs/gio_defs.h>
-#include <hwregs/intr_vect_defs.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-#include "../i2c.h"
-
-#define VIRT_I2C_ADDR 0x40
-#endif
-
-/* The following gio ports on ETRAX FS are available:
- * pa  8 bits, supports interrupts off, hi, low, set, posedge, negedge, anyedge
- * pb 18 bits
- * pc 18 bits
- * pd 18 bits
- * pe 18 bits
- * each port has a rw_px_dout, r_px_din and rw_px_oe register.
- */
-
-#define GPIO_MAJOR 120  /* experimental MAJOR number */
-
-#define D(x)
-
-#if 0
-static int dp_cnt;
-#define DP(x) \
-       do { \
-               dp_cnt++; \
-               if (dp_cnt % 1000 == 0) \
-                       x; \
-       } while (0)
-#else
-#define DP(x)
-#endif
-
-static DEFINE_MUTEX(gpio_mutex);
-static char gpio_name[] = "etrax gpio";
-
-#if 0
-static wait_queue_head_t *gpio_wq;
-#endif
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-static int virtual_gpio_ioctl(struct file *file, unsigned int cmd,
-       unsigned long arg);
-#endif
-static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-static ssize_t gpio_write(struct file *file, const char *buf, size_t count,
-       loff_t *off);
-static int gpio_open(struct inode *inode, struct file *filp);
-static int gpio_release(struct inode *inode, struct file *filp);
-static unsigned int gpio_poll(struct file *filp,
-       struct poll_table_struct *wait);
-
-/* private data per open() of this driver */
-
-struct gpio_private {
-       struct gpio_private *next;
-       /* The IO_CFG_WRITE_MODE_VALUE only support 8 bits: */
-       unsigned char clk_mask;
-       unsigned char data_mask;
-       unsigned char write_msb;
-       unsigned char pad1;
-       /* These fields are generic */
-       unsigned long highalarm, lowalarm;
-       wait_queue_head_t alarm_wq;
-       int minor;
-};
-
-/* linked list of alarms to check for */
-
-static struct gpio_private *alarmlist;
-
-static int gpio_some_alarms; /* Set if someone uses alarm */
-static unsigned long gpio_pa_high_alarms;
-static unsigned long gpio_pa_low_alarms;
-
-static DEFINE_SPINLOCK(alarm_lock);
-
-#define NUM_PORTS (GPIO_MINOR_LAST+1)
-#define GIO_REG_RD_ADDR(reg) \
-       (volatile unsigned long *)(regi_gio + REG_RD_ADDR_gio_##reg)
-#define GIO_REG_WR_ADDR(reg) \
-       (volatile unsigned long *)(regi_gio + REG_RD_ADDR_gio_##reg)
-unsigned long led_dummy;
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-static unsigned long virtual_dummy;
-static unsigned long virtual_rw_pv_oe = CONFIG_ETRAX_DEF_GIO_PV_OE;
-static unsigned short cached_virtual_gpio_read;
-#endif
-
-static volatile unsigned long *data_out[NUM_PORTS] = {
-       GIO_REG_WR_ADDR(rw_pa_dout),
-       GIO_REG_WR_ADDR(rw_pb_dout),
-       &led_dummy,
-       GIO_REG_WR_ADDR(rw_pc_dout),
-       GIO_REG_WR_ADDR(rw_pd_dout),
-       GIO_REG_WR_ADDR(rw_pe_dout),
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       &virtual_dummy,
-#endif
-};
-
-static volatile unsigned long *data_in[NUM_PORTS] = {
-       GIO_REG_RD_ADDR(r_pa_din),
-       GIO_REG_RD_ADDR(r_pb_din),
-       &led_dummy,
-       GIO_REG_RD_ADDR(r_pc_din),
-       GIO_REG_RD_ADDR(r_pd_din),
-       GIO_REG_RD_ADDR(r_pe_din),
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       &virtual_dummy,
-#endif
-};
-
-static unsigned long changeable_dir[NUM_PORTS] = {
-       CONFIG_ETRAX_PA_CHANGEABLE_DIR,
-       CONFIG_ETRAX_PB_CHANGEABLE_DIR,
-       0,
-       CONFIG_ETRAX_PC_CHANGEABLE_DIR,
-       CONFIG_ETRAX_PD_CHANGEABLE_DIR,
-       CONFIG_ETRAX_PE_CHANGEABLE_DIR,
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       CONFIG_ETRAX_PV_CHANGEABLE_DIR,
-#endif
-};
-
-static unsigned long changeable_bits[NUM_PORTS] = {
-       CONFIG_ETRAX_PA_CHANGEABLE_BITS,
-       CONFIG_ETRAX_PB_CHANGEABLE_BITS,
-       0,
-       CONFIG_ETRAX_PC_CHANGEABLE_BITS,
-       CONFIG_ETRAX_PD_CHANGEABLE_BITS,
-       CONFIG_ETRAX_PE_CHANGEABLE_BITS,
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       CONFIG_ETRAX_PV_CHANGEABLE_BITS,
-#endif
-};
-
-static volatile unsigned long *dir_oe[NUM_PORTS] = {
-       GIO_REG_WR_ADDR(rw_pa_oe),
-       GIO_REG_WR_ADDR(rw_pb_oe),
-       &led_dummy,
-       GIO_REG_WR_ADDR(rw_pc_oe),
-       GIO_REG_WR_ADDR(rw_pd_oe),
-       GIO_REG_WR_ADDR(rw_pe_oe),
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       &virtual_rw_pv_oe,
-#endif
-};
-
-
-
-static unsigned int gpio_poll(struct file *file, struct poll_table_struct *wait)
-{
-       unsigned int mask = 0;
-       struct gpio_private *priv = file->private_data;
-       unsigned long data;
-       poll_wait(file, &priv->alarm_wq, wait);
-       if (priv->minor == GPIO_MINOR_A) {
-               reg_gio_rw_intr_cfg intr_cfg;
-               unsigned long tmp;
-               unsigned long flags;
-
-               local_irq_save(flags);
-               data = REG_TYPE_CONV(unsigned long, reg_gio_r_pa_din,
-                       REG_RD(gio, regi_gio, r_pa_din));
-               /* PA has interrupt support:
-                * let's activate high for pins that are low and have highalarm set
-                */
-               intr_cfg = REG_RD(gio, regi_gio, rw_intr_cfg);
-
-               tmp = ~data & priv->highalarm & 0xFF;
-               if (tmp & (1 << 0))
-                       intr_cfg.pa0 = regk_gio_hi;
-               if (tmp & (1 << 1))
-                       intr_cfg.pa1 = regk_gio_hi;
-               if (tmp & (1 << 2))
-                       intr_cfg.pa2 = regk_gio_hi;
-               if (tmp & (1 << 3))
-                       intr_cfg.pa3 = regk_gio_hi;
-               if (tmp & (1 << 4))
-                       intr_cfg.pa4 = regk_gio_hi;
-               if (tmp & (1 << 5))
-                       intr_cfg.pa5 = regk_gio_hi;
-               if (tmp & (1 << 6))
-                       intr_cfg.pa6 = regk_gio_hi;
-               if (tmp & (1 << 7))
-                       intr_cfg.pa7 = regk_gio_hi;
-               /*
-                * let's activate low for pins that are high and have lowalarm set
-                */
-               tmp = data & priv->lowalarm & 0xFF;
-               if (tmp & (1 << 0))
-                       intr_cfg.pa0 = regk_gio_lo;
-               if (tmp & (1 << 1))
-                       intr_cfg.pa1 = regk_gio_lo;
-               if (tmp & (1 << 2))
-                       intr_cfg.pa2 = regk_gio_lo;
-               if (tmp & (1 << 3))
-                       intr_cfg.pa3 = regk_gio_lo;
-               if (tmp & (1 << 4))
-                       intr_cfg.pa4 = regk_gio_lo;
-               if (tmp & (1 << 5))
-                       intr_cfg.pa5 = regk_gio_lo;
-               if (tmp & (1 << 6))
-                       intr_cfg.pa6 = regk_gio_lo;
-               if (tmp & (1 << 7))
-                       intr_cfg.pa7 = regk_gio_lo;
-
-               REG_WR(gio, regi_gio, rw_intr_cfg, intr_cfg);
-               local_irq_restore(flags);
-       } else if (priv->minor <= GPIO_MINOR_E)
-               data = *data_in[priv->minor];
-       else
-               return 0;
-
-       if ((data & priv->highalarm) || (~data & priv->lowalarm))
-               mask = POLLIN|POLLRDNORM;
-
-       DP(printk(KERN_DEBUG "gpio_poll ready: mask 0x%08X\n", mask));
-       return mask;
-}
-
-int etrax_gpio_wake_up_check(void)
-{
-       struct gpio_private *priv;
-       unsigned long data = 0;
-       unsigned long flags;
-       int ret = 0;
-       spin_lock_irqsave(&alarm_lock, flags);
-       priv = alarmlist;
-       while (priv) {
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-               if (priv->minor == GPIO_MINOR_V)
-                       data = (unsigned long)cached_virtual_gpio_read;
-               else {
-                       data = *data_in[priv->minor];
-                       if (priv->minor == GPIO_MINOR_A)
-                               priv->lowalarm |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
-               }
-#else
-               data = *data_in[priv->minor];
-#endif
-               if ((data & priv->highalarm) ||
-                   (~data & priv->lowalarm)) {
-                       DP(printk(KERN_DEBUG
-                               "etrax_gpio_wake_up_check %i\n", priv->minor));
-                       wake_up_interruptible(&priv->alarm_wq);
-                       ret = 1;
-               }
-               priv = priv->next;
-       }
-       spin_unlock_irqrestore(&alarm_lock, flags);
-       return ret;
-}
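
etrax_gpio_wake_up_check() above and gpio_poll() share the same wake-up test: an alarm fires when any highalarm pin reads 1 or any lowalarm pin reads 0. In isolation, as a plain C sketch with example masks:

#include <stdio.h>

/* Alarm test shared by etrax_gpio_wake_up_check() and gpio_poll():
 * fire if any highalarm pin reads 1 or any lowalarm pin reads 0. */
static int alarm_fires(unsigned long data,
                       unsigned long highalarm, unsigned long lowalarm)
{
        return (data & highalarm) || (~data & lowalarm);
}

int main(void)
{
        printf("%d\n", alarm_fires(0x01, 0x01, 0x00)); /* 1: pin 0 is high   */
        printf("%d\n", alarm_fires(0x00, 0x00, 0x02)); /* 1: pin 1 is low    */
        printf("%d\n", alarm_fires(0x02, 0x01, 0x00)); /* 0: nothing matches */
        return 0;
}
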
-
-static irqreturn_t
-gpio_poll_timer_interrupt(int irq, void *dev_id)
-{
-       if (gpio_some_alarms)
-               return IRQ_RETVAL(etrax_gpio_wake_up_check());
-       return IRQ_NONE;
-}
-
-static irqreturn_t
-gpio_pa_interrupt(int irq, void *dev_id)
-{
-       reg_gio_rw_intr_mask intr_mask;
-       reg_gio_r_masked_intr masked_intr;
-       reg_gio_rw_ack_intr ack_intr;
-       unsigned long tmp;
-       unsigned long tmp2;
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       unsigned char enable_gpiov_ack = 0;
-#endif
-
-       /* Find what PA interrupts are active */
-       masked_intr = REG_RD(gio, regi_gio, r_masked_intr);
-       tmp = REG_TYPE_CONV(unsigned long, reg_gio_r_masked_intr, masked_intr);
-
-       /* Find those that we have enabled */
-       spin_lock(&alarm_lock);
-       tmp &= (gpio_pa_high_alarms | gpio_pa_low_alarms);
-       spin_unlock(&alarm_lock);
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       /* Something changed on virtual GPIO. Interrupt is acked by
-        * reading the device.
-        */
-       if (tmp & (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN)) {
-               i2c_read(VIRT_I2C_ADDR, (void *)&cached_virtual_gpio_read,
-                       sizeof(cached_virtual_gpio_read));
-               enable_gpiov_ack = 1;
-       }
-#endif
-
-       /* Ack them */
-       ack_intr = REG_TYPE_CONV(reg_gio_rw_ack_intr, unsigned long, tmp);
-       REG_WR(gio, regi_gio, rw_ack_intr, ack_intr);
-
-       /* Disable those interrupts.. */
-       intr_mask = REG_RD(gio, regi_gio, rw_intr_mask);
-       tmp2 = REG_TYPE_CONV(unsigned long, reg_gio_rw_intr_mask, intr_mask);
-       tmp2 &= ~tmp;
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       /* Do not disable interrupt on virtual GPIO. Changes on virtual
-        * pins are only noticed by an interrupt.
-        */
-       if (enable_gpiov_ack)
-               tmp2 |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
-#endif
-       intr_mask = REG_TYPE_CONV(reg_gio_rw_intr_mask, unsigned long, tmp2);
-       REG_WR(gio, regi_gio, rw_intr_mask, intr_mask);
-
-       if (gpio_some_alarms)
-               return IRQ_RETVAL(etrax_gpio_wake_up_check());
-       return IRQ_NONE;
-}
-
-
-static ssize_t gpio_write(struct file *file, const char *buf, size_t count,
-       loff_t *off)
-{
-       struct gpio_private *priv = file->private_data;
-       unsigned char data, clk_mask, data_mask, write_msb;
-       unsigned long flags;
-       unsigned long shadow;
-       volatile unsigned long *port;
-       ssize_t retval = count;
-       /* Only bits 0-7 may be used for write operations but allow all
-          devices except leds... */
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       if (priv->minor == GPIO_MINOR_V)
-               return -EFAULT;
-#endif
-       if (priv->minor == GPIO_MINOR_LEDS)
-               return -EFAULT;
-
-       if (!access_ok(VERIFY_READ, buf, count))
-               return -EFAULT;
-       clk_mask = priv->clk_mask;
-       data_mask = priv->data_mask;
-       /* It must have been configured using the IO_CFG_WRITE_MODE */
-       /* Perhaps a better error code? */
-       if (clk_mask == 0 || data_mask == 0)
-               return -EPERM;
-       write_msb = priv->write_msb;
-       D(printk(KERN_DEBUG "gpio_write: %lu to data 0x%02X clk 0x%02X "
-               "msb: %i\n", count, data_mask, clk_mask, write_msb));
-       port = data_out[priv->minor];
-
-       while (count--) {
-               int i;
-               data = *buf++;
-               if (priv->write_msb) {
-                       for (i = 7; i >= 0; i--) {
-                               local_irq_save(flags);
-                               shadow = *port;
-                               *port = shadow &= ~clk_mask;
-                               if (data & 1<<i)
-                                       *port = shadow |= data_mask;
-                               else
-                                       *port = shadow &= ~data_mask;
-                       /* For FPGA: min 5.0ns (DCC) before CCLK high */
-                               *port = shadow |= clk_mask;
-                               local_irq_restore(flags);
-                       }
-               } else {
-                       for (i = 0; i <= 7; i++) {
-                               local_irq_save(flags);
-                               shadow = *port;
-                               *port = shadow &= ~clk_mask;
-                               if (data & 1<<i)
-                                       *port = shadow |= data_mask;
-                               else
-                                       *port = shadow &= ~data_mask;
-                       /* For FPGA: min 5.0ns (DCC) before CCLK high */
-                               *port = shadow |= clk_mask;
-                               local_irq_restore(flags);
-                       }
-               }
-       }
-       return retval;
-}
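
gpio_write() above clocks each byte out on the configured data pin, MSB- or LSB-first depending on the IO_CFG_WRITE_MODE setting. Ignoring the port and clock handling, the bit ordering reduces to the following sketch (hypothetical byte value):

#include <stdio.h>

/* Bit ordering used by the removed gpio_write(): one byte is clocked out
 * MSB-first or LSB-first depending on the IO_CFG_WRITE_MODE flag. */
static void emit_byte(unsigned char data, int write_msb)
{
        int i;

        if (write_msb)
                for (i = 7; i >= 0; i--)
                        printf("%d", (data >> i) & 1);
        else
                for (i = 0; i <= 7; i++)
                        printf("%d", (data >> i) & 1);
        printf("\n");
}

int main(void)
{
        emit_byte(0xC1, 1);     /* 11000001: MSB first */
        emit_byte(0xC1, 0);     /* 10000011: LSB first */
        return 0;
}
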
-
-
-
-static int
-gpio_open(struct inode *inode, struct file *filp)
-{
-       struct gpio_private *priv;
-       int p = iminor(inode);
-
-       if (p > GPIO_MINOR_LAST)
-               return -EINVAL;
-
-       priv = kzalloc(sizeof(struct gpio_private), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       mutex_lock(&gpio_mutex);
-
-       priv->minor = p;
-
-       /* initialize the io/alarm struct */
-
-       priv->clk_mask = 0;
-       priv->data_mask = 0;
-       priv->highalarm = 0;
-       priv->lowalarm = 0;
-       init_waitqueue_head(&priv->alarm_wq);
-
-       filp->private_data = (void *)priv;
-
-       /* link it into our alarmlist */
-       spin_lock_irq(&alarm_lock);
-       priv->next = alarmlist;
-       alarmlist = priv;
-       spin_unlock_irq(&alarm_lock);
-
-       mutex_unlock(&gpio_mutex);
-       return 0;
-}
-
-static int
-gpio_release(struct inode *inode, struct file *filp)
-{
-       struct gpio_private *p;
-       struct gpio_private *todel;
-       /* local copies while updating them: */
-       unsigned long a_high, a_low;
-       unsigned long some_alarms;
-
-       /* unlink from alarmlist and free the private structure */
-
-       spin_lock_irq(&alarm_lock);
-       p = alarmlist;
-       todel = filp->private_data;
-
-       if (p == todel) {
-               alarmlist = todel->next;
-       } else {
-               while (p->next != todel)
-                       p = p->next;
-               p->next = todel->next;
-       }
-
-       kfree(todel);
-       /* Check if there are still any alarms set */
-       p = alarmlist;
-       some_alarms = 0;
-       a_high = 0;
-       a_low = 0;
-       while (p) {
-               if (p->minor == GPIO_MINOR_A) {
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-                       p->lowalarm |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
-#endif
-                       a_high |= p->highalarm;
-                       a_low |= p->lowalarm;
-               }
-
-               if (p->highalarm | p->lowalarm)
-                       some_alarms = 1;
-               p = p->next;
-       }
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       /* Variables 'some_alarms' and 'a_low' need to be set here again
-        * to ensure that the interrupt for virtual GPIO is handled.
-        */
-       some_alarms = 1;
-       a_low |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
-#endif
-
-       gpio_some_alarms = some_alarms;
-       gpio_pa_high_alarms = a_high;
-       gpio_pa_low_alarms = a_low;
-       spin_unlock_irq(&alarm_lock);
-
-       return 0;
-}
-
-/* Main device API. ioctls to read/set/clear bits, as well as to
- * set alarms to wait for using a subsequent select().
- */
-
-inline unsigned long setget_input(struct gpio_private *priv, unsigned long arg)
-{
-       /* Set direction 0=unchanged 1=input,
-        * return mask with 1=input
-        */
-       unsigned long flags;
-       unsigned long dir_shadow;
-
-       local_irq_save(flags);
-       dir_shadow = *dir_oe[priv->minor];
-       dir_shadow &= ~(arg & changeable_dir[priv->minor]);
-       *dir_oe[priv->minor] = dir_shadow;
-       local_irq_restore(flags);
-
-       if (priv->minor == GPIO_MINOR_A)
-               dir_shadow ^= 0xFF;    /* Only 8 bits */
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       else if (priv->minor == GPIO_MINOR_V)
-               dir_shadow ^= 0xFFFF;  /* Only 16 bits */
-#endif
-       else
-               dir_shadow ^= 0x3FFFF; /* Only 18 bits */
-       return dir_shadow;
-
-} /* setget_input */
-
-inline unsigned long setget_output(struct gpio_private *priv, unsigned long arg)
-{
-       unsigned long flags;
-       unsigned long dir_shadow;
-
-       local_irq_save(flags);
-       dir_shadow = *dir_oe[priv->minor];
-       dir_shadow |=  (arg & changeable_dir[priv->minor]);
-       *dir_oe[priv->minor] = dir_shadow;
-       local_irq_restore(flags);
-       return dir_shadow;
-} /* setget_output */
-
-static int gpio_leds_ioctl(unsigned int cmd, unsigned long arg);
-
-static int
-gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       unsigned long flags;
-       unsigned long val;
-       unsigned long shadow;
-       struct gpio_private *priv = file->private_data;
-       if (_IOC_TYPE(cmd) != ETRAXGPIO_IOCTYPE)
-               return -EINVAL;
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       if (priv->minor == GPIO_MINOR_V)
-               return virtual_gpio_ioctl(file, cmd, arg);
-#endif
-
-       switch (_IOC_NR(cmd)) {
-       case IO_READBITS: /* Use IO_READ_INBITS and IO_READ_OUTBITS instead */
-               /* Read the port. */
-               return *data_in[priv->minor];
-               break;
-       case IO_SETBITS:
-               local_irq_save(flags);
-               /* Set changeable bits with a 1 in arg. */
-               shadow = *data_out[priv->minor];
-               shadow |=  (arg & changeable_bits[priv->minor]);
-               *data_out[priv->minor] = shadow;
-               local_irq_restore(flags);
-               break;
-       case IO_CLRBITS:
-               local_irq_save(flags);
-               /* Clear changeable bits with a 1 in arg. */
-               shadow = *data_out[priv->minor];
-               shadow &=  ~(arg & changeable_bits[priv->minor]);
-               *data_out[priv->minor] = shadow;
-               local_irq_restore(flags);
-               break;
-       case IO_HIGHALARM:
-               /* Set alarm when bits with 1 in arg go high. */
-               priv->highalarm |= arg;
-               spin_lock_irqsave(&alarm_lock, flags);
-               gpio_some_alarms = 1;
-               if (priv->minor == GPIO_MINOR_A)
-                       gpio_pa_high_alarms |= arg;
-               spin_unlock_irqrestore(&alarm_lock, flags);
-               break;
-       case IO_LOWALARM:
-               /* Set alarm when bits with 1 in arg go low. */
-               priv->lowalarm |= arg;
-               spin_lock_irqsave(&alarm_lock, flags);
-               gpio_some_alarms = 1;
-               if (priv->minor == GPIO_MINOR_A)
-                       gpio_pa_low_alarms |= arg;
-               spin_unlock_irqrestore(&alarm_lock, flags);
-               break;
-       case IO_CLRALARM:
-               /* Clear alarm for bits with 1 in arg. */
-               priv->highalarm &= ~arg;
-               priv->lowalarm  &= ~arg;
-               spin_lock_irqsave(&alarm_lock, flags);
-               if (priv->minor == GPIO_MINOR_A) {
-                       if (gpio_pa_high_alarms & arg ||
-                           gpio_pa_low_alarms & arg)
-                               /* Must update the gpio_pa_*alarms masks */
-                               ;
-               }
-               spin_unlock_irqrestore(&alarm_lock, flags);
-               break;
-       case IO_READDIR: /* Use IO_SETGET_INPUT/OUTPUT instead! */
-               /* Read direction 0=input 1=output */
-               return *dir_oe[priv->minor];
-       case IO_SETINPUT: /* Use IO_SETGET_INPUT instead! */
-               /* Set direction 0=unchanged 1=input,
-                * return mask with 1=input
-                */
-               return setget_input(priv, arg);
-               break;
-       case IO_SETOUTPUT: /* Use IO_SETGET_OUTPUT instead! */
-               /* Set direction 0=unchanged 1=output,
-                * return mask with 1=output
-                */
-               return setget_output(priv, arg);
-
-       case IO_CFG_WRITE_MODE:
-       {
-               unsigned long dir_shadow;
-               dir_shadow = *dir_oe[priv->minor];
-
-               priv->clk_mask = arg & 0xFF;
-               priv->data_mask = (arg >> 8) & 0xFF;
-               priv->write_msb = (arg >> 16) & 0x01;
-               /* Check if we're allowed to change the bits and
-                * the direction is correct
-                */
-               if (!((priv->clk_mask & changeable_bits[priv->minor]) &&
-                     (priv->data_mask & changeable_bits[priv->minor]) &&
-                     (priv->clk_mask & dir_shadow) &&
-                     (priv->data_mask & dir_shadow))) {
-                       priv->clk_mask = 0;
-                       priv->data_mask = 0;
-                       return -EPERM;
-               }
-               break;
-       }
-       case IO_READ_INBITS:
-               /* *arg is result of reading the input pins */
-               val = *data_in[priv->minor];
-               if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               return 0;
-               break;
-       case IO_READ_OUTBITS:
-                /* *arg is result of reading the output shadow */
-               val = *data_out[priv->minor];
-               if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               break;
-       case IO_SETGET_INPUT:
-               /* bits set in *arg are set to input,
-                * *arg updated with current input pins.
-                */
-               if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
-                       return -EFAULT;
-               val = setget_input(priv, val);
-               if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               break;
-       case IO_SETGET_OUTPUT:
-               /* bits set in *arg are set to output,
-                * *arg updated with current output pins.
-                */
-               if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
-                       return -EFAULT;
-               val = setget_output(priv, val);
-               if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               break;
-       default:
-               if (priv->minor == GPIO_MINOR_LEDS)
-                       return gpio_leds_ioctl(cmd, arg);
-               else
-                       return -EINVAL;
-       } /* switch */
-
-       return 0;
-}
-
-static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       long ret;
-
-       mutex_lock(&gpio_mutex);
-       ret = gpio_ioctl_unlocked(file, cmd, arg);
-       mutex_unlock(&gpio_mutex);
-
-       return ret;
-}
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-static int
-virtual_gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       unsigned long flags;
-       unsigned short val;
-       unsigned short shadow;
-       struct gpio_private *priv = file->private_data;
-
-       switch (_IOC_NR(cmd)) {
-       case IO_SETBITS:
-               local_irq_save(flags);
-               /* Set changeable bits with a 1 in arg. */
-               i2c_read(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
-               shadow |= ~*dir_oe[priv->minor];
-               shadow |= (arg & changeable_bits[priv->minor]);
-               i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
-               local_irq_restore(flags);
-               break;
-       case IO_CLRBITS:
-               local_irq_save(flags);
-               /* Clear changeable bits with a 1 in arg. */
-               i2c_read(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
-               shadow |= ~*dir_oe[priv->minor];
-               shadow &= ~(arg & changeable_bits[priv->minor]);
-               i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
-               local_irq_restore(flags);
-               break;
-       case IO_HIGHALARM:
-               /* Set alarm when bits with 1 in arg go high. */
-               priv->highalarm |= arg;
-               spin_lock(&alarm_lock);
-               gpio_some_alarms = 1;
-               spin_unlock(&alarm_lock);
-               break;
-       case IO_LOWALARM:
-               /* Set alarm when bits with 1 in arg go low. */
-               priv->lowalarm |= arg;
-               spin_lock(&alarm_lock);
-               gpio_some_alarms = 1;
-               spin_unlock(&alarm_lock);
-               break;
-       case IO_CLRALARM:
-               /* Clear alarm for bits with 1 in arg. */
-               priv->highalarm &= ~arg;
-               priv->lowalarm  &= ~arg;
-               spin_lock(&alarm_lock);
-               spin_unlock(&alarm_lock);
-               break;
-       case IO_CFG_WRITE_MODE:
-       {
-               unsigned long dir_shadow;
-               dir_shadow = *dir_oe[priv->minor];
-
-               priv->clk_mask = arg & 0xFF;
-               priv->data_mask = (arg >> 8) & 0xFF;
-               priv->write_msb = (arg >> 16) & 0x01;
-               /* Check if we're allowed to change the bits and
-                * the direction is correct
-                */
-               if (!((priv->clk_mask & changeable_bits[priv->minor]) &&
-                     (priv->data_mask & changeable_bits[priv->minor]) &&
-                     (priv->clk_mask & dir_shadow) &&
-                     (priv->data_mask & dir_shadow))) {
-                       priv->clk_mask = 0;
-                       priv->data_mask = 0;
-                       return -EPERM;
-               }
-               break;
-       }
-       case IO_READ_INBITS:
-               /* *arg is result of reading the input pins */
-               val = cached_virtual_gpio_read;
-               val &= ~*dir_oe[priv->minor];
-               if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               return 0;
-               break;
-       case IO_READ_OUTBITS:
-                /* *arg is result of reading the output shadow */
-               i2c_read(VIRT_I2C_ADDR, (void *)&val, sizeof(val));
-               val &= *dir_oe[priv->minor];
-               if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               break;
-       case IO_SETGET_INPUT:
-       {
-               /* bits set in *arg are set to input,
-                * *arg updated with current input pins.
-                */
-               unsigned short input_mask = ~*dir_oe[priv->minor];
-               if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
-                       return -EFAULT;
-               val = setget_input(priv, val);
-               if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               if ((input_mask & val) != input_mask) {
-                       /* Input pins changed. All ports desired as input
-                        * should be set to logic 1.
-                        */
-                       unsigned short change = input_mask ^ val;
-                       i2c_read(VIRT_I2C_ADDR, (void *)&shadow,
-                               sizeof(shadow));
-                       shadow &= ~change;
-                       shadow |= val;
-                       i2c_write(VIRT_I2C_ADDR, (void *)&shadow,
-                               sizeof(shadow));
-               }
-               break;
-       }
-       case IO_SETGET_OUTPUT:
-               /* bits set in *arg are set to output,
-                * *arg updated with current output pins.
-                */
-               if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
-                       return -EFAULT;
-               val = setget_output(priv, val);
-               if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
-                       return -EFAULT;
-               break;
-       default:
-               return -EINVAL;
-       } /* switch */
-       return 0;
-}
-#endif /* CONFIG_ETRAX_VIRTUAL_GPIO */
-
-static int
-gpio_leds_ioctl(unsigned int cmd, unsigned long arg)
-{
-       unsigned char green;
-       unsigned char red;
-
-       switch (_IOC_NR(cmd)) {
-       case IO_LEDACTIVE_SET:
-               green = ((unsigned char) arg) & 1;
-               red   = (((unsigned char) arg) >> 1) & 1;
-               CRIS_LED_ACTIVE_SET_G(green);
-               CRIS_LED_ACTIVE_SET_R(red);
-               break;
-
-       default:
-               return -EINVAL;
-       } /* switch */
-
-       return 0;
-}
-
-static const struct file_operations gpio_fops = {
-       .owner          = THIS_MODULE,
-       .poll           = gpio_poll,
-       .unlocked_ioctl = gpio_ioctl,
-       .write          = gpio_write,
-       .open           = gpio_open,
-       .release        = gpio_release,
-       .llseek         = noop_llseek,
-};
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-static void
-virtual_gpio_init(void)
-{
-       reg_gio_rw_intr_cfg intr_cfg;
-       reg_gio_rw_intr_mask intr_mask;
-       unsigned short shadow;
-
-       shadow = ~virtual_rw_pv_oe; /* Input ports should be set to logic 1 */
-       shadow |= CONFIG_ETRAX_DEF_GIO_PV_OUT;
-       i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
-
-       /* Set the interrupt mask and the state on which the interrupt shall trigger.
-        * For virtual gpio the interrupt shall trigger on logic '0'.
-        */
-       intr_cfg = REG_RD(gio, regi_gio, rw_intr_cfg);
-       intr_mask = REG_RD(gio, regi_gio, rw_intr_mask);
-
-       switch (CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN) {
-       case 0:
-               intr_cfg.pa0 = regk_gio_lo;
-               intr_mask.pa0 = regk_gio_yes;
-               break;
-       case 1:
-               intr_cfg.pa1 = regk_gio_lo;
-               intr_mask.pa1 = regk_gio_yes;
-               break;
-       case 2:
-               intr_cfg.pa2 = regk_gio_lo;
-               intr_mask.pa2 = regk_gio_yes;
-               break;
-       case 3:
-               intr_cfg.pa3 = regk_gio_lo;
-               intr_mask.pa3 = regk_gio_yes;
-               break;
-       case 4:
-               intr_cfg.pa4 = regk_gio_lo;
-               intr_mask.pa4 = regk_gio_yes;
-               break;
-       case 5:
-               intr_cfg.pa5 = regk_gio_lo;
-               intr_mask.pa5 = regk_gio_yes;
-               break;
-       case 6:
-               intr_cfg.pa6 = regk_gio_lo;
-               intr_mask.pa6 = regk_gio_yes;
-               break;
-       case 7:
-               intr_cfg.pa7 = regk_gio_lo;
-               intr_mask.pa7 = regk_gio_yes;
-               break;
-       }
-
-       REG_WR(gio, regi_gio, rw_intr_cfg, intr_cfg);
-       REG_WR(gio, regi_gio, rw_intr_mask, intr_mask);
-
-       gpio_pa_low_alarms |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
-       gpio_some_alarms = 1;
-}
-#endif
-
-/* main driver initialization routine, called from mem.c */
-
-static __init int
-gpio_init(void)
-{
-       int res;
-
-       /* do the formalities */
-
-       res = register_chrdev(GPIO_MAJOR, gpio_name, &gpio_fops);
-       if (res < 0) {
-               printk(KERN_ERR "gpio: couldn't get a major number.\n");
-               return res;
-       }
-
-       /* Clear all leds */
-       CRIS_LED_NETWORK_GRP0_SET(0);
-       CRIS_LED_NETWORK_GRP1_SET(0);
-       CRIS_LED_ACTIVE_SET(0);
-       CRIS_LED_DISK_READ(0);
-       CRIS_LED_DISK_WRITE(0);
-
-       printk(KERN_INFO "ETRAX FS GPIO driver v2.5, (c) 2003-2007 "
-               "Axis Communications AB\n");
-       /* We call etrax_gpio_wake_up_check() from timer interrupt */
-       if (request_irq(TIMER0_INTR_VECT, gpio_poll_timer_interrupt,
-                       IRQF_SHARED, "gpio poll", &alarmlist))
-               printk(KERN_ERR "timer0 irq for gpio\n");
-
-       if (request_irq(GIO_INTR_VECT, gpio_pa_interrupt,
-                       IRQF_SHARED, "gpio PA", &alarmlist))
-               printk(KERN_ERR "PA irq for gpio\n");
-
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       virtual_gpio_init();
-#endif
-
-       return res;
-}
-
-/* this makes sure that gpio_init is called during kernel boot */
-
-module_init(gpio_init);
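
Both deleted drivers implement IO_SETINPUT/IO_SETGET_INPUT the same way: clear the requested, changeable bits in the output-enable register (0 means input) and return the register inverted within the port width, so that 1 means input in the result. A stand-alone sketch of that arithmetic, assuming a hypothetical 18-bit port:

#include <stdio.h>

#define PORT_WIDTH_MASK 0x3FFFFUL               /* hypothetical 18-bit port      */

static unsigned long dir_oe = 0x3FFFF;          /* all pins start as outputs     */
static const unsigned long changeable_dir = 0xFF; /* only bits 0-7 may change    */

/* Mirror of the removed setget_input(): set the selected, changeable pins
 * to input (0 in the output-enable register) and return a mask with
 * 1 = input, inverted within the port width. */
static unsigned long setget_input(unsigned long arg)
{
        dir_oe &= ~(arg & changeable_dir);
        return dir_oe ^ PORT_WIDTH_MASK;
}

int main(void)
{
        /* Request bits 0-3 as inputs; only changeable bits may flip. */
        printf("input mask: 0x%05lx\n", setget_input(0xF)); /* prints 0x0000f */
        return 0;
}
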
index bde8d1a10cadd31edb5b0120b0bc3feab73e07f9..b0566350a840319d0e8e4d603e0e892792510d36 100644 (file)
@@ -3,7 +3,6 @@
 #include <arch/dma.h>
 #include <arch/intmem.h>
 #include <mach/pinmux.h>
-#include <arch/io.h>
 
 /* Functions for allocating DMA channels */
 EXPORT_SYMBOL(crisv32_request_dma);
@@ -20,8 +19,6 @@ EXPORT_SYMBOL(crisv32_pinmux_alloc);
 EXPORT_SYMBOL(crisv32_pinmux_alloc_fixed);
 EXPORT_SYMBOL(crisv32_pinmux_dealloc);
 EXPORT_SYMBOL(crisv32_pinmux_dealloc_fixed);
-EXPORT_SYMBOL(crisv32_io_get_name);
-EXPORT_SYMBOL(crisv32_io_get);
 
 /* Functions masking/unmasking interrupts */
 EXPORT_SYMBOL(crisv32_mask_irq);
index 02e33ebe51ec50b9a71d5cd1205df1168a5fe848..d2f3f9c37102365eaed4c6e07bda0fd6cccf2bf1 100644 (file)
@@ -77,8 +77,6 @@ static struct dbg_port *port =
        &ports[2];
 #elif defined(CONFIG_ETRAX_DEBUG_PORT3)
        &ports[3];
-#elif defined(CONFIG_ETRAX_DEBUG_PORT4)
-       &ports[4];
 #else
        NULL;
 #endif
index 74a66e0e3777388a9a05a92ba83fbcce17a6c7f9..ea6366800df7bb300504ba36f48b77d5b3792828 100644 (file)
@@ -292,11 +292,7 @@ _no_romfs_in_flash:
        ;; For cramfs, partition starts with magic and length.
        ;; For jffs2, a jhead is prepended which contains magic and length.
        ;; The jhead is not part of the jffs2 partition however.
-#ifndef CONFIG_ETRAXFS_SIM
        move.d  __bss_start, $r0
-#else
-       move.d  __end, $r0
-#endif
        move.d  [$r0], $r1
        cmp.d   CRAMFS_MAGIC, $r1 ; cramfs magic?
        beq     2f                ; yes, jump
index 6a881e0e92b43990dd14a7b7e06edeac9541dcb6..6de8db67cb097835f0812cac6845a8bdfda23a87 100644 (file)
@@ -37,7 +37,7 @@
 #define IGNOREMASK (1 << (SER0_INTR_VECT - FIRST_IRQ))
 #elif defined(CONFIG_ETRAX_KGDB_PORT1)
 #define IGNOREMASK (1 << (SER1_INTR_VECT - FIRST_IRQ))
-#elif defined(CONFIG_ETRAX_KGB_PORT2)
+#elif defined(CONFIG_ETRAX_KGDB_PORT2)
 #define IGNOREMASK (1 << (SER2_INTR_VECT - FIRST_IRQ))
 #elif defined(CONFIG_ETRAX_KGDB_PORT3)
 #define IGNOREMASK (1 << (SER3_INTR_VECT - FIRST_IRQ))
@@ -464,14 +464,14 @@ init_IRQ(void)
                etrax_irv->v[i] = weird_irq;
 
        np = of_find_compatible_node(NULL, NULL, "axis,crisv32-intc");
-       domain = irq_domain_add_legacy(np, NR_IRQS - FIRST_IRQ,
+       domain = irq_domain_add_legacy(np, NBR_INTR_VECT - FIRST_IRQ,
                                       FIRST_IRQ, FIRST_IRQ,
                                       &crisv32_irq_ops, NULL);
        BUG_ON(!domain);
        irq_set_default_host(domain);
        of_node_put(np);
 
-       for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
+       for (i = FIRST_IRQ, j = 0; j < NBR_INTR_VECT; i++, j++) {
                set_exception_vector(i, interrupt[j]);
        }
 
index b06813aeb120147b82502029fc3b21d7c9204fd5..e0fdea706ecad9d10e4a87f5191e9a64b2628659 100644 (file)
@@ -384,19 +384,11 @@ int getDebugChar(void);
 /* Serial port, writes one character. ETRAX 100 specific. from debugport.c */
 void putDebugChar(int val);
 
-/* Returns the integer equivalent of a hexadecimal character. */
-static int hex(char ch);
-
 /* Convert the memory, pointed to by mem into hexadecimal representation.
    Put the result in buf, and return a pointer to the last character
    in buf (null). */
 static char *mem2hex(char *buf, unsigned char *mem, int count);
 
-/* Convert the array, in hexadecimal representation, pointed to by buf into
-   binary representation. Put the result in mem, and return a pointer to
-   the character after the last byte written. */
-static unsigned char *hex2mem(unsigned char *mem, char *buf, int count);
-
 /* Put the content of the array, in binary representation, pointed to by buf
    into memory pointed to by mem, and return a pointer to
    the character after the last byte written. */
@@ -449,7 +441,7 @@ static char output_buffer[BUFMAX];
 /* Error and warning messages. */
 enum error_type
 {
-       SUCCESS, E01, E02, E03, E04, E05, E06,
+       SUCCESS, E01, E02, E03, E04, E05, E06, E07, E08
 };
 
 static char *error_message[] =
@@ -461,6 +453,8 @@ static char *error_message[] =
        "E04 The command is not supported - [s,C,S,!,R,d,r] - internal error.",
        "E05 Change register content - P - the register is not implemented..",
        "E06 Change memory content - M - internal error.",
+       "E07 Change register content - P - the register is not stored on the stack",
+       "E08 Invalid parameter"
 };
 
 /********************************** Breakpoint *******************************/
@@ -539,7 +533,7 @@ gdb_cris_strtol(const char *s, char **endptr, int base)
 /********************************* Register image ****************************/
 
 /* Write a value to a specified register in the register image of the current
-   thread. Returns status code SUCCESS, E02 or E05. */
+   thread. Returns status code SUCCESS, E02, E05 or E08. */
 static int
 write_register(int regno, char *val)
 {
@@ -547,8 +541,9 @@ write_register(int regno, char *val)
 
         if (regno >= R0 && regno <= ACR) {
                /* Consecutive 32-bit registers. */
-               hex2mem((unsigned char *)&reg.r0 + (regno - R0) * sizeof(unsigned int),
-                       val, sizeof(unsigned int));
+               if (hex2bin((unsigned char *)&reg.r0 + (regno - R0) * sizeof(unsigned int),
+                           val, sizeof(unsigned int)))
+                       status = E08;
 
        } else if (regno == BZ || regno == VR || regno == WZ || regno == DZ) {
                /* Read-only registers. */
@@ -557,16 +552,19 @@ write_register(int regno, char *val)
        } else if (regno == PID) {
                /* 32-bit register. (Even though we already checked SRS and WZ, we cannot
                   combine this with the EXS - SPC write since SRS and WZ have different size.) */
-               hex2mem((unsigned char *)&reg.pid, val, sizeof(unsigned int));
+               if (hex2bin((unsigned char *)&reg.pid, val, sizeof(unsigned int)))
+                       status = E08;
 
        } else if (regno == SRS) {
                /* 8-bit register. */
-               hex2mem((unsigned char *)&reg.srs, val, sizeof(unsigned char));
+               if (hex2bin((unsigned char *)&reg.srs, val, sizeof(unsigned char)))
+                       status = E08;
 
        } else if (regno >= EXS && regno <= SPC) {
                /* Consecutive 32-bit registers. */
-               hex2mem((unsigned char *)&reg.exs + (regno - EXS) * sizeof(unsigned int),
-                        val, sizeof(unsigned int));
+               if (hex2bin((unsigned char *)&reg.exs + (regno - EXS) * sizeof(unsigned int),
+                           val, sizeof(unsigned int)))
+                       status = E08;
 
        } else if (regno == PC) {
                /* Pseudo-register. Treat as read-only. */
@@ -574,7 +572,9 @@ write_register(int regno, char *val)
 
        } else if (regno >= S0 && regno <= S15) {
                /* 32-bit registers. */
-               hex2mem((unsigned char *)&sreg.s0_0 + (reg.srs * 16 * sizeof(unsigned int)) + (regno - S0) * sizeof(unsigned int), val, sizeof(unsigned int));
+               if (hex2bin((unsigned char *)&sreg.s0_0 + (reg.srs * 16 * sizeof(unsigned int)) + (regno - S0) * sizeof(unsigned int),
+                          val, sizeof(unsigned int)))
+                       status = E08;
        } else {
                /* Non-existing register. */
                status = E05;
@@ -630,19 +630,6 @@ read_register(char regno, unsigned int *valptr)
 }
 
 /********************************** Packet I/O ******************************/
-/* Returns the integer equivalent of a hexadecimal character. */
-static int
-hex(char ch)
-{
-       if ((ch >= 'a') && (ch <= 'f'))
-               return (ch - 'a' + 10);
-       if ((ch >= '0') && (ch <= '9'))
-               return (ch - '0');
-       if ((ch >= 'A') && (ch <= 'F'))
-               return (ch - 'A' + 10);
-       return -1;
-}
-
 /* Convert the memory, pointed to by mem into hexadecimal representation.
    Put the result in buf, and return a pointer to the last character
    in buf (null). */
@@ -689,22 +676,6 @@ mem2hex_nbo(char *buf, unsigned char *mem, int count)
        return buf;
 }
 
-/* Convert the array, in hexadecimal representation, pointed to by buf into
-   binary representation. Put the result in mem, and return a pointer to
-   the character after the last byte written. */
-static unsigned char*
-hex2mem(unsigned char *mem, char *buf, int count)
-{
-       int i;
-       unsigned char ch;
-       for (i = 0; i < count; i++) {
-               ch = hex (*buf++) << 4;
-               ch = ch + hex (*buf++);
-               *mem++ = ch;
-       }
-       return mem;
-}
-
 /* Put the content of the array, in binary representation, pointed to by buf
    into memory pointed to by mem, and return a pointer to the character after
    the last byte written.
@@ -763,8 +734,8 @@ getpacket(char *buffer)
                buffer[count] = 0;
 
                if (ch == '#') {
-                       xmitcsum = hex(getDebugChar()) << 4;
-                       xmitcsum += hex(getDebugChar());
+                       xmitcsum = hex_to_bin(getDebugChar()) << 4;
+                       xmitcsum += hex_to_bin(getDebugChar());
                        if (checksum != xmitcsum) {
                                /* Wrong checksum */
                                putDebugChar('-');
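
For context on the checksum hunk above: in the GDB remote serial protocol the two characters after '#' are the packet checksum, the sum of all payload bytes modulo 256 encoded as two hex digits, and hex_to_bin() (a kernel helper returning 0..15 or -1) decodes one digit at a time exactly as the removed local hex() did. A stand-alone sketch of the verification step, assuming the payload and the two checksum characters have already been read; checksum_ok() is illustrative and uses strtoul() instead of per-digit decoding for brevity.

#include <stdio.h>
#include <stdlib.h>

/* Return 1 if the two hex digits in csum match the payload checksum. */
static int checksum_ok(const char *payload, const char *csum)
{
        unsigned char sum = 0;

        while (*payload)
                sum += (unsigned char)*payload++;

        return sum == (unsigned char)strtoul(csum, NULL, 16);
}

int main(void)
{
        /* Wire format "$g#67": the payload 'g' is ASCII 0x67, so csum is "67". */
        printf("%d\n", checksum_ok("g", "67"));
        return 0;
}
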
@@ -1304,14 +1275,17 @@ handle_exception(int sigval)
                                /* Write registers. GXX..XX
                                   Each byte of register data  is described by two hex digits.
                                   Success: OK
-                                  Failure: void. */
+                                  Failure: E08. */
                                /* General and special registers. */
-                               hex2mem((char *)&reg, &input_buffer[1], sizeof(registers));
+                               if (hex2bin((char *)&reg, &input_buffer[1], sizeof(registers)))
+                                       gdb_cris_strcpy(output_buffer, error_message[E08]);
                                /* Support registers. */
-                               hex2mem((char *)&sreg + (reg.srs * 16 * sizeof(unsigned int)),
+                               else if (hex2bin((char *)&sreg + (reg.srs * 16 * sizeof(unsigned int)),
                                        &input_buffer[1] + sizeof(registers),
-                                       16 * sizeof(unsigned int));
-                               gdb_cris_strcpy(output_buffer, "OK");
+                                       16 * sizeof(unsigned int)))
+                                       gdb_cris_strcpy(output_buffer, error_message[E08]);
+                               else
+                                       gdb_cris_strcpy(output_buffer, "OK");
                                break;
 
                        case 'P':
@@ -1338,6 +1312,10 @@ handle_exception(int sigval)
                                                        /* Do not support non-existing registers. */
                                                        gdb_cris_strcpy(output_buffer, error_message[E05]);
                                                        break;
+                                               case E08:
+                                                       /* Invalid parameter. */
+                                                       gdb_cris_strcpy(output_buffer, error_message[E08]);
+                                                       break;
                                                default:
                                                        /* Valid register number. */
                                                        gdb_cris_strcpy(output_buffer, "OK");
@@ -1380,7 +1358,7 @@ handle_exception(int sigval)
                                   AA..AA is the start address,  LLLL is the number of bytes, and
                                   XX..XX is the hexadecimal data.
                                   Success: OK
-                                  Failure: void. */
+                                  Failure: E08. */
                                {
                                        char *lenptr;
                                        char *dataptr;
@@ -1389,13 +1367,15 @@ handle_exception(int sigval)
                                        int len = gdb_cris_strtol(lenptr+1, &dataptr, 16);
                                        if (*lenptr == ',' && *dataptr == ':') {
                                                if (input_buffer[0] == 'M') {
-                                                       hex2mem(addr, dataptr + 1, len);
+                                                       if (hex2bin(addr, dataptr + 1, len))
+                                                               gdb_cris_strcpy(output_buffer, error_message[E08]);
+                                                       else
+                                                               gdb_cris_strcpy(output_buffer, "OK");
                                                } else /* X */ {
                                                        bin2mem(addr, dataptr + 1, len);
+                                                       gdb_cris_strcpy(output_buffer, "OK");
                                                }
-                                               gdb_cris_strcpy(output_buffer, "OK");
-                                       }
-                                       else {
+                                       } else {
                                                gdb_cris_strcpy(output_buffer, error_message[E06]);
                                        }
                                }
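
A related detail behind the M/X split above: 'M' packets carry their payload as hex digit pairs (hence hex2bin() and the new E08 failure path), while 'X' packets carry raw binary in which '#' (0x23), '$' (0x24) and '}' (0x7d) travel escaped as 0x7d followed by the byte XORed with 0x20, which is what bin2mem() undoes and why it has no invalid-digit failure mode of its own. An illustrative unescape routine (not the stub's exact code):

#include <stdio.h>

/* Decode GDB 'X'-packet binary data: 0x7d escapes the next byte,
 * which is transmitted XORed with 0x20. Returns the bytes written. */
static int unescape(unsigned char *dst, const unsigned char *src, int len)
{
        int n = 0;

        while (len-- > 0) {
                unsigned char c = *src++;

                if (c == 0x7d && len-- > 0)
                        c = *src++ ^ 0x20;
                dst[n++] = c;
        }
        return n;
}

int main(void)
{
        const unsigned char wire[] = { 'a', 0x7d, 0x03, 'b' }; /* 0x7d 0x03 -> '#' */
        unsigned char out[sizeof(wire)];
        int n = unescape(out, wire, sizeof(wire));

        printf("%d bytes: %c 0x%02x %c\n", n, out[0], out[1], out[2]);
        return 0;
}
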
index cd1865d68b2e031d55149c6e29c68bc2041de5a4..fe50287aa928c415bc98dd359eff13bcaaba76b6 100644 (file)
@@ -128,10 +128,6 @@ static struct i2c_board_info __initdata i2c_info[] = {
        {I2C_BOARD_INFO("tmp100", 0x4E)},
 #ifdef CONFIG_RTC_DRV_PCF8563
        {I2C_BOARD_INFO("pcf8563", 0x51)},
-#endif
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       {I2C_BOARD_INFO("vgpio", 0x20)},
-       {I2C_BOARD_INFO("vgpio", 0x21)},
 #endif
        {I2C_BOARD_INFO("pca9536", 0x41)},
        {I2C_BOARD_INFO("fnp300", 0x40)},
@@ -146,10 +142,6 @@ static struct i2c_board_info __initdata i2c_info2[] = {
        {I2C_BOARD_INFO("tmp100", 0x4C)},
        {I2C_BOARD_INFO("tmp100", 0x4D)},
        {I2C_BOARD_INFO("tmp100", 0x4E)},
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-       {I2C_BOARD_INFO("vgpio", 0x20)},
-       {I2C_BOARD_INFO("vgpio", 0x21)},
-#endif
        {I2C_BOARD_INFO("pca9536", 0x41)},
        {I2C_BOARD_INFO("fnp300", 0x40)},
        {I2C_BOARD_INFO("fnp300", 0x42)},
index 18a227196a41b75e0fb84929976182ed4480e7a2..0cc6eebacbed7dd5b3b1470a15b3abc612de78ab 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-obj-y   := dma.o pinmux.o io.o arbiter.o
+obj-y   := dma.o pinmux.o arbiter.o
 
 clean:
 
diff --git a/arch/cris/arch-v32/mach-a3/io.c b/arch/cris/arch-v32/mach-a3/io.c
deleted file mode 100644 (file)
index 090ceb9..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Helper functions for I/O pins.
- *
- * Copyright (c) 2005-2007 Axis Communications AB.
- */
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/io.h>
-#include <mach/pinmux.h>
-#include <hwregs/gio_defs.h>
-
-struct crisv32_ioport crisv32_ioports[] = {
-       {
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pa_oe),
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pa_dout),
-               (unsigned long *)REG_ADDR(gio, regi_gio, r_pa_din),
-               32
-       },
-       {
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pb_oe),
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pb_dout),
-               (unsigned long *)REG_ADDR(gio, regi_gio, r_pb_din),
-               32
-       },
-       {
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pc_oe),
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pc_dout),
-               (unsigned long *)REG_ADDR(gio, regi_gio, r_pc_din),
-               16
-       },
-};
-
-#define NBR_OF_PORTS ARRAY_SIZE(crisv32_ioports)
-
-struct crisv32_iopin crisv32_led_net0_green;
-struct crisv32_iopin crisv32_led_net0_red;
-struct crisv32_iopin crisv32_led2_green;
-struct crisv32_iopin crisv32_led2_red;
-struct crisv32_iopin crisv32_led3_green;
-struct crisv32_iopin crisv32_led3_red;
-
-/* Dummy port used when green LED and red LED is on the same bit */
-static unsigned long io_dummy;
-static struct crisv32_ioport dummy_port = {
-       &io_dummy,
-       &io_dummy,
-       &io_dummy,
-       32
-};
-static struct crisv32_iopin dummy_led = {
-       &dummy_port,
-       0
-};
-
-static int __init crisv32_io_init(void)
-{
-       int ret = 0;
-
-       u32 i;
-
-       /* Locks *should* be dynamically initialized. */
-       for (i = 0; i < ARRAY_SIZE(crisv32_ioports); i++)
-               spin_lock_init(&crisv32_ioports[i].lock);
-       spin_lock_init(&dummy_port.lock);
-
-       /* Initialize LEDs */
-#if (defined(CONFIG_ETRAX_NBR_LED_GRP_ONE) || defined(CONFIG_ETRAX_NBR_LED_GRP_TWO))
-       ret += crisv32_io_get_name(&crisv32_led_net0_green,
-               CONFIG_ETRAX_LED_G_NET0);
-       crisv32_io_set_dir(&crisv32_led_net0_green, crisv32_io_dir_out);
-       if (strcmp(CONFIG_ETRAX_LED_G_NET0, CONFIG_ETRAX_LED_R_NET0)) {
-               ret += crisv32_io_get_name(&crisv32_led_net0_red,
-                       CONFIG_ETRAX_LED_R_NET0);
-               crisv32_io_set_dir(&crisv32_led_net0_red, crisv32_io_dir_out);
-       } else
-               crisv32_led_net0_red = dummy_led;
-#endif
-
-       ret += crisv32_io_get_name(&crisv32_led2_green, CONFIG_ETRAX_V32_LED2G);
-       ret += crisv32_io_get_name(&crisv32_led2_red, CONFIG_ETRAX_V32_LED2R);
-       ret += crisv32_io_get_name(&crisv32_led3_green, CONFIG_ETRAX_V32_LED3G);
-       ret += crisv32_io_get_name(&crisv32_led3_red, CONFIG_ETRAX_V32_LED3R);
-
-       crisv32_io_set_dir(&crisv32_led2_green, crisv32_io_dir_out);
-       crisv32_io_set_dir(&crisv32_led2_red, crisv32_io_dir_out);
-       crisv32_io_set_dir(&crisv32_led3_green, crisv32_io_dir_out);
-       crisv32_io_set_dir(&crisv32_led3_red, crisv32_io_dir_out);
-
-       return ret;
-}
-
-__initcall(crisv32_io_init);
-
-int crisv32_io_get(struct crisv32_iopin *iopin,
-       unsigned int port, unsigned int pin)
-{
-       if (port > NBR_OF_PORTS)
-               return -EINVAL;
-       if (port > crisv32_ioports[port].pin_count)
-               return -EINVAL;
-
-       iopin->bit = 1 << pin;
-       iopin->port = &crisv32_ioports[port];
-
-       if (crisv32_pinmux_alloc(port, pin, pin, pinmux_gpio))
-               return -EIO;
-
-       return 0;
-}
-
-int crisv32_io_get_name(struct crisv32_iopin *iopin, const char *name)
-{
-       int port;
-       int pin;
-
-       if (toupper(*name) == 'P')
-               name++;
-
-       if (toupper(*name) < 'A' || toupper(*name) > 'E')
-               return -EINVAL;
-
-       port = toupper(*name) - 'A';
-       name++;
-       pin = simple_strtoul(name, NULL, 10);
-
-       if (pin < 0 || pin > crisv32_ioports[port].pin_count)
-               return -EINVAL;
-
-       iopin->bit = 1 << pin;
-       iopin->port = &crisv32_ioports[port];
-
-       if (crisv32_pinmux_alloc(port, pin, pin, pinmux_gpio))
-               return -EIO;
-
-       return 0;
-}
-
-#ifdef CONFIG_PCI
-/* PCI I/O access stuff */
-struct cris_io_operations *cris_iops = NULL;
-EXPORT_SYMBOL(cris_iops);
-#endif
-
index 774de82abef6a5453ccc6abcf5a47b83805da5e8..7d1ab972bc0f91600719ffe844969538ca603c20 100644 (file)
@@ -192,25 +192,6 @@ config ETRAX_DEF_GIO_PE_OUT
          Configures the initial data for the general port E bits.  Most
          products should use 00000 here.
 
-config ETRAX_DEF_GIO_PV_OE
-       hex "GIO_PV_OE"
-       depends on ETRAX_VIRTUAL_GPIO
-       default "0000"
-       help
-         Configures the direction of virtual general port V bits. 1 is out,
-         0 is in. This is often totally different depending on the product
-         used. These bits are used for all kinds of stuff. If you don't know
-         what to use, it is always safe to put all as inputs, although
-         floating inputs isn't good.
-
-config ETRAX_DEF_GIO_PV_OUT
-       hex "GIO_PV_OUT"
-       depends on ETRAX_VIRTUAL_GPIO
-       default "0000"
-       help
-         Configures the initial data for the virtual general port V bits.
-         Most products should use 0000 here.
-
 endmenu
 
 endif
index 18a227196a41b75e0fb84929976182ed4480e7a2..0cc6eebacbed7dd5b3b1470a15b3abc612de78ab 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-obj-y   := dma.o pinmux.o io.o arbiter.o
+obj-y   := dma.o pinmux.o arbiter.o
 
 clean:
 
diff --git a/arch/cris/arch-v32/mach-fs/io.c b/arch/cris/arch-v32/mach-fs/io.c
deleted file mode 100644 (file)
index a695866..0000000
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Helper functions for I/O pins.
- *
- * Copyright (c) 2004-2007 Axis Communications AB.
- */
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/io.h>
-#include <mach/pinmux.h>
-#include <hwregs/gio_defs.h>
-
-#ifndef DEBUG
-#define DEBUG(x)
-#endif
-
-struct crisv32_ioport crisv32_ioports[] = {
-       {
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pa_oe),
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pa_dout),
-               (unsigned long *)REG_ADDR(gio, regi_gio, r_pa_din),
-               8
-       },
-       {
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pb_oe),
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pb_dout),
-               (unsigned long *)REG_ADDR(gio, regi_gio, r_pb_din),
-               18
-       },
-       {
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pc_oe),
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pc_dout),
-               (unsigned long *)REG_ADDR(gio, regi_gio, r_pc_din),
-               18
-       },
-       {
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pd_oe),
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pd_dout),
-               (unsigned long *)REG_ADDR(gio, regi_gio, r_pd_din),
-               18
-       },
-       {
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pe_oe),
-               (unsigned long *)REG_ADDR(gio, regi_gio, rw_pe_dout),
-               (unsigned long *)REG_ADDR(gio, regi_gio, r_pe_din),
-               18
-       }
-};
-
-#define NBR_OF_PORTS ARRAY_SIZE(crisv32_ioports)
-
-struct crisv32_iopin crisv32_led_net0_green;
-struct crisv32_iopin crisv32_led_net0_red;
-struct crisv32_iopin crisv32_led_net1_green;
-struct crisv32_iopin crisv32_led_net1_red;
-struct crisv32_iopin crisv32_led2_green;
-struct crisv32_iopin crisv32_led2_red;
-struct crisv32_iopin crisv32_led3_green;
-struct crisv32_iopin crisv32_led3_red;
-
-/* Dummy port used when green LED and red LED is on the same bit */
-static unsigned long io_dummy;
-static struct crisv32_ioport dummy_port = {
-       &io_dummy,
-       &io_dummy,
-       &io_dummy,
-       18
-};
-static struct crisv32_iopin dummy_led = {
-       &dummy_port,
-       0
-};
-
-static int __init crisv32_io_init(void)
-{
-       int ret = 0;
-
-       u32 i;
-
-       /* Locks *should* be dynamically initialized. */
-       for (i = 0; i < ARRAY_SIZE(crisv32_ioports); i++)
-               spin_lock_init(&crisv32_ioports[i].lock);
-       spin_lock_init(&dummy_port.lock);
-
-       /* Initialize LEDs */
-#if (defined(CONFIG_ETRAX_NBR_LED_GRP_ONE) || defined(CONFIG_ETRAX_NBR_LED_GRP_TWO))
-       ret +=
-           crisv32_io_get_name(&crisv32_led_net0_green,
-                               CONFIG_ETRAX_LED_G_NET0);
-       crisv32_io_set_dir(&crisv32_led_net0_green, crisv32_io_dir_out);
-       if (strcmp(CONFIG_ETRAX_LED_G_NET0, CONFIG_ETRAX_LED_R_NET0)) {
-               ret +=
-                   crisv32_io_get_name(&crisv32_led_net0_red,
-                                       CONFIG_ETRAX_LED_R_NET0);
-               crisv32_io_set_dir(&crisv32_led_net0_red, crisv32_io_dir_out);
-       } else
-               crisv32_led_net0_red = dummy_led;
-#endif
-
-#ifdef CONFIG_ETRAX_NBR_LED_GRP_TWO
-       ret +=
-           crisv32_io_get_name(&crisv32_led_net1_green,
-                               CONFIG_ETRAX_LED_G_NET1);
-       crisv32_io_set_dir(&crisv32_led_net1_green, crisv32_io_dir_out);
-       if (strcmp(CONFIG_ETRAX_LED_G_NET1, CONFIG_ETRAX_LED_R_NET1)) {
-               crisv32_io_get_name(&crisv32_led_net1_red,
-                                   CONFIG_ETRAX_LED_R_NET1);
-               crisv32_io_set_dir(&crisv32_led_net1_red, crisv32_io_dir_out);
-       } else
-               crisv32_led_net1_red = dummy_led;
-#endif
-
-       ret += crisv32_io_get_name(&crisv32_led2_green, CONFIG_ETRAX_V32_LED2G);
-       ret += crisv32_io_get_name(&crisv32_led2_red, CONFIG_ETRAX_V32_LED2R);
-       ret += crisv32_io_get_name(&crisv32_led3_green, CONFIG_ETRAX_V32_LED3G);
-       ret += crisv32_io_get_name(&crisv32_led3_red, CONFIG_ETRAX_V32_LED3R);
-
-       crisv32_io_set_dir(&crisv32_led2_green, crisv32_io_dir_out);
-       crisv32_io_set_dir(&crisv32_led2_red, crisv32_io_dir_out);
-       crisv32_io_set_dir(&crisv32_led3_green, crisv32_io_dir_out);
-       crisv32_io_set_dir(&crisv32_led3_red, crisv32_io_dir_out);
-
-       return ret;
-}
-
-__initcall(crisv32_io_init);
-
-int crisv32_io_get(struct crisv32_iopin *iopin,
-                  unsigned int port, unsigned int pin)
-{
-       if (port > NBR_OF_PORTS)
-               return -EINVAL;
-       if (port > crisv32_ioports[port].pin_count)
-               return -EINVAL;
-
-       iopin->bit = 1 << pin;
-       iopin->port = &crisv32_ioports[port];
-
-       /* Only allocate pinmux gpiopins if port != PORT_A (port 0) */
-       /* NOTE! crisv32_pinmux_alloc thinks PORT_B is port 0 */
-       if (port != 0 && crisv32_pinmux_alloc(port - 1, pin, pin, pinmux_gpio))
-               return -EIO;
-       DEBUG(printk(KERN_DEBUG "crisv32_io_get: Allocated pin %d on port %d\n",
-               pin, port));
-
-       return 0;
-}
-
-int crisv32_io_get_name(struct crisv32_iopin *iopin, const char *name)
-{
-       int port;
-       int pin;
-
-       if (toupper(*name) == 'P')
-               name++;
-
-       if (toupper(*name) < 'A' || toupper(*name) > 'E')
-               return -EINVAL;
-
-       port = toupper(*name) - 'A';
-       name++;
-       pin = simple_strtoul(name, NULL, 10);
-
-       if (pin < 0 || pin > crisv32_ioports[port].pin_count)
-               return -EINVAL;
-
-       iopin->bit = 1 << pin;
-       iopin->port = &crisv32_ioports[port];
-
-       /* Only allocate pinmux gpiopins if port != PORT_A (port 0) */
-       /* NOTE! crisv32_pinmux_alloc thinks PORT_B is port 0 */
-       if (port != 0 && crisv32_pinmux_alloc(port - 1, pin, pin, pinmux_gpio))
-               return -EIO;
-
-       DEBUG(printk(KERN_DEBUG
-               "crisv32_io_get_name: Allocated pin %d on port %d\n",
-               pin, port));
-
-       return 0;
-}
-
-#ifdef CONFIG_PCI
-/* PCI I/O access stuff */
-struct cris_io_operations *cris_iops = NULL;
-EXPORT_SYMBOL(cris_iops);
-#endif
diff --git a/arch/cris/boot/dts/artpec3.dtsi b/arch/cris/boot/dts/artpec3.dtsi
new file mode 100644 (file)
index 0000000..be15be6
--- /dev/null
@@ -0,0 +1,46 @@
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       interrupt-parent = <&intc>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       device_type = "cpu";
+                       model = "axis,crisv32";
+                       reg = <0>;
+               };
+       };
+
+       soc {
+               compatible = "simple-bus";
+               model = "artpec3";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               intc: interrupt-controller {
+                       compatible = "axis,crisv32-intc";
+                       reg = <0xb002a000 0x1000>;
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+
+               gio: gpio@b0020000 {
+                       compatible = "axis,artpec3-gio";
+                       reg = <0xb0020000 0x1000>;
+                       interrupts = <61>;
+                       gpio-controller;
+                       #gpio-cells = <3>;
+               };
+
+               serial@b003e000 {
+                       compatible = "axis,etraxfs-uart";
+                       reg = <0xb003e000 0x1000>;
+                       interrupts = <64>;
+                       status = "disabled";
+               };
+       };
+};
index 4fa5a3f9d0ec1ec30a52a91b22c18377888f0c4f..b9a230d108748d97fcc2f81f2b6b6db0d97dfd0a 100644 (file)
@@ -1,5 +1,7 @@
 /dts-v1/;
 
+#include <dt-bindings/gpio/gpio.h>
+
 /include/ "etraxfs.dtsi"
 
 / {
                        status = "okay";
                };
        };
+
+       spi {
+               compatible = "spi-gpio";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               gpio-sck = <&gio 1 0 0xd>;
+               gpio-miso = <&gio 4 0 0xd>;
+               gpio-mosi = <&gio 0 0 0xd>;
+               cs-gpios = <&gio 3 0 0xd>;
+               num-chipselects = <1>;
+
+               temp-sensor@0 {
+                       compatible = "ti,lm70";
+                       reg = <0>;
+
+                       spi-max-frequency = <100000>;
+               };
+       };
+
+       i2c {
+               compatible = "i2c-gpio";
+               gpios = <&gio 5 0 0xd>, <&gio 6 0 0xd>;
+               i2c-gpio,delay-us = <2>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               rtc@51 {
+                       compatible = "nxp,pcf8563";
+                       reg = <0x51>;
+               };
+       };
+
+       leds {
+               compatible = "gpio-leds";
+
+               network {
+                       label = "network";
+                       gpios = <&gio 2 GPIO_ACTIVE_LOW 0xa>;
+               };
+
+               status {
+                       label = "status";
+                       gpios = <&gio 3 GPIO_ACTIVE_LOW 0xa>;
+                       linux,default-trigger = "heartbeat";
+               };
+       };
 };
index 909bcedc356580da301ad06da0c3adbbe0755c99..bf1b8582d4d803408411c59e779b46ea22f8dba8 100644 (file)
                        #interrupt-cells = <1>;
                };
 
+               gio: gpio@b001a000 {
+                       compatible = "axis,etraxfs-gio";
+                       reg = <0xb001a000 0x1000>;
+                       interrupts = <50>;
+                       gpio-controller;
+                       #gpio-cells = <3>;
+               };
+
                serial@b00260000 {
                        compatible = "axis,etraxfs-uart";
                        reg = <0xb0026000 0x1000>;
diff --git a/arch/cris/boot/dts/include/dt-bindings b/arch/cris/boot/dts/include/dt-bindings
new file mode 120000 (symlink)
index 0000000..08c00e4
--- /dev/null
@@ -0,0 +1 @@
+../../../../../include/dt-bindings
\ No newline at end of file
diff --git a/arch/cris/boot/dts/p1343.dts b/arch/cris/boot/dts/p1343.dts
new file mode 100644 (file)
index 0000000..fab7bdb
--- /dev/null
@@ -0,0 +1,76 @@
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/include/ "artpec3.dtsi"
+
+/ {
+       model = "Axis P1343 Network Camera";
+       compatible = "axis,p1343";
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       soc {
+               uart0: serial@b003e000 {
+                       status = "okay";
+               };
+       };
+
+       i2c {
+               compatible = "i2c-gpio";
+               gpios = <&gio 3 0 0xa>, <&gio 2 0 0xa>;
+               i2c-gpio,delay-us = <2>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               rtc@51 {
+                       compatible = "nxp,pcf8563";
+                       reg = <0x51>;
+               };
+       };
+
+       leds {
+               compatible = "gpio-leds";
+
+               status_green {
+                       label = "status:green";
+                       gpios = <&gio 0 GPIO_ACTIVE_LOW 0xc>;
+                       linux,default-trigger = "heartbeat";
+               };
+
+               status_red {
+                       label = "status:red";
+                       gpios = <&gio 1 GPIO_ACTIVE_LOW 0xc>;
+               };
+
+               network_green {
+                       label = "network:green";
+                       gpios = <&gio 2 GPIO_ACTIVE_LOW 0xc>;
+               };
+
+               network_red {
+                       label = "network:red";
+                       gpios = <&gio 3 GPIO_ACTIVE_LOW 0xc>;
+               };
+
+               power_red {
+                       label = "power:red";
+                       gpios = <&gio 4 GPIO_ACTIVE_LOW 0xc>;
+               };
+       };
+
+       gpio_keys {
+               compatible = "gpio-keys";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               activity-button@0 {
+                       label = "Activity Button";
+                       linux,code = <KEY_FN>;
+                       gpios = <&gio 13 GPIO_ACTIVE_LOW 0xd>;
+               };
+       };
+};
index af55df0994b38580439c29310e8cc1495474e235..1c05492f3eb20a2c219e7e6ce5f1a58d48dfc882 100644 (file)
@@ -281,9 +281,6 @@ wait_ser:
 #ifdef CONFIG_ETRAX_PB_LEDS
        move.b  $r2, [R_PORT_PB_DATA]
 #endif
-#ifdef CONFIG_ETRAX_90000000_LEDS
-       move.b  $r2, [0x90000000]
-#endif
 #endif
 
        ;; check if we got something on the serial port
diff --git a/arch/cris/include/arch-v32/arch/io.h b/arch/cris/include/arch-v32/arch/io.h
deleted file mode 100644 (file)
index adc5484..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-#ifndef _ASM_ARCH_CRIS_IO_H
-#define _ASM_ARCH_CRIS_IO_H
-
-#include <linux/spinlock.h>
-#include <hwregs/reg_map.h>
-#include <hwregs/reg_rdwr.h>
-#include <hwregs/gio_defs.h>
-
-enum crisv32_io_dir
-{
-  crisv32_io_dir_in = 0,
-  crisv32_io_dir_out = 1
-};
-
-struct crisv32_ioport
-{
-  volatile unsigned long *oe;
-  volatile unsigned long *data;
-  volatile unsigned long *data_in;
-  unsigned int pin_count;
-  spinlock_t lock;
-};
-
-struct crisv32_iopin
-{
-  struct crisv32_ioport* port;
-  int bit;
-};
-
-extern struct crisv32_ioport crisv32_ioports[];
-
-extern struct crisv32_iopin crisv32_led1_green;
-extern struct crisv32_iopin crisv32_led1_red;
-extern struct crisv32_iopin crisv32_led2_green;
-extern struct crisv32_iopin crisv32_led2_red;
-extern struct crisv32_iopin crisv32_led3_green;
-extern struct crisv32_iopin crisv32_led3_red;
-
-extern struct crisv32_iopin crisv32_led_net0_green;
-extern struct crisv32_iopin crisv32_led_net0_red;
-extern struct crisv32_iopin crisv32_led_net1_green;
-extern struct crisv32_iopin crisv32_led_net1_red;
-
-static inline void crisv32_io_set(struct crisv32_iopin *iopin, int val)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&iopin->port->lock, flags);
-
-       if (iopin->port->data) {
-               if (val)
-                       *iopin->port->data |= iopin->bit;
-               else
-                       *iopin->port->data &= ~iopin->bit;
-       }
-
-       spin_unlock_irqrestore(&iopin->port->lock, flags);
-}
-
-static inline void crisv32_io_set_dir(struct crisv32_iopin* iopin,
-                              enum crisv32_io_dir dir)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&iopin->port->lock, flags);
-
-       if (iopin->port->oe) {
-               if (dir == crisv32_io_dir_in)
-                       *iopin->port->oe &= ~iopin->bit;
-               else
-                       *iopin->port->oe |= iopin->bit;
-       }
-
-       spin_unlock_irqrestore(&iopin->port->lock, flags);
-}
-
-static inline int crisv32_io_rd(struct crisv32_iopin* iopin)
-{
-       return ((*iopin->port->data_in & iopin->bit) ? 1 : 0);
-}
-
-int crisv32_io_get(struct crisv32_iopin* iopin,
-                   unsigned int port, unsigned int pin);
-int crisv32_io_get_name(struct crisv32_iopin* iopin,
-                       const char *name);
-
-#define CRIS_LED_OFF    0x00
-#define CRIS_LED_GREEN  0x01
-#define CRIS_LED_RED    0x02
-#define CRIS_LED_ORANGE (CRIS_LED_GREEN | CRIS_LED_RED)
-
-#if (defined(CONFIG_ETRAX_NBR_LED_GRP_ONE) || defined(CONFIG_ETRAX_NBR_LED_GRP_TWO))
-#define CRIS_LED_NETWORK_GRP0_SET(x)                          \
-       do {                                             \
-               CRIS_LED_NETWORK_GRP0_SET_G((x) & CRIS_LED_GREEN); \
-               CRIS_LED_NETWORK_GRP0_SET_R((x) & CRIS_LED_RED);   \
-       } while (0)
-#else
-#define CRIS_LED_NETWORK_GRP0_SET(x) while (0) {}
-#endif
-
-#define CRIS_LED_NETWORK_GRP0_SET_G(x) \
-       crisv32_io_set(&crisv32_led_net0_green, !(x));
-
-#define CRIS_LED_NETWORK_GRP0_SET_R(x) \
-       crisv32_io_set(&crisv32_led_net0_red, !(x));
-
-#if defined(CONFIG_ETRAX_NBR_LED_GRP_TWO)
-#define CRIS_LED_NETWORK_GRP1_SET(x)                          \
-       do {                                             \
-               CRIS_LED_NETWORK_GRP1_SET_G((x) & CRIS_LED_GREEN); \
-               CRIS_LED_NETWORK_GRP1_SET_R((x) & CRIS_LED_RED);   \
-       } while (0)
-#else
-#define CRIS_LED_NETWORK_GRP1_SET(x) while (0) {}
-#endif
-
-#define CRIS_LED_NETWORK_GRP1_SET_G(x) \
-       crisv32_io_set(&crisv32_led_net1_green, !(x));
-
-#define CRIS_LED_NETWORK_GRP1_SET_R(x) \
-       crisv32_io_set(&crisv32_led_net1_red, !(x));
-
-#define CRIS_LED_ACTIVE_SET(x)                           \
-       do {                                        \
-               CRIS_LED_ACTIVE_SET_G((x) & CRIS_LED_GREEN);  \
-               CRIS_LED_ACTIVE_SET_R((x) & CRIS_LED_RED);    \
-       } while (0)
-
-#define CRIS_LED_ACTIVE_SET_G(x) \
-       crisv32_io_set(&crisv32_led2_green, !(x));
-#define CRIS_LED_ACTIVE_SET_R(x) \
-       crisv32_io_set(&crisv32_led2_red, !(x));
-#define CRIS_LED_DISK_WRITE(x) \
-         do{\
-               crisv32_io_set(&crisv32_led3_green, !(x)); \
-               crisv32_io_set(&crisv32_led3_red, !(x));   \
-        }while(0)
-#define CRIS_LED_DISK_READ(x) \
-       crisv32_io_set(&crisv32_led3_green, !(x));
-
-#endif
index 0c1b4d3a34e749b222c1d0ec0af3a56a4b80b952..8270a1bbfdb6d0a7468f39f52884792281b5f2d3 100644 (file)
@@ -4,7 +4,7 @@
 #include <hwregs/intr_vect.h>
 
 /* Number of non-cpu interrupts. */
-#define NR_IRQS NBR_INTR_VECT /* Exceptions + IRQs */
+#define NR_IRQS (NBR_INTR_VECT + 256) /* Exceptions + IRQs */
 #define FIRST_IRQ 0x31 /* Exception number for first IRQ */
 #define NR_REAL_IRQS (NBR_INTR_VECT - FIRST_IRQ) /* IRQs */
 #if NR_REAL_IRQS > 32
index b7f68192d15b52cb4e6c34c78eac88a02fa971d5..1778805f63809d378a5cafb6c920b517ea753c7b 100644 (file)
@@ -43,4 +43,5 @@ generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += types.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
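
Several Kbuild hunks in this series add "generic-y += word-at-a-time.h", letting those architectures pick up the asm-generic implementation used by callers such as strncpy_from_user() and the dentry name hashing. Its central idea is branch-free detection of a zero byte inside a whole word; below is a hedged 32-bit sketch of that bit trick. has_zero() here is a local illustration only; the kernel header also covers 64-bit words and uses a different formulation on big-endian machines.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Branch-free "does this 32-bit word contain a zero byte?" test:
 * the result is nonzero iff some byte of w is zero. */
static uint32_t has_zero(uint32_t w)
{
        return (w - 0x01010101u) & ~w & 0x80808080u;
}

int main(void)
{
        uint32_t with_nul, without_nul;

        memcpy(&with_nul, "ab\0d", 4);
        memcpy(&without_nul, "abcd", 4);
        printf("%08x %08x\n", (unsigned)has_zero(with_nul),
               (unsigned)has_zero(without_nul));
        return 0;
}
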
index 10ce36cf79a911f6ae4d55e7eac35f812710b50c..70aa448256b0ba69828e487ab4ab345f7d07f5c1 100644 (file)
@@ -45,8 +45,7 @@
    assumed that we want to share code when debugging (exposes more
    trouble). */
 #ifndef SHARE_LIB_CORE
-# if (defined(__KERNEL__) || !defined(RELOC_DEBUG)) \
-     && !defined(CONFIG_SHARE_SHLIB_CORE)
+# if (defined(__KERNEL__) || !defined(RELOC_DEBUG))
 #  define SHARE_LIB_CORE 0
 # else
 #  define SHARE_LIB_CORE 1
index 752a3f45df60cd475b51b21bb567b9bd48f90581..cce8664d5dd67b105866d476b99e1123de0b8c8b 100644 (file)
@@ -2,7 +2,9 @@
 #define _ASM_CRIS_IO_H
 
 #include <asm/page.h>   /* for __va, __pa */
+#ifdef CONFIG_ETRAX_ARCH_V10
 #include <arch/io.h>
+#endif
 #include <asm-generic/iomap.h>
 #include <linux/kernel.h>
 
index 461c089db765af02188d880741b505a7c781dddc..c6e7d57c8b248dff6d7b0cf57d113c98499852b1 100644 (file)
  *       g1-g7 and g25-g31 is both input and outputs but on different pins
  *       Also note that some bits change pins depending on what interfaces
  *       are enabled.
- *
- * For ETRAX FS (CONFIG_ETRAXFS):
- * /dev/gpioa  minor 0,  8 bit GPIO, each bit can change direction
- * /dev/gpiob  minor 1, 18 bit GPIO, each bit can change direction
- * /dev/gpioc  minor 3, 18 bit GPIO, each bit can change direction
- * /dev/gpiod  minor 4, 18 bit GPIO, each bit can change direction
- * /dev/gpioe  minor 5, 18 bit GPIO, each bit can change direction
- * /dev/leds   minor 2, Access to leds depending on kernelconfig
- *
- * For ARTPEC-3 (CONFIG_CRIS_MACH_ARTPEC3):
- * /dev/gpioa  minor 0, 32 bit GPIO, each bit can change direction
- * /dev/gpiob  minor 1, 32 bit GPIO, each bit can change direction
- * /dev/gpioc  minor 3, 16 bit GPIO, each bit can change direction
- * /dev/gpiod  minor 4, 32 bit GPIO, input only
- * /dev/leds   minor 2, Access to leds depending on kernelconfig
- * /dev/pwm0   minor 16, PWM channel 0 on PA30
- * /dev/pwm1   minor 17, PWM channel 1 on PA31
- * /dev/pwm2   minor 18, PWM channel 2 on PB26
- * /dev/ppwm   minor 19, PPWM channel
- *
  */
 #ifndef _ASM_ETRAXGPIO_H
 #define _ASM_ETRAXGPIO_H
 #define ETRAXGPIO_IOCTYPE 43
 
 /* etraxgpio _IOC_TYPE, bits 8 to 15 in ioctl cmd */
-#ifdef CONFIG_ETRAX_ARCH_V10
 #define GPIO_MINOR_A 0
 #define GPIO_MINOR_B 1
 #define GPIO_MINOR_LEDS 2
 #define GPIO_MINOR_G 3
 #define GPIO_MINOR_LAST 3
 #define GPIO_MINOR_LAST_REAL GPIO_MINOR_LAST
-#endif
-
-#ifdef CONFIG_ETRAXFS
-#define GPIO_MINOR_A 0
-#define GPIO_MINOR_B 1
-#define GPIO_MINOR_LEDS 2
-#define GPIO_MINOR_C 3
-#define GPIO_MINOR_D 4
-#define GPIO_MINOR_E 5
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-#define GPIO_MINOR_V 6
-#define GPIO_MINOR_LAST 6
-#else
-#define GPIO_MINOR_LAST 5
-#endif
-#define GPIO_MINOR_LAST_REAL GPIO_MINOR_LAST
-#endif
-
-#ifdef CONFIG_CRIS_MACH_ARTPEC3
-#define GPIO_MINOR_A 0
-#define GPIO_MINOR_B 1
-#define GPIO_MINOR_LEDS 2
-#define GPIO_MINOR_C 3
-#define GPIO_MINOR_D 4
-#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
-#define GPIO_MINOR_V 6
-#define GPIO_MINOR_LAST 6
-#else
-#define GPIO_MINOR_LAST 4
-#endif
-#define GPIO_MINOR_FIRST_PWM 16
-#define GPIO_MINOR_PWM0 (GPIO_MINOR_FIRST_PWM+0)
-#define GPIO_MINOR_PWM1 (GPIO_MINOR_FIRST_PWM+1)
-#define GPIO_MINOR_PWM2 (GPIO_MINOR_FIRST_PWM+2)
-#define GPIO_MINOR_PPWM (GPIO_MINOR_FIRST_PWM+3)
-#define GPIO_MINOR_LAST_PWM GPIO_MINOR_PPWM
-#define GPIO_MINOR_LAST_REAL GPIO_MINOR_LAST_PWM
-#endif
-
 
 
 /* supported ioctl _IOC_NR's */
 #define IO_SETGET_OUTPUT 0x13 /* bits set in *arg is set to output, */
                              /* *arg updated with current output pins. */
 
-/* The following ioctl's are applicable to the PWM channels only */
-
-#define IO_PWM_SET_MODE     0x20
-
-enum io_pwm_mode {
-       PWM_OFF = 0,            /* disabled, deallocated */
-       PWM_STANDARD = 1,       /* 390 kHz, duty cycle 0..255/256 */
-       PWM_FAST = 2,           /* variable freq, w/ 10ns active pulse len */
-       PWM_VARFREQ = 3,        /* individually configurable high/low periods */
-       PWM_SOFT = 4            /* software generated */
-};
-
-struct io_pwm_set_mode {
-       enum io_pwm_mode mode;
-};
-
-/* Only for mode PWM_VARFREQ. Period lo/high set in increments of 10ns
- * from 10ns (value = 0) to 81920ns (value = 8191)
- * (Resulting frequencies range from 50 MHz (10ns + 10ns) down to
- * 6.1 kHz (81920ns + 81920ns) at 50% duty cycle, to 12.2 kHz at min/max duty
- * cycle (81920 + 10ns or 10ns + 81920ns, respectively).)
- */
-#define IO_PWM_SET_PERIOD   0x21
-
-struct io_pwm_set_period {
-       unsigned int lo;                /* 0..8191 */
-       unsigned int hi;                /* 0..8191 */
-};
-
-/* Only for modes PWM_STANDARD and PWM_FAST.
- * For PWM_STANDARD, set duty cycle of 390 kHz PWM output signal, from
- * 0 (value = 0) to 255/256 (value = 255).
- * For PWM_FAST, set duty cycle of PWM output signal from
- * 0% (value = 0) to 100% (value = 255). Output signal in this mode
- * is a 10ns pulse surrounded by a high or low level depending on duty
- * cycle (except for 0% and 100% which result in a constant output).
- * Resulting output frequency varies from 50 MHz at 50% duty cycle,
- * down to 390 kHz at min/max duty cycle.
- */
-#define IO_PWM_SET_DUTY     0x22
-
-struct io_pwm_set_duty {
-       int duty;               /* 0..255 */
-};
-
-/* Returns information about the latest PWM pulse.
- * lo: Length of the latest low period, in units of 10ns.
- * hi: Length of the latest high period, in units of 10ns.
- * cnt: Time since last detected edge, in units of 10ns.
- *
- * The input source to PWM is decied by IO_PWM_SET_INPUT_SRC.
- *
- * NOTE: All PWM devices is connected to the same input source.
- */
-#define IO_PWM_GET_PERIOD   0x23
-
-struct io_pwm_get_period {
-       unsigned int lo;
-       unsigned int hi;
-       unsigned int cnt;
-};
-
-/* Sets the input source for the PWM input. For the src value see the
- * register description for gio:rw_pwm_in_cfg.
- *
- * NOTE: All PWM devices is connected to the same input source.
- */
-#define IO_PWM_SET_INPUT_SRC   0x24
-struct io_pwm_set_input_src {
-       unsigned int src;       /* 0..7 */
-};
-
-/* Sets the duty cycles in steps of 1/256, 0 = 0%, 255 = 100% duty cycle */
-#define IO_PPWM_SET_DUTY     0x25
-
-struct io_ppwm_set_duty {
-       int duty;               /* 0..255 */
-};
-
-/* Configuraton struct for the IO_PWMCLK_SET_CONFIG ioctl to configure
- * PWM capable gpio pins:
- */
-#define IO_PWMCLK_SETGET_CONFIG 0x26
-struct gpio_pwmclk_conf {
-  unsigned int gpiopin; /* The pin number based on the opened device */
-  unsigned int baseclk; /* The base clock to use, or sw will select one close*/
-  unsigned int low;     /* The number of low periods of the baseclk */
-  unsigned int high;    /* The number of high periods of the baseclk */
-};
-
-/* Examples:
- * To get a symmetric 12 MHz clock without knowing anything about the hardware:
- * baseclk = 12000000, low = 0, high = 0
- * To just get info of current setting:
- * baseclk = 0, low = 0, high = 0, the values will be updated by driver.
- */
-
 #endif
index e704f81f85cc7e9642a4ca64b88c180e70ef0f5d..31b4bd288cada7755c112eafeeab386ba6f49e95 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/pgtable.h>
 #include <asm/fasttimer.h>
 
-extern unsigned long get_cmos_time(void);
 extern void __Udiv(void);
 extern void __Umod(void);
 extern void __Div(void);
@@ -30,7 +29,6 @@ extern void __negdi2(void);
 extern void iounmap(volatile void * __iomem);
 
 /* Platform dependent support */
-EXPORT_SYMBOL(get_cmos_time);
 EXPORT_SYMBOL(loops_per_usec);
 
 /* Math functions */
index 7780d379522f49bcebddc54ddfc57f7d1d6d1e34..2dda6da7152159bb62663ff8b9d2158bdd75ce48 100644 (file)
 extern unsigned long loops_per_jiffy; /* init/main.c */
 unsigned long loops_per_usec;
 
-int set_rtc_mmss(unsigned long nowtime)
-{
-       D(printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime));
-       return 0;
-}
-
-/* grab the time from the RTC chip */
-unsigned long get_cmos_time(void)
-{
-       return 0;
-}
-
-
-int update_persistent_clock(struct timespec now)
-{
-       return set_rtc_mmss(now.tv_sec);
-}
-
-void read_persistent_clock(struct timespec *ts)
-{
-       ts->tv_sec = 0;
-       ts->tv_nsec = 0;
-}
-
-
 extern void cris_profile_sample(struct pt_regs* regs);
 
 void
index 8e47b832cc7684af7a10d2a0680651cf5bf8cc0d..1fa084cf1a4398934889658b8b21f66154bf0ab6 100644 (file)
@@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 0da689def4cc66feec44f0e84e7be1c7c095817b..64f02d451aa8c915c81eebbd4c34f5808449a97b 100644 (file)
@@ -32,8 +32,8 @@
  */
 
 #define ATOMIC_INIT(i)         { (i) }
-#define atomic_read(v)         ACCESS_ONCE((v)->counter)
-#define atomic_set(v, i)       (((v)->counter) = (i))
+#define atomic_read(v)         READ_ONCE((v)->counter)
+#define atomic_set(v, i)       WRITE_ONCE(((v)->counter), (i))
 
 static inline int atomic_inc_return(atomic_t *v)
 {
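
The ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE() conversions repeated across the per-architecture atomic headers in this series all do the same thing: route the load and store of v->counter through a volatile access so the compiler cannot tear, merge or re-load it. A rough user-space approximation of what the macros boil down to for plain scalars is sketched below; the real kernel macros live in compiler.h and also handle non-scalar sizes, so treat these definitions as stand-ins only.

#include <stdio.h>

/* Simplified stand-ins: a volatile cast forces one untorn load/store. */
#define READ_ONCE(x)      (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

typedef struct { int counter; } atomic_t;

#define atomic_read(v)    READ_ONCE((v)->counter)
#define atomic_set(v, i)  WRITE_ONCE((v)->counter, (i))

int main(void)
{
        atomic_t a = { 0 };

        atomic_set(&a, 42);
        printf("%d\n", atomic_read(&a));
        return 0;
}
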
index 70e6ae1e700673e3acbd03452d22f57db9c1166d..373cb23301e30248bfd62f2a08c6529f93db0382 100644 (file)
@@ -73,4 +73,5 @@ generic-y += uaccess.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 702ee539f87da5e0e397d091a9bd56218c4173d4..4435a445ae7ec7f8d5fa88edc649d34f6dfdd257 100644 (file)
@@ -11,8 +11,8 @@
 
 #define ATOMIC_INIT(i) { (i) }
 
-#define atomic_read(v)         ACCESS_ONCE((v)->counter)
-#define atomic_set(v, i)       (((v)->counter) = i)
+#define atomic_read(v)         READ_ONCE((v)->counter)
+#define atomic_set(v, i)       WRITE_ONCE(((v)->counter), (i))
 
 #include <linux/kernel.h>
 
index daee37bd09991a3edf732451587e59a8db001d54..db8ddabc6bd2819ba579c4c407a569e2232daa51 100644 (file)
@@ -58,4 +58,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 811d61f6422da107e9c4da9e03d02669eff1079b..55696c4100d468208c757f30be47806c877dfab9 100644 (file)
@@ -48,7 +48,7 @@ static inline void atomic_set(atomic_t *v, int new)
  *
  * Assumes all word reads on our architecture are atomic.
  */
-#define atomic_read(v)         ((v)->counter)
+#define atomic_read(v)         READ_ONCE((v)->counter)
 
 /**
  * atomic_xchg - atomic
index 9de3ba12f6b97c0f8722e2f929c70fbae429fd34..502a91d8dbbd80df039d9a111de04c1caadf0e3c 100644 (file)
@@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
 generic-y += vtime.h
+generic-y += word-at-a-time.h
index be4beeb77d57c2d2ca3a20de5ec61cfb18de38ad..8dfb5f6f6c352c45126656216edb228e15026be9 100644 (file)
 #define ATOMIC_INIT(i)         { (i) }
 #define ATOMIC64_INIT(i)       { (i) }
 
-#define atomic_read(v)         ACCESS_ONCE((v)->counter)
-#define atomic64_read(v)       ACCESS_ONCE((v)->counter)
+#define atomic_read(v)         READ_ONCE((v)->counter)
+#define atomic64_read(v)       READ_ONCE((v)->counter)
 
-#define atomic_set(v,i)                (((v)->counter) = (i))
-#define atomic64_set(v,i)      (((v)->counter) = (i))
+#define atomic_set(v,i)                WRITE_ONCE(((v)->counter), (i))
+#define atomic64_set(v,i)      WRITE_ONCE(((v)->counter), (i))
 
 #define ATOMIC_OP(op, c_op)                                            \
 static __inline__ int                                                  \
index 99c96a5e6016b50a951ba8dfccf609cb06962232..db73390568c8dd873fc045cd35773714bd35f8e1 100644 (file)
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls                    321 /* length of syscall table */
+#define NR_syscalls                    322 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
index 98e94e19a5a0870fc71b34f85310f475de85697b..9038726e7d26777bf8f704f101ff86090e4d5059 100644 (file)
 #define __NR_execveat                  1342
 #define __NR_userfaultfd               1343
 #define __NR_membarrier                        1344
+#define __NR_kcmp                      1345
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
index 37cc7a65cd3ee1fc3304d862776f438db4158b4d..dcd97f84d065435a8eca5c041f33bb67c3912797 100644 (file)
@@ -1770,5 +1770,6 @@ sys_call_table:
        data8 sys_execveat
        data8 sys_userfaultfd
        data8 sys_membarrier
+       data8 sys_kcmp                          // 1345
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
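
Wiring a syscall on ia64 takes the three coordinated edits shown in these hunks: NR_syscalls bumped from 321 to 322, __NR_kcmp (1345) added to the UAPI header, and sys_kcmp appended at the matching slot of sys_call_table. From user space the new entry is then reachable through syscall(2); a hedged sketch follows, using the 1345 number from the hunk above. kcmp() reports whether two tasks share a kernel resource (0 means "equal") and is only available when the kernel was built with CONFIG_CHECKPOINT_RESTORE.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kcmp.h>

#ifndef __NR_kcmp
#define __NR_kcmp 1345          /* the ia64 number wired up above */
#endif

int main(void)
{
        pid_t self = getpid();
        /* kcmp(pid1, pid2, type, idx1, idx2); comparing a task with
         * itself for KCMP_VM should report 0 ("same mm"). */
        long ret = syscall(__NR_kcmp, self, self, KCMP_VM, 0, 0);

        printf("kcmp(self, self, KCMP_VM) = %ld\n", ret);
        return 0;
}
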
index e0eb704ca1fa93755d678e82c2336c8e188a34f0..fd104bd221ced1dff2c9485bdcb1be520171df7f 100644 (file)
@@ -9,3 +9,4 @@ generic-y += module.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 025e2a1704936250bb0403f619f70f4897826304..ea35160d632bfc073e15c912262d2190d05371b7 100644 (file)
@@ -28,7 +28,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
 
 /**
  * atomic_set - set atomic variable
@@ -37,7 +37,7 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v,i)        (((v)->counter) = (i))
+#define atomic_set(v,i)        WRITE_ONCE(((v)->counter), (i))
 
 #ifdef CONFIG_CHIP_M32700_TS1
 #define __ATOMIC_CLOBBER       , "r4"
index 0b6b40d37b95b5acd2f0561a82f12b3859b35880..5b4ec541ba7c99936d8f5072dc5716a778751235 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -57,7 +58,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -67,10 +67,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -179,6 +181,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -206,6 +209,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -370,6 +375,7 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -537,6 +543,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index eeb3a8991fc411e952cd6a20b3e44602b80f38e0..6e5198e2c124f89b86d6b5267280f5596448cc0a 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -344,6 +349,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -495,6 +501,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 3a7006654ce97c9df5bd521d66af5c29c61171fb..f75600b0ca23f78ec028948babb81b5b2c02b3ed 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -355,6 +360,7 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -517,6 +523,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 0586b323a673791fff0661eb5804347bc7ad16fb..a42d91c389a6fbba1c2c8f27069acb53332dd56f 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index ad1dbce07aa4b532fdb49fe515db2f2a19c37aa7..77f4a11083e9964050022f4ff22625b69b8672dd 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -345,6 +350,7 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -497,6 +503,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index b44acacaecf41f43bae326e63318c3c8c7e7d0f8..5a329f77329b155ed5a4dc0f06ca6973c82aae94 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -54,7 +55,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -64,10 +64,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -176,6 +178,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -203,6 +206,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -364,6 +369,7 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -519,6 +525,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 8afca3753db1f8f52245f2b4265c35f74268ae5e..83c80d2030ec96aa4740615101422e1fca1884e5 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -64,7 +65,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -74,10 +74,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -186,6 +188,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -213,6 +216,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -281,6 +285,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -410,6 +415,7 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -599,6 +605,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index ef00875994d9ac34191f2bd6a572d9c44118ef4d..6cb42c3bf5a280d1a537d515396a463049d4a250 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -52,7 +53,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -62,10 +62,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -174,6 +176,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -201,6 +204,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -266,6 +270,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 387c2bd90ff1490a8a1c1fcb08a11753c8b2b6b1..c7508c30330c43ee32c0f75d7c7c8112f16cdc35 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 35355c1bc714000d1e3d44b5a429b97fabeac84a..64b71664a3036aa2827984f34005fc2cd044de44 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -354,6 +359,7 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -510,6 +516,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 8442d267b877202e5293c0c5178eb340113e69f8..9a4cab78a2ea82ce3043be9c8a5582328774f21b 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -341,6 +346,7 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
index 0e1b542e155582a3685340bf0ce8651fcc5d8948..1a2eaac13dbdd540f223aea5c67c660723e3cf54 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -341,6 +346,7 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 039fac120cc0ebe31d39264ea985961e184aaf3e..4858178260f90435810c8201e220d0bee16e446e 100644 (file)
@@ -17,8 +17,8 @@
 
 #define ATOMIC_INIT(i) { (i) }
 
-#define atomic_read(v)         ACCESS_ONCE((v)->counter)
-#define atomic_set(v, i)       (((v)->counter) = i)
+#define atomic_read(v)         READ_ONCE((v)->counter)
+#define atomic_set(v, i)       WRITE_ONCE(((v)->counter), (i))
 
 /*
  * The ColdFire parts cannot do some immediate to memory operations,
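
The atomic_read()/atomic_set() change above, repeated in several later atomic headers in this series, replaces plain and ACCESS_ONCE() accesses with READ_ONCE()/WRITE_ONCE(). As a minimal sketch of what the new accessors guarantee (simplified stand-ins, not the kernel's actual <linux/compiler.h> definitions), they force exactly one volatile access of the declared type, so the compiler can neither tear, fuse nor re-read the value:

	/* Sketch only: simplified stand-ins for READ_ONCE()/WRITE_ONCE(). */
	#define SKETCH_READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))
	#define SKETCH_WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

	typedef struct { int counter; } sketch_atomic_t;

	static inline int sketch_atomic_read(const sketch_atomic_t *v)
	{
		return SKETCH_READ_ONCE(v->counter);	/* single volatile load */
	}

	static inline void sketch_atomic_set(sketch_atomic_t *v, int i)
	{
		SKETCH_WRITE_ONCE(v->counter, i);	/* single volatile store */
	}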
index 5a822bb790f72135862dcc0b7a97296954d73bd9..066e74f666ae95e4c9f58c42010bc857ca6454ca 100644 (file)
@@ -4,4 +4,34 @@
 #define __ALIGN .align 4
 #define __ALIGN_STR ".align 4"
 
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+       __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+       __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+       __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+       __asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+                             "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+                             "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+                             "m" (arg4), "m" (arg5), "m" (arg6))
+
 #endif
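
A hedged usage sketch for the helpers added above (the function name below is hypothetical; in-tree the macro is reached through the generic syscall definition machinery): the protect call names the caller-owned stack argument slots as inputs of an empty asm, so gcc keeps them live and untouched until the function returns.

	/* Hypothetical caller; shows the intended call shape only. */
	asmlinkage long example_sys_add(long a, long b)
	{
		long ret = a + b;

		/* Keep the two caller-owned argument slots live until we return. */
		asmlinkage_protect(2, ret, a, b);
		return ret;
	}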
index 244e0dbe45dbeda359e233cde23b4652f0ce13dc..0793a7f174176e6d590ca4d9567a9e3523c42c50 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            356
+#define NR_syscalls            375
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 61fb6cb9d2ae3c66a1c0c6dec1ac95adb83dd810..5e6fae6c275f9b110464cb80bbb47187c2251dcd 100644 (file)
 #define __NR_memfd_create      353
 #define __NR_bpf               354
 #define __NR_execveat          355
+#define __NR_socket            356
+#define __NR_socketpair                357
+#define __NR_bind              358
+#define __NR_connect           359
+#define __NR_listen            360
+#define __NR_accept4           361
+#define __NR_getsockopt                362
+#define __NR_setsockopt                363
+#define __NR_getsockname       364
+#define __NR_getpeername       365
+#define __NR_sendto            366
+#define __NR_sendmsg           367
+#define __NR_recvfrom          368
+#define __NR_recvmsg           369
+#define __NR_shutdown          370
+#define __NR_recvmmsg          371
+#define __NR_sendmmsg          372
+#define __NR_userfaultfd       373
+#define __NR_membarrier                374
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
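
The new numbers above give m68k direct entry points for the socket family (previously reachable only through the socketcall multiplexer) plus userfaultfd and membarrier. A hedged userspace sketch exercising the direct path through syscall(2); the fallback value 356 is copied from the hunk above and is only meaningful on an m68k kernel carrying this change:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_socket
	#define __NR_socket 356		/* m68k value from the hunk above */
	#endif

	int main(void)
	{
		/* AF_INET (2), SOCK_STREAM (1), default protocol (0) */
		long fd = syscall(__NR_socket, 2, 1, 0);

		if (fd < 0) {
			perror("direct socket syscall");
			return 1;
		}
		printf("direct socket syscall returned fd %ld\n", fd);
		return 0;
	}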
index a0ec4303f2c8e57a04fb353178d43b0be6a461fe..5dd0e80042f51107e63e0fcd832f4c46c85b826c 100644 (file)
@@ -376,4 +376,22 @@ ENTRY(sys_call_table)
        .long sys_memfd_create
        .long sys_bpf
        .long sys_execveat              /* 355 */
-
+       .long sys_socket
+       .long sys_socketpair
+       .long sys_bind
+       .long sys_connect
+       .long sys_listen                /* 360 */
+       .long sys_accept4
+       .long sys_getsockopt
+       .long sys_setsockopt
+       .long sys_getsockname
+       .long sys_getpeername           /* 365 */
+       .long sys_sendto
+       .long sys_sendmsg
+       .long sys_recvfrom
+       .long sys_recvmsg
+       .long sys_shutdown              /* 370 */
+       .long sys_recvmmsg
+       .long sys_sendmmsg
+       .long sys_userfaultfd
+       .long sys_membarrier
index c86ac37d198349bceea1ea94aa340668402cacf0..cfe9aa4223431764f04f8f6fd29f4f00c3cb336c 100644 (file)
@@ -125,8 +125,5 @@ void __init idprom_init(void)
 
        display_system_type(idprom->id_machtype);
 
-       printk("Ethernet address: %x:%x:%x:%x:%x:%x\n",
-                   idprom->id_ethaddr[0], idprom->id_ethaddr[1],
-                   idprom->id_ethaddr[2], idprom->id_ethaddr[3],
-                   idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
+       printk("Ethernet address: %pM\n", idprom->id_ethaddr);
 }
index df31353fd2001dc0e357feafcf975aa6e5537622..29acb89daaaa55f2b1640c3d751233ed034b43b7 100644 (file)
@@ -54,4 +54,5 @@ generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 21c4c268b86c6e87ce0342487b3ab4ebad618d59..a62581815624787a57881f50057e57a9fb2957cc 100644 (file)
@@ -3,7 +3,7 @@
 
 #define ATOMIC_INIT(i) { (i) }
 
-#define atomic_set(v, i)               ((v)->counter = (i))
+#define atomic_set(v, i)               WRITE_ONCE((v)->counter, (i))
 
 #include <linux/compiler.h>
 
index f8efe380fe8b334ef87cb91e905a52bfa1da2b48..0295d9b8d5bf2730dc9fe3fed7ab523444b2354e 100644 (file)
@@ -10,7 +10,7 @@
 
 static inline int atomic_read(const atomic_t *v)
 {
-       return (v)->counter;
+       return READ_ONCE((v)->counter);
 }
 
 /*
index 2f222f355c4bbc69842ccd62f3419b0cbd1732a4..b0ae88c9fed922a4ba95be0498f421d3927eb40e 100644 (file)
@@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += syscalls.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 15ecb4831e125838477a4805810381e1f1bb6725..eeb3953ed8ac8d8051ccbe3df9af8270915329b3 100644 (file)
@@ -293,8 +293,26 @@ static int __init ath79_misc_intc_of_init(
 
        return 0;
 }
-IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc",
-               ath79_misc_intc_of_init);
+
+static int __init ar7100_misc_intc_of_init(
+       struct device_node *node, struct device_node *parent)
+{
+       ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+       return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
+               ar7100_misc_intc_of_init);
+
+static int __init ar7240_misc_intc_of_init(
+       struct device_node *node, struct device_node *parent)
+{
+       ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+       return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
+               ar7240_misc_intc_of_init);
 
 static int __init ar79_cpu_intc_of_init(
        struct device_node *node, struct device_node *parent)
index 0352bc8d56b316588dbca209462700c4ffa03aa3..4f9eb05768840f93006384959f0a7dfffdb7caf2 100644 (file)
@@ -1094,7 +1094,7 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
        unsigned int pin;
        unsigned int trigger;
 
-       if (d->of_node != node)
+       if (irq_domain_get_of_node(d) != node)
                return -EINVAL;
 
        if (intsize < 2)
@@ -2163,7 +2163,7 @@ static int octeon_irq_cib_map(struct irq_domain *d,
 
        if (hw >= host_data->max_bits) {
                pr_err("ERROR: %s mapping %u is to big!\n",
-                      d->of_node->name, (unsigned)hw);
+                      irq_domain_get_of_node(d)->name, (unsigned)hw);
                return -EINVAL;
        }
 
index 89a628455bc253b01c2cc7b9f3cc2a6d9a426493..bd634259eab9d6b0f7d1b8e746d0b11c01f0950d 100644 (file)
@@ -933,7 +933,7 @@ void __init plat_mem_setup(void)
        while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
                && (total < MAX_MEMORY)) {
                memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
-                                               __pa_symbol(&__init_end), -1,
+                                               __pa_symbol(&_end), -1,
                                                0x100000,
                                                CVMX_BOOTMEM_FLAG_NO_LOCKING);
                if (memory >= 0) {
index 642b50946943ccebd0fc24b9e4b6cc06c4a90d63..8b7429127a1d18c192677e7b04d7d9c68e6e9875 100644 (file)
@@ -257,7 +257,6 @@ CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=16
 CONFIG_MMC_TEST=m
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_RTC_CLASS=y
index 40ec4ca3f946a9238afa6e0edd25c0440f86a36b..c7fe4d01e79c61bbaee58aa25b9b50bf5b9a380e 100644 (file)
@@ -17,4 +17,5 @@ generic-y += segment.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 4c42fd9af7778462d18bea0724da19d2cdb35d4b..f82d3af07931656a1f295b07972d6717df89a30d 100644 (file)
@@ -30,7 +30,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)         ACCESS_ONCE((v)->counter)
+#define atomic_read(v)         READ_ONCE((v)->counter)
 
 /*
  * atomic_set - set atomic variable
@@ -39,7 +39,7 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v, i)               ((v)->counter = (i))
+#define atomic_set(v, i)       WRITE_ONCE((v)->counter, (i))
 
 #define ATOMIC_OP(op, c_op, asm_op)                                          \
 static __inline__ void atomic_##op(int i, atomic_t * v)                              \
@@ -315,14 +315,14 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  * @v: pointer of type atomic64_t
  *
  */
-#define atomic64_read(v)       ACCESS_ONCE((v)->counter)
+#define atomic64_read(v)       READ_ONCE((v)->counter)
 
 /*
  * atomic64_set - set atomic variable
  * @v: pointer of type atomic64_t
  * @i: required value
  */
-#define atomic64_set(v, i)     ((v)->counter = (i))
+#define atomic64_set(v, i)     WRITE_ONCE((v)->counter, (i))
 
 #define ATOMIC64_OP(op, c_op, asm_op)                                        \
 static __inline__ void atomic64_##op(long i, atomic64_t * v)                 \
index 9801ac9826554ca0d8ab2e27e21ad6305f18da67..fe67f12ac2393b23705b4a094bbf8c3b7cc77166 100644 (file)
@@ -20,6 +20,9 @@
 #ifndef cpu_has_tlb
 #define cpu_has_tlb            (cpu_data[0].options & MIPS_CPU_TLB)
 #endif
+#ifndef cpu_has_ftlb
+#define cpu_has_ftlb           (cpu_data[0].options & MIPS_CPU_FTLB)
+#endif
 #ifndef cpu_has_tlbinv
 #define cpu_has_tlbinv         (cpu_data[0].options & MIPS_CPU_TLBINV)
 #endif
index cd89e9855775276ea7c3a185d9e61b3702f06b6f..82ad15f11049284c2f347fc66b68ea5091fb810d 100644 (file)
@@ -385,6 +385,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_CDMM          0x4000000000ull /* CPU has Common Device Memory Map */
 #define MIPS_CPU_BP_GHIST      0x8000000000ull /* R12K+ Branch Prediction Global History */
 #define MIPS_CPU_SP            0x10000000000ull /* Small (1KB) page support */
+#define MIPS_CPU_FTLB          0x20000000000ull /* CPU has Fixed-page-size TLB */
 
 /*
  * CPU ASE encodings
index 9e777cd42b67190a5dce5ac11553a3de05e4375e..d10fd80dbb7e96b898d2230c2d0f5112d02c3bc1 100644 (file)
@@ -256,6 +256,7 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
  */
 #define ioremap_nocache(offset, size)                                  \
        __ioremap_mode((offset), (size), _CACHE_UNCACHED)
+#define ioremap_uc ioremap_nocache
 
 /*
  * ioremap_cachable -  map bus memory into CPU space
index b02891f9caaf1d8ffecf14b0e0fa7be5b7fb33c8..21d9607c80d7deaa1ef2174c0dde62bda9985bde 100644 (file)
@@ -65,6 +65,15 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower,
        back_to_back_c0_hazard();
 }
 
+/**
+ * maar_init() - initialise MAARs
+ *
+ * Performs initialisation of MAARs for the current CPU, making use of the
+ * platforms implementation of platform_maar_init where necessary and
+ * duplicating the setup it provides on secondary CPUs.
+ */
+extern void maar_init(void);
+
 /**
  * struct maar_config - MAAR configuration data
  * @lower:     The lowest address that the MAAR pair will affect. Must be
index d75b75e78ebb4749355f95a1709202b9f2d069af..1f1927ab42690b284257faa8991f4cff31a3264d 100644 (file)
@@ -194,6 +194,7 @@ BUILD_CM_RW(reg3_mask,              MIPS_CM_GCB_OFS + 0xc8)
 BUILD_CM_R_(gic_status,                MIPS_CM_GCB_OFS + 0xd0)
 BUILD_CM_R_(cpc_status,                MIPS_CM_GCB_OFS + 0xf0)
 BUILD_CM_RW(l2_config,         MIPS_CM_GCB_OFS + 0x130)
+BUILD_CM_RW(sys_config2,       MIPS_CM_GCB_OFS + 0x150)
 
 /* Core Local & Core Other register accessor functions */
 BUILD_CM_Cx_RW(reset_release,  0x00)
@@ -316,6 +317,10 @@ BUILD_CM_Cx_R_(tcid_8_priority,    0x80)
 #define CM_GCR_L2_CONFIG_ASSOC_SHF             0
 #define CM_GCR_L2_CONFIG_ASSOC_MSK             (_ULCAST_(0xff) << 0)
 
+/* GCR_SYS_CONFIG2 register fields */
+#define CM_GCR_SYS_CONFIG2_MAXVPW_SHF          0
+#define CM_GCR_SYS_CONFIG2_MAXVPW_MSK          (_ULCAST_(0xf) << 0)
+
 /* GCR_Cx_COHERENCE register fields */
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF    0
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK    (_ULCAST_(0xff) << 0)
@@ -405,4 +410,38 @@ static inline int mips_cm_revision(void)
        return read_gcr_rev();
 }
 
+/**
+ * mips_cm_max_vp_width() - return the width in bits of VP indices
+ *
+ * Return: the width, in bits, of VP indices in fields that combine core & VP
+ * indices.
+ */
+static inline unsigned int mips_cm_max_vp_width(void)
+{
+       extern int smp_num_siblings;
+
+       if (mips_cm_revision() >= CM_REV_CM3)
+               return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
+
+       return smp_num_siblings;
+}
+
+/**
+ * mips_cm_vp_id() - calculate the hardware VP ID for a CPU
+ * @cpu: the CPU whose VP ID to calculate
+ *
+ * Hardware such as the GIC uses identifiers for VPs which may not match the
+ * CPU numbers used by Linux. This function calculates the hardware VP
+ * identifier corresponding to a given CPU.
+ *
+ * Return: the VP ID for the CPU.
+ */
+static inline unsigned int mips_cm_vp_id(unsigned int cpu)
+{
+       unsigned int core = cpu_data[cpu].core;
+       unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);
+
+       return (core * mips_cm_max_vp_width()) + vp;
+}
+
 #endif /* __MIPS_ASM_MIPS_CM_H__ */
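
A quick worked example of the mapping above, with hypothetical numbers: on a CM3 system whose GCR_SYS_CONFIG2.MAXVPW field reads 4, the CPU sitting in core 2, VP 1 gets hardware VP ID (2 * 4) + 1 = 9; on pre-CM3 systems the multiplier falls back to smp_num_siblings. The same arithmetic as a stand-alone sketch:

	/* Stand-alone restatement of the calculation in mips_cm_vp_id(). */
	static inline unsigned int example_vp_id(unsigned int core, unsigned int vp,
						 unsigned int max_vp_width)
	{
		return (core * max_vp_width) + vp;	/* e.g. (2 * 4) + 1 == 9 */
	}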
index d3cd8eac81e3a76baf455dd95e881cfd9c72eed9..c64781cf649f86b4ca4eec0fee38ba4c4da523e7 100644 (file)
 
 /* Bits specific to the MIPS32/64 PRA. */
 #define MIPS_CONF_MT           (_ULCAST_(7) <<  7)
+#define MIPS_CONF_MT_TLB       (_ULCAST_(1) <<  7)
+#define MIPS_CONF_MT_FTLB      (_ULCAST_(4) <<  7)
 #define MIPS_CONF_AR           (_ULCAST_(7) << 10)
 #define MIPS_CONF_AT           (_ULCAST_(3) << 13)
 #define MIPS_CONF_M            (_ULCAST_(1) << 31)
index c4ddc4f0d2dcb11c7aa55167434d72e7990e083a..23cd9b118c9e4f8f8fd43031dd67dcee8e610af8 100644 (file)
 
 #define __SWAB_64_THRU_32__
 
-#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) ||              \
-    defined(_MIPS_ARCH_LOONGSON3A)
+#if !defined(__mips16) &&                                      \
+       ((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) ||  \
+        defined(_MIPS_ARCH_LOONGSON3A))
 
-static inline __attribute__((nomips16)) __attribute_const__
-               __u16 __arch_swab16(__u16 x)
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
 {
        __asm__(
        "       .set    push                    \n"
        "       .set    arch=mips32r2           \n"
-       "       .set    nomips16                \n"
        "       wsbh    %0, %1                  \n"
        "       .set    pop                     \n"
        : "=r" (x)
@@ -32,13 +31,11 @@ static inline __attribute__((nomips16)) __attribute_const__
 }
 #define __arch_swab16 __arch_swab16
 
-static inline __attribute__((nomips16)) __attribute_const__
-               __u32 __arch_swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 {
        __asm__(
        "       .set    push                    \n"
        "       .set    arch=mips32r2           \n"
-       "       .set    nomips16                \n"
        "       wsbh    %0, %1                  \n"
        "       rotr    %0, %0, 16              \n"
        "       .set    pop                     \n"
@@ -54,13 +51,11 @@ static inline __attribute__((nomips16)) __attribute_const__
  * 64-bit kernel on r2 CPUs.
  */
 #ifdef __mips64
-static inline __attribute__((nomips16)) __attribute_const__
-               __u64 __arch_swab64(__u64 x)
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 {
        __asm__(
        "       .set    push                    \n"
        "       .set    arch=mips64r2           \n"
-       "       .set    nomips16                \n"
        "       dsbh    %0, %1                  \n"
        "       dshd    %0, %0                  \n"
        "       .set    pop                     \n"
@@ -71,5 +66,5 @@ static inline __attribute__((nomips16)) __attribute_const__
 }
 #define __arch_swab64 __arch_swab64
 #endif /* __mips64 */
-#endif /* MIPS R2 or newer or Loongson 3A */
+#endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */
 #endif /* _ASM_SWAB_H */
index c03088f9f514e7c21f7ae0e185f8be0456af372b..cfabadb135d9fe94912080ea41ca761e6f1eca08 100644 (file)
 #define __NR_memfd_create              (__NR_Linux + 354)
 #define __NR_bpf                       (__NR_Linux + 355)
 #define __NR_execveat                  (__NR_Linux + 356)
+#define __NR_userfaultfd               (__NR_Linux + 357)
+#define __NR_membarrier                        (__NR_Linux + 358)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            356
+#define __NR_Linux_syscalls            358
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                356
+#define __NR_O32_Linux_syscalls                358
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_memfd_create              (__NR_Linux + 314)
 #define __NR_bpf                       (__NR_Linux + 315)
 #define __NR_execveat                  (__NR_Linux + 316)
+#define __NR_userfaultfd               (__NR_Linux + 317)
+#define __NR_membarrier                        (__NR_Linux + 318)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            316
+#define __NR_Linux_syscalls            318
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         316
+#define __NR_64_Linux_syscalls         318
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_memfd_create              (__NR_Linux + 318)
 #define __NR_bpf                       (__NR_Linux + 319)
 #define __NR_execveat                  (__NR_Linux + 320)
+#define __NR_userfaultfd               (__NR_Linux + 321)
+#define __NR_membarrier                        (__NR_Linux + 322)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            320
+#define __NR_Linux_syscalls            322
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                320
+#define __NR_N32_Linux_syscalls                322
 
 #endif /* _UAPI_ASM_UNISTD_H */
index 4e62bf85d0b0f910cc56c726167cb933b35045d0..459cb017306c21eb63259b8cb0a406c65080cc8b 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/power/jz4740-battery.h>
 #include <linux/power/gpio-charger.h>
 
+#include <asm/mach-jz4740/gpio.h>
 #include <asm/mach-jz4740/jz4740_fb.h>
 #include <asm/mach-jz4740/jz4740_mmc.h>
 #include <asm/mach-jz4740/jz4740_nand.h>
index a74e181058b0fc8ce001fffc68bce8f95bc7f02d..8c6d76c9b2d69bf6603e32598e3b8d74cba15bf6 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/seq_file.h>
 
 #include <asm/mach-jz4740/base.h>
+#include <asm/mach-jz4740/gpio.h>
 
 #define JZ4740_GPIO_BASE_A (32*0)
 #define JZ4740_GPIO_BASE_B (32*1)
index 9f71c06aebf6313c4a0b2bac9b1bbdc6228cee31..209ded16806bf5a295ff202258f5b7ff42070940 100644 (file)
@@ -39,6 +39,7 @@
         mfc0   \dest, CP0_CONFIG, 3
        andi    \dest, \dest, MIPS_CONF3_MT
        beqz    \dest, \nomt
+        nop
        .endm
 
 .section .text.cps-vec
@@ -223,10 +224,9 @@ LEAF(excep_ejtag)
        END(excep_ejtag)
 
 LEAF(mips_cps_core_init)
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
        /* Check that the core implements the MT ASE */
        has_mt  t0, 3f
-        nop
 
        .set    push
        .set    mips64r2
@@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes)
        PTR_ADDU t0, t0, t1
 
        /* Calculate this VPEs ID. If the core doesn't support MT use 0 */
+       li      t9, 0
+#ifdef CONFIG_MIPS_MT_SMP
        has_mt  ta2, 1f
-        li     t9, 0
 
        /* Find the number of VPEs present in the core */
        mfc0    t1, CP0_MVPCONF0
@@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes)
        /* Retrieve the VPE ID from EBase.CPUNum */
        mfc0    t9, $15, 1
        and     t9, t9, t1
+#endif
 
 1:     /* Calculate a pointer to this VPEs struct vpe_boot_config */
        li      t1, VPEBOOTCFG_SIZE
@@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes)
        PTR_L   ta3, COREBOOTCFG_VPECONFIG(t0)
        PTR_ADDU v0, v0, ta3
 
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
 
        /* If the core doesn't support MT then return */
        bnez    ta2, 1f
@@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes)
 
 2:     .set    pop
 
-#endif /* CONFIG_MIPS_MT */
+#endif /* CONFIG_MIPS_MT_SMP */
 
        /* Return */
        jr      ra
index 571a8e6ea5bd0048a840bc539135f27c32fec621..09a51d091941be3aa75ae37b53b386714a6d3db5 100644 (file)
@@ -410,16 +410,18 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 static inline unsigned int decode_config0(struct cpuinfo_mips *c)
 {
        unsigned int config0;
-       int isa;
+       int isa, mt;
 
        config0 = read_c0_config();
 
        /*
         * Look for Standard TLB or Dual VTLB and FTLB
         */
-       if ((((config0 & MIPS_CONF_MT) >> 7) == 1) ||
-           (((config0 & MIPS_CONF_MT) >> 7) == 4))
+       mt = config0 & MIPS_CONF_MT;
+       if (mt == MIPS_CONF_MT_TLB)
                c->options |= MIPS_CPU_TLB;
+       else if (mt == MIPS_CONF_MT_FTLB)
+               c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB;
 
        isa = (config0 & MIPS_CONF_AT) >> 13;
        switch (isa) {
@@ -559,15 +561,18 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
        if (cpu_has_tlb) {
                if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
                        c->options |= MIPS_CPU_TLBINV;
+
                /*
-                * This is a bit ugly. R6 has dropped that field from
-                * config4 and the only valid configuration is VTLB+FTLB so
-                * set a good value for mmuextdef for that case.
+                * R6 has dropped the MMUExtDef field from config4.
+                * On R6 the fields always describe the FTLB, and only if it is
+                * present according to Config.MT.
                 */
-               if (cpu_has_mips_r6)
+               if (!cpu_has_mips_r6)
+                       mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+               else if (cpu_has_ftlb)
                        mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
                else
-                       mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+                       mmuextdef = 0;
 
                switch (mmuextdef) {
                case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
index 423ae83af1fb7043a1daff5d06a079658446de5a..3375745b91980013d76ad2e7d9cd632d31b26af9 100644 (file)
@@ -18,7 +18,7 @@
        .set pop
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                    struct thread_info *next_ti, int usedfpu)
+ *                    struct thread_info *next_ti)
  */
        .align  7
        LEAF(resume)
        cpu_save_nonscratch a0
        LONG_S  ra, THREAD_REG31(a0)
 
-       /*
-        * check if we need to save FPU registers
-        */
-       .set push
-       .set noreorder
-       beqz    a3, 1f
-        PTR_L  t3, TASK_THREAD_INFO(a0)
-       .set pop
-
-       /*
-        * clear saved user stack CU1 bit
-        */
-       LONG_L  t0, ST_OFF(t3)
-       li      t1, ~ST0_CU1
-       and     t0, t0, t1
-       LONG_S  t0, ST_OFF(t3)
-
-       .set push
-       .set arch=mips64r2
-       fpu_save_double a0 t0 t1                # c0_status passed in t0
-                                               # clobbers t1
-       .set pop
-1:
-
 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
        /* Check if we need to store CVMSEG state */
        dmfc0   t0, $11,7       /* CvmMemCtl */
index 5087a4b72e6b9a4ae12fc8acdb6f7f8f318784c5..ac27ef7d4d0ebd8be86798d8191e32c720263625 100644 (file)
  */
 #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
 
-/*
- * FPU context is saved iff the process has used it's FPU in the current
- * time slice as indicated by TIF_USEDFPU.  In any case, the CU1 bit for user
- * space STATUS register should be 0, so that a process *always* starts its
- * userland with FPU disabled after each context switch.
- *
- * FPU will be enabled as soon as the process accesses FPU again, through
- * do_cpu() trap.
- */
-
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                    struct thread_info *next_ti, int usedfpu)
+ *                    struct thread_info *next_ti)
  */
 LEAF(resume)
        mfc0    t1, CP0_STATUS
@@ -50,22 +40,6 @@ LEAF(resume)
        cpu_save_nonscratch a0
        sw      ra, THREAD_REG31(a0)
 
-       beqz    a3, 1f
-
-       PTR_L   t3, TASK_THREAD_INFO(a0)
-
-       /*
-        * clear saved user stack CU1 bit
-        */
-       lw      t0, ST_OFF(t3)
-       li      t1, ~ST0_CU1
-       and     t0, t0, t1
-       sw      t0, ST_OFF(t3)
-
-       fpu_save_single a0, t0                  # clobbers t0
-
-1:
-
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
        PTR_LA  t8, __stack_chk_guard
        LONG_L  t9, TASK_STACK_CANARY(a1)
index 4cc13508d967c4076e0b720b120487281003ed72..65a74e4f0f456d1de9d29ed83a62e1bcc6e644fc 100644 (file)
@@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp)
        lw      t1, PT_EPC(sp)          # skip syscall on return
 
        subu    v0, v0, __NR_O32_Linux  # check syscall number
-       sltiu   t0, v0, __NR_O32_Linux_syscalls + 1
        addiu   t1, 4                   # skip to next instruction
        sw      t1, PT_EPC(sp)
-       beqz    t0, illegal_syscall
-
-       sll     t0, v0, 2
-       la      t1, sys_call_table
-       addu    t1, t0
-       lw      t2, (t1)                # syscall routine
-       beqz    t2, illegal_syscall
 
        sw      a3, PT_R26(sp)          # save a3 for syscall restarting
 
@@ -96,6 +88,16 @@ loads_done:
        li      t1, _TIF_WORK_SYSCALL_ENTRY
        and     t0, t1
        bnez    t0, syscall_trace_entry # -> yes
+syscall_common:
+       sltiu   t0, v0, __NR_O32_Linux_syscalls + 1
+       beqz    t0, illegal_syscall
+
+       sll     t0, v0, 2
+       la      t1, sys_call_table
+       addu    t1, t0
+       lw      t2, (t1)                # syscall routine
+
+       beqz    t2, illegal_syscall
 
        jalr    t2                      # Do The Real Thing (TM)
 
@@ -116,7 +118,7 @@ o32_syscall_exit:
 
 syscall_trace_entry:
        SAVE_STATIC
-       move    s0, t2
+       move    s0, v0
        move    a0, sp
 
        /*
@@ -129,27 +131,18 @@ syscall_trace_entry:
 
 1:     jal     syscall_trace_enter
 
-       bltz    v0, 2f                  # seccomp failed? Skip syscall
+       bltz    v0, 1f                  # seccomp failed? Skip syscall
+
+       move    v0, s0                  # restore syscall
 
-       move    t0, s0
        RESTORE_STATIC
        lw      a0, PT_R4(sp)           # Restore argument registers
        lw      a1, PT_R5(sp)
        lw      a2, PT_R6(sp)
        lw      a3, PT_R7(sp)
-       jalr    t0
-
-       li      t0, -EMAXERRNO - 1      # error?
-       sltu    t0, t0, v0
-       sw      t0, PT_R7(sp)           # set error flag
-       beqz    t0, 1f
-
-       lw      t1, PT_R2(sp)           # syscall number
-       negu    v0                      # error
-       sw      t1, PT_R0(sp)           # save it for syscall restarting
-1:     sw      v0, PT_R2(sp)           # result
+       j       syscall_common
 
-2:     j       syscall_exit
+1:     j       syscall_exit
 
 /* ------------------------------------------------------------------------ */
 
@@ -599,3 +592,5 @@ EXPORT(sys_call_table)
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 4355 */
        PTR     sys_execveat
+       PTR     sys_userfaultfd
+       PTR     sys_membarrier
index a6f6b762c47a4c5a2d395e13a1d564964595abe1..e732981cf99fde26181f1db3bcb65ebd86ea4a0d 100644 (file)
@@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp)
        .set    at
 #endif
 
-       dsubu   t0, v0, __NR_64_Linux   # check syscall number
-       sltiu   t0, t0, __NR_64_Linux_syscalls + 1
 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
        ld      t1, PT_EPC(sp)          # skip syscall on return
        daddiu  t1, 4                   # skip to next instruction
        sd      t1, PT_EPC(sp)
 #endif
-       beqz    t0, illegal_syscall
-
-       dsll    t0, v0, 3               # offset into table
-       ld      t2, (sys_call_table - (__NR_64_Linux * 8))(t0)
-                                       # syscall routine
 
        sd      a3, PT_R26(sp)          # save a3 for syscall restarting
 
@@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp)
        and     t0, t1, t0
        bnez    t0, syscall_trace_entry
 
+syscall_common:
+       dsubu   t2, v0, __NR_64_Linux
+       sltiu   t0, t2, __NR_64_Linux_syscalls + 1
+       beqz    t0, illegal_syscall
+
+       dsll    t0, t2, 3               # offset into table
+       dla     t2, sys_call_table
+       daddu   t0, t2, t0
+       ld      t2, (t0)                # syscall routine
+       beqz    t2, illegal_syscall
+
        jalr    t2                      # Do The Real Thing (TM)
 
        li      t0, -EMAXERRNO - 1      # error?
@@ -78,14 +82,14 @@ n64_syscall_exit:
 
 syscall_trace_entry:
        SAVE_STATIC
-       move    s0, t2
+       move    s0, v0
        move    a0, sp
        move    a1, v0
        jal     syscall_trace_enter
 
-       bltz    v0, 2f                  # seccomp failed? Skip syscall
+       bltz    v0, 1f                  # seccomp failed? Skip syscall
 
-       move    t0, s0
+       move    v0, s0
        RESTORE_STATIC
        ld      a0, PT_R4(sp)           # Restore argument registers
        ld      a1, PT_R5(sp)
@@ -93,19 +97,9 @@ syscall_trace_entry:
        ld      a3, PT_R7(sp)
        ld      a4, PT_R8(sp)
        ld      a5, PT_R9(sp)
-       jalr    t0
-
-       li      t0, -EMAXERRNO - 1      # error?
-       sltu    t0, t0, v0
-       sd      t0, PT_R7(sp)           # set error flag
-       beqz    t0, 1f
-
-       ld      t1, PT_R2(sp)           # syscall number
-       dnegu   v0                      # error
-       sd      t1, PT_R0(sp)           # save it for syscall restarting
-1:     sd      v0, PT_R2(sp)           # result
+       j       syscall_common
 
-2:     j       syscall_exit
+1:     j       syscall_exit
 
 illegal_syscall:
        /* This also isn't a 64-bit syscall, throw an error.  */
@@ -436,4 +430,6 @@ EXPORT(sys_call_table)
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 5315 */
        PTR     sys_execveat
+       PTR     sys_userfaultfd
+       PTR     sys_membarrier
        .size   sys_call_table,.-sys_call_table
index 4b2010654c463158b7dee80194de736195c04595..c794843975845df2e0a9d9c0c0fd14dad1ed17ec 100644 (file)
@@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
        and     t0, t1, t0
        bnez    t0, n32_syscall_trace_entry
 
+syscall_common:
        jalr    t2                      # Do The Real Thing (TM)
 
        li      t0, -EMAXERRNO - 1      # error?
@@ -75,9 +76,9 @@ n32_syscall_trace_entry:
        move    a1, v0
        jal     syscall_trace_enter
 
-       bltz    v0, 2f                  # seccomp failed? Skip syscall
+       bltz    v0, 1f                  # seccomp failed? Skip syscall
 
-       move    t0, s0
+       move    t2, s0
        RESTORE_STATIC
        ld      a0, PT_R4(sp)           # Restore argument registers
        ld      a1, PT_R5(sp)
@@ -85,19 +86,9 @@ n32_syscall_trace_entry:
        ld      a3, PT_R7(sp)
        ld      a4, PT_R8(sp)
        ld      a5, PT_R9(sp)
-       jalr    t0
+       j       syscall_common
 
-       li      t0, -EMAXERRNO - 1      # error?
-       sltu    t0, t0, v0
-       sd      t0, PT_R7(sp)           # set error flag
-       beqz    t0, 1f
-
-       ld      t1, PT_R2(sp)           # syscall number
-       dnegu   v0                      # error
-       sd      t1, PT_R0(sp)           # save it for syscall restarting
-1:     sd      v0, PT_R2(sp)           # result
-
-2:     j       syscall_exit
+1:     j       syscall_exit
 
 not_n32_scall:
        /* This is not an n32 compatibility syscall, pass it on to
@@ -429,4 +420,6 @@ EXPORT(sysn32_call_table)
        PTR     sys_memfd_create
        PTR     sys_bpf
        PTR     compat_sys_execveat             /* 6320 */
+       PTR     sys_userfaultfd
+       PTR     sys_membarrier
        .size   sysn32_call_table,.-sysn32_call_table
index f543ff4feef99f8c4ce02554f4dd54da1752b6de..6369cfd390c6330269b05eb095020dafa6cdc048 100644 (file)
@@ -87,6 +87,7 @@ loads_done:
        and     t0, t1, t0
        bnez    t0, trace_a_syscall
 
+syscall_common:
        jalr    t2                      # Do The Real Thing (TM)
 
        li      t0, -EMAXERRNO - 1      # error?
@@ -130,9 +131,9 @@ trace_a_syscall:
 
 1:     jal     syscall_trace_enter
 
-       bltz    v0, 2f                  # seccomp failed? Skip syscall
+       bltz    v0, 1f                  # seccomp failed? Skip syscall
 
-       move    t0, s0
+       move    t2, s0
        RESTORE_STATIC
        ld      a0, PT_R4(sp)           # Restore argument registers
        ld      a1, PT_R5(sp)
@@ -142,19 +143,9 @@ trace_a_syscall:
        ld      a5, PT_R9(sp)
        ld      a6, PT_R10(sp)
        ld      a7, PT_R11(sp)          # For indirect syscalls
-       jalr    t0
+       j       syscall_common
 
-       li      t0, -EMAXERRNO - 1      # error?
-       sltu    t0, t0, v0
-       sd      t0, PT_R7(sp)           # set error flag
-       beqz    t0, 1f
-
-       ld      t1, PT_R2(sp)           # syscall number
-       dnegu   v0                      # error
-       sd      t1, PT_R0(sp)           # save it for syscall restarting
-1:     sd      v0, PT_R2(sp)           # result
-
-2:     j       syscall_exit
+1:     j       syscall_exit
 
 /* ------------------------------------------------------------------------ */
 
@@ -584,4 +575,6 @@ EXPORT(sys32_call_table)
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 4355 */
        PTR     compat_sys_execveat
+       PTR     sys_userfaultfd
+       PTR     sys_membarrier
        .size   sys32_call_table,.-sys32_call_table
index 35b8316002f8420d863696cdf31255d1311ead4b..479515109e5badec96942ee594e3a450ab011f11 100644 (file)
@@ -338,7 +338,7 @@ static void __init bootmem_init(void)
                if (end <= reserved_end)
                        continue;
 #ifdef CONFIG_BLK_DEV_INITRD
-               /* mapstart should be after initrd_end */
+               /* Skip zones before initrd and initrd itself */
                if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
                        continue;
 #endif
@@ -371,6 +371,14 @@ static void __init bootmem_init(void)
                max_low_pfn = PFN_DOWN(HIGHMEM_START);
        }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+       /*
+        * mapstart should be after initrd_end
+        */
+       if (initrd_end)
+               mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
+#endif
+
        /*
         * Initialize the boot-time allocator with low memory only.
         */
index a31896c33716d424bb30397c17b29af07c6728bb..bd4385a8e6e86f7fbb9ca6d988f5eee155b9a8c7 100644 (file)
@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/time.h>
 #include <asm/setup.h>
+#include <asm/maar.h>
 
 cpumask_t cpu_callin_map;              /* Bitmask of started secondaries */
 
@@ -157,6 +158,7 @@ asmlinkage void start_secondary(void)
        mips_clockevent_init();
        mp_ops->init_secondary();
        cpu_report();
+       maar_init();
 
        /*
         * XXX parity protection should be folded in here when it's converted
index f6c44dd332e2a388d28e5882a0c47e84ed3ca3f9..d6d07ad56180fa7438595994833aef5e60628350 100644 (file)
@@ -64,6 +64,9 @@ void __init prom_init_env(void)
        }
        if (memsize == 0)
                memsize = 256;
+
+       loongson_sysconf.nr_uarts = 1;
+
        pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
 #else
        struct boot_params *boot_p;
index a914dc1cb6d1bc339cf44cc0c5aeac887a2e5f74..d8117be729a20ee26d2df8bb42a6f58b9670f513 100644 (file)
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
        else
 #endif
 #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
-            if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
                dma_flag = __GFP_DMA;
        else
 #endif
index 66d0f49c5bec4bab02d8e2e194570527d9ccd4e8..8770e619185eb034b317ce3de837c5185ba05511 100644 (file)
@@ -44,6 +44,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
+#include <asm/maar.h>
 
 /*
  * We have up to 8 empty zeroed pages so we can map one of the right colour
@@ -252,6 +253,119 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 #endif
 }
 
+unsigned __weak platform_maar_init(unsigned num_pairs)
+{
+       struct maar_config cfg[BOOT_MEM_MAP_MAX];
+       unsigned i, num_configured, num_cfg = 0;
+       phys_addr_t skip;
+
+       for (i = 0; i < boot_mem_map.nr_map; i++) {
+               switch (boot_mem_map.map[i].type) {
+               case BOOT_MEM_RAM:
+               case BOOT_MEM_INIT_RAM:
+                       break;
+               default:
+                       continue;
+               }
+
+               skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
+
+               cfg[num_cfg].lower = boot_mem_map.map[i].addr;
+               cfg[num_cfg].lower += skip;
+
+               cfg[num_cfg].upper = cfg[num_cfg].lower;
+               cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
+               cfg[num_cfg].upper -= skip;
+
+               cfg[num_cfg].attrs = MIPS_MAAR_S;
+               num_cfg++;
+       }
+
+       num_configured = maar_config(cfg, num_cfg, num_pairs);
+       if (num_configured < num_cfg)
+               pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
+                       num_pairs, num_cfg);
+
+       return num_configured;
+}
+
+void maar_init(void)
+{
+       unsigned num_maars, used, i;
+       phys_addr_t lower, upper, attr;
+       static struct {
+               struct maar_config cfgs[3];
+               unsigned used;
+       } recorded = { { { 0 } }, 0 };
+
+       if (!cpu_has_maar)
+               return;
+
+       /* Detect the number of MAARs */
+       write_c0_maari(~0);
+       back_to_back_c0_hazard();
+       num_maars = read_c0_maari() + 1;
+
+       /* MAARs should be in pairs */
+       WARN_ON(num_maars % 2);
+
+       /* Set MAARs using values we recorded already */
+       if (recorded.used) {
+               used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
+               BUG_ON(used != recorded.used);
+       } else {
+               /* Configure the required MAARs */
+               used = platform_maar_init(num_maars / 2);
+       }
+
+       /* Disable any further MAARs */
+       for (i = (used * 2); i < num_maars; i++) {
+               write_c0_maari(i);
+               back_to_back_c0_hazard();
+               write_c0_maar(0);
+               back_to_back_c0_hazard();
+       }
+
+       if (recorded.used)
+               return;
+
+       pr_info("MAAR configuration:\n");
+       for (i = 0; i < num_maars; i += 2) {
+               write_c0_maari(i);
+               back_to_back_c0_hazard();
+               upper = read_c0_maar();
+
+               write_c0_maari(i + 1);
+               back_to_back_c0_hazard();
+               lower = read_c0_maar();
+
+               attr = lower & upper;
+               lower = (lower & MIPS_MAAR_ADDR) << 4;
+               upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
+
+               pr_info("  [%d]: ", i / 2);
+               if (!(attr & MIPS_MAAR_V)) {
+                       pr_cont("disabled\n");
+                       continue;
+               }
+
+               pr_cont("%pa-%pa", &lower, &upper);
+
+               if (attr & MIPS_MAAR_S)
+                       pr_cont(" speculate");
+
+               pr_cont("\n");
+
+               /* Record the setup for use on secondary CPUs */
+               if (used <= ARRAY_SIZE(recorded.cfgs)) {
+                       recorded.cfgs[recorded.used].lower = lower;
+                       recorded.cfgs[recorded.used].upper = upper;
+                       recorded.cfgs[recorded.used].attrs = attr;
+                       recorded.used++;
+               }
+       }
+}
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 int page_is_ram(unsigned long pagenr)
 {
@@ -334,69 +448,6 @@ static inline void mem_init_free_highmem(void)
 #endif
 }
 
-unsigned __weak platform_maar_init(unsigned num_pairs)
-{
-       struct maar_config cfg[BOOT_MEM_MAP_MAX];
-       unsigned i, num_configured, num_cfg = 0;
-       phys_addr_t skip;
-
-       for (i = 0; i < boot_mem_map.nr_map; i++) {
-               switch (boot_mem_map.map[i].type) {
-               case BOOT_MEM_RAM:
-               case BOOT_MEM_INIT_RAM:
-                       break;
-               default:
-                       continue;
-               }
-
-               skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
-
-               cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-               cfg[num_cfg].lower += skip;
-
-               cfg[num_cfg].upper = cfg[num_cfg].lower;
-               cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
-               cfg[num_cfg].upper -= skip;
-
-               cfg[num_cfg].attrs = MIPS_MAAR_S;
-               num_cfg++;
-       }
-
-       num_configured = maar_config(cfg, num_cfg, num_pairs);
-       if (num_configured < num_cfg)
-               pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
-                       num_pairs, num_cfg);
-
-       return num_configured;
-}
-
-static void maar_init(void)
-{
-       unsigned num_maars, used, i;
-
-       if (!cpu_has_maar)
-               return;
-
-       /* Detect the number of MAARs */
-       write_c0_maari(~0);
-       back_to_back_c0_hazard();
-       num_maars = read_c0_maari() + 1;
-
-       /* MAARs should be in pairs */
-       WARN_ON(num_maars % 2);
-
-       /* Configure the required MAARs */
-       used = platform_maar_init(num_maars / 2);
-
-       /* Disable any further MAARs */
-       for (i = (used * 2); i < num_maars; i++) {
-               write_c0_maari(i);
-               back_to_back_c0_hazard();
-               write_c0_maar(0);
-               back_to_back_c0_hazard();
-       }
-}
-
 void __init mem_init(void)
 {
 #ifdef CONFIG_HIGHMEM
index 2e52cbd20cebac71a1dc50482d48983df00f15e8..7a584e0bf93332073591ab2a179b37705f20230c 100644 (file)
@@ -12,6 +12,4 @@ obj-y                         := sead3-lcd.o sead3-display.o sead3-init.o \
                                   sead3-int.o sead3-platform.o sead3-reset.o \
                                   sead3-setup.o sead3-time.o
 
-obj-y                          += leds-sead3.o
-
 obj-$(CONFIG_EARLY_PRINTK)     += sead3-console.o
index e92726099be0e40d4a7c1e0903eac239c1ad2497..5d2e0c8d29c0bd0003bae3f7337edbcab5a403c4 100644 (file)
 
 LEAF(sk_load_word)
        is_offset_negative(word)
-       .globl sk_load_word_positive
-sk_load_word_positive:
+FEXPORT(sk_load_word_positive)
        is_offset_in_header(4, word)
        /* Offset within header boundaries */
        PTR_ADDU t1, $r_skb_data, offset
+       .set    reorder
        lw      $r_A, 0(t1)
+       .set    noreorder
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
        wsbh    t0, $r_A
        rotr    $r_A, t0, 16
+# else
+       sll     t0, $r_A, 24
+       srl     t1, $r_A, 24
+       srl     t2, $r_A, 8
+       or      t0, t0, t1
+       andi    t2, t2, 0xff00
+       andi    t1, $r_A, 0xff00
+       or      t0, t0, t2
+       sll     t1, t1, 8
+       or      $r_A, t0, t1
+# endif
 #endif
        jr      $r_ra
         move   $r_ret, zero
@@ -73,15 +86,24 @@ sk_load_word_positive:
 
 LEAF(sk_load_half)
        is_offset_negative(half)
-       .globl sk_load_half_positive
-sk_load_half_positive:
+FEXPORT(sk_load_half_positive)
        is_offset_in_header(2, half)
        /* Offset within header boundaries */
        PTR_ADDU t1, $r_skb_data, offset
+       .set    reorder
        lh      $r_A, 0(t1)
+       .set    noreorder
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
        wsbh    t0, $r_A
        seh     $r_A, t0
+# else
+       sll     t0, $r_A, 24
+       andi    t1, $r_A, 0xff00
+       sra     t0, t0, 16
+       srl     t1, t1, 8
+       or      $r_A, t0, t1
+# endif
 #endif
        jr      $r_ra
         move   $r_ret, zero
@@ -89,8 +111,7 @@ sk_load_half_positive:
 
 LEAF(sk_load_byte)
        is_offset_negative(byte)
-       .globl sk_load_byte_positive
-sk_load_byte_positive:
+FEXPORT(sk_load_byte_positive)
        is_offset_in_header(1, byte)
        /* Offset within header boundaries */
        PTR_ADDU t1, $r_skb_data, offset
@@ -148,23 +169,47 @@ sk_load_byte_positive:
 NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
        bpf_slow_path_common(4)
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
        wsbh    t0, $r_s0
        jr      $r_ra
         rotr   $r_A, t0, 16
-#endif
+# else
+       sll     t0, $r_s0, 24
+       srl     t1, $r_s0, 24
+       srl     t2, $r_s0, 8
+       or      t0, t0, t1
+       andi    t2, t2, 0xff00
+       andi    t1, $r_s0, 0xff00
+       or      t0, t0, t2
+       sll     t1, t1, 8
+       jr      $r_ra
+        or     $r_A, t0, t1
+# endif
+#else
        jr      $r_ra
-       move    $r_A, $r_s0
+        move   $r_A, $r_s0
+#endif
 
        END(bpf_slow_path_word)
 
 NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
        bpf_slow_path_common(2)
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
        jr      $r_ra
         wsbh   $r_A, $r_s0
-#endif
+# else
+       sll     t0, $r_s0, 8
+       andi    t1, $r_s0, 0xff00
+       andi    t0, t0, 0xff00
+       srl     t1, t1, 8
+       jr      $r_ra
+        or     $r_A, t0, t1
+# endif
+#else
        jr      $r_ra
         move   $r_A, $r_s0
+#endif
 
        END(bpf_slow_path_half)
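
The fallback sequences above hand-roll a 32-bit byte swap for MIPS cores that lack the MIPS32r2 wsbh/rotr instructions. A minimal stand-alone C sketch of the same shift-and-mask swap (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same shift-and-mask 32-bit byte swap as the pre-MIPS32r2 fallback above. */
static uint32_t bswap32_sketch(uint32_t x)
{
        uint32_t t0 = x << 24;            /* byte 0 -> byte 3 */
        uint32_t t1 = x >> 24;            /* byte 3 -> byte 0 */
        uint32_t t2 = (x >> 8) & 0xff00;  /* byte 2 -> byte 1 */

        t0 |= t1 | t2;
        return t0 | ((x & 0xff00) << 8);  /* byte 1 -> byte 2 */
}

int main(void)
{
        printf("%08x\n", bswap32_sketch(0x11223344));   /* prints 44332211 */
        return 0;
}
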
 
index 6edb9ee6128ebc4d45de622afdf118f0bf5d12ee..1c8dd0f5cd5d1567126f42b67112b5b0f91962a8 100644 (file)
@@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 375e59140c9cef2f8f9fe36962ac3ddca399d994..ce318d5ab23b06484e063d59e564bef6a2673700 100644 (file)
@@ -34,7 +34,7 @@
  *
  * Atomically reads the value of @v.  Note that the guaranteed
  */
-#define atomic_read(v) (ACCESS_ONCE((v)->counter))
+#define atomic_read(v) READ_ONCE((v)->counter)
 
 /**
  * atomic_set - set atomic variable
@@ -43,7 +43,7 @@
  *
  * Atomically sets the value of @v to @i.  Note that the guaranteed
  */
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
 #define ATOMIC_OP(op)                                                  \
 static inline void atomic_##op(int i, atomic_t *v)                     \
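
The ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE() conversions in this and the later atomic.h hunks all follow the same pattern. A minimal user-space sketch of what those accessors buy, with volatile-qualified accesses standing in for the kernel macros (illustrative only):

#include <stdio.h>

/*
 * Sketch, not the kernel macros: a volatile-qualified access forces the
 * compiler to emit exactly one load or store and stops it from tearing,
 * caching or re-reading the value; this is what READ_ONCE()/WRITE_ONCE()
 * guarantee for atomic_read()/atomic_set().
 */
typedef struct { int counter; } atomic_sketch_t;

static inline int atomic_read_sketch(const atomic_sketch_t *v)
{
        return *(const volatile int *)&v->counter;
}

static inline void atomic_set_sketch(atomic_sketch_t *v, int i)
{
        *(volatile int *)&v->counter = i;
}

int main(void)
{
        atomic_sketch_t a;

        atomic_set_sketch(&a, 42);
        printf("%d\n", atomic_read_sketch(&a));   /* 42 */
        return 0;
}
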
index 914864eb5a25daf87688f1565b33700b289be0bf..d63330e88379dcc591e29e645b5363f063122f9d 100644 (file)
@@ -61,4 +61,5 @@ generic-y += types.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 2536965d00eae4c4a8c928a65b78471628cebf3d..1d109990a02242aafb76c19da65a2f74d34367b9 100644 (file)
@@ -67,7 +67,7 @@ static __inline__ void atomic_set(atomic_t *v, int i)
 
 static __inline__ int atomic_read(const atomic_t *v)
 {
-       return ACCESS_ONCE((v)->counter);
+       return READ_ONCE((v)->counter);
 }
 
 /* exported interface */
index 6bc0ee4b1070a83de003d1c74ec64f818bd3dacb..2c041b535a64ed58d3be2aa79916f94308190b92 100644 (file)
@@ -111,7 +111,7 @@ CONFIG_SCSI_QLA_FC=m
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_ATA=y
index 7991f37e5fe2a174fd3284a4fb1b5e72d2ab267a..36871a4bfa54293b2e70851ea1aa4c1a379dd1dd 100644 (file)
@@ -114,7 +114,7 @@ CONFIG_SCSI_QLA_FC=m
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_ATA=y
index 0dc42c5082b74a1d4fee5709e21100ee0ec8a2df..5f8229e24fe6523a73ef2335a5f480e5f3983911 100644 (file)
@@ -3,7 +3,6 @@
 
 #ifdef __KERNEL__
 
-#include <asm/reg.h>
 
 /* bytes per L1 cache line */
 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -40,12 +39,6 @@ struct ppc64_caches {
 };
 
 extern struct ppc64_caches ppc64_caches;
-
-static inline void logmpp(u64 x)
-{
-       asm volatile(PPC_LOGMPP(R1) : : "r" (x));
-}
-
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 
 #if defined(__ASSEMBLY__)
index 827a38d7a9dbe32adfcaf7937c276f6cfce222a4..887c259556dff8e6a2f534ec38355d77a90fbeda 100644 (file)
@@ -297,8 +297,6 @@ struct kvmppc_vcore {
        u32 arch_compat;
        ulong pcr;
        ulong dpdes;            /* doorbell state (POWER8) */
-       void *mpp_buffer; /* Micro Partition Prefetch buffer */
-       bool mpp_buffer_is_valid;
        ulong conferring_threads;
 };
 
index cab6753f1be56e3f1810be80d50cf94524eaa04b..3f191f573d4f1f487b5e417ddd55eaed8d49951d 100644 (file)
@@ -61,8 +61,13 @@ struct machdep_calls {
                                               unsigned long addr,
                                               unsigned char *hpte_slot_array,
                                               int psize, int ssize, int local);
-       /* special for kexec, to be called in real mode, linear mapping is
-        * destroyed as well */
+       /*
+        * Special for kexec.
+        * To be called in real mode with interrupts disabled. No locks are
+        * taken, so concurrent access on pre-POWER5 hardware could result
+        * in a deadlock.
+        * The linear mapping is destroyed as well.
+        */
        void            (*hpte_clear_all)(void);
 
        void __iomem *  (*ioremap)(phys_addr_t addr, unsigned long size,
index 790f5d1d9a4624d6f17bdde78706be7240d29ad2..7ab04fc59e2462917501a7f6a1b8727be3685163 100644 (file)
 #define PPC_INST_ISEL                  0x7c00001e
 #define PPC_INST_ISEL_MASK             0xfc00003e
 #define PPC_INST_LDARX                 0x7c0000a8
-#define PPC_INST_LOGMPP                        0x7c0007e4
 #define PPC_INST_LSWI                  0x7c0004aa
 #define PPC_INST_LSWX                  0x7c00042a
 #define PPC_INST_LWARX                 0x7c000028
 #define __PPC_EH(eh)   0
 #endif
 
-/* POWER8 Micro Partition Prefetch (MPP) parameters */
-/* Address mask is common for LOGMPP instruction and MPPR SPR */
-#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000ULL
-
-/* Bits 60 and 61 of MPP SPR should be set to one of the following */
-/* Aborting the fetch is indeed setting 00 in the table size bits */
-#define PPC_MPPR_FETCH_ABORT (0x0ULL << 60)
-#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
-
-/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
-#define PPC_LOGMPP_LOG_L2 (0x02ULL << 54)
-#define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54)
-#define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54)
-
 /* Deal with instructions that older assemblers aren't aware of */
 #define        PPC_DCBAL(a, b)         stringify_in_c(.long PPC_INST_DCBAL | \
                                        __PPC_RA(a) | __PPC_RB(b))
 #define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
                                        ___PPC_RT(t) | ___PPC_RA(a) | \
                                        ___PPC_RB(b) | __PPC_EH(eh))
-#define PPC_LOGMPP(b)          stringify_in_c(.long PPC_INST_LOGMPP | \
-                                       __PPC_RB(b))
 #define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
                                        ___PPC_RT(t) | ___PPC_RA(a) | \
                                        ___PPC_RB(b) | __PPC_EH(eh))
index aa1cc5f015eeee564e2c1c83f295acb7d0c523d8..a908ada8e0a5353f5fce19af6ad3e59779ce2e0e 100644 (file)
 #define   CTRL_TE      0x00c00000      /* thread enable */
 #define   CTRL_RUNLATCH        0x1
 #define SPRN_DAWR      0xB4
-#define SPRN_MPPR      0xB8    /* Micro Partition Prefetch Register */
 #define SPRN_RPR       0xBA    /* Relative Priority Register */
 #define SPRN_CIABR     0xBB
 #define   CIABR_PRIV           0x3
index 5b3a903adae6d761effa550a802ed5d6a2aeb656..e4396a7d0f7cf5627a92ea8c07756aba6bc52c7a 100644 (file)
@@ -40,6 +40,11 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
        return (val + c->high_bits) & ~rhs;
 }
 
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+       return ~1ul << __fls(mask);
+}
+
 #else
 
 #ifdef CONFIG_64BIT
index 59503ed98e5fcd5bdb783b23a074eadf4ac3fafa..3f1472a78f393434e2b77df6ab2e24587566befe 100644 (file)
@@ -303,7 +303,7 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
        dev->coherent_dma_mask = mask;
        return 0;
 }
-EXPORT_SYMBOL_GPL(dma_set_coherent_mask);
+EXPORT_SYMBOL(dma_set_coherent_mask);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
 
index 84bf934cf74874eab39926b292c6fa7534829d24..5a753fae8265ae8fc2f9e2e99152c0192fd62b9d 100644 (file)
@@ -1043,6 +1043,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       if (!rtas.entry)
+               return -EINVAL;
+
        if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
                return -EFAULT;
 
index 2280497868886990678c19c01f5cf1c22b25f7a5..9c26c5a96ea2bc0ea9d2286f4995b2d629be9003 100644 (file)
@@ -36,7 +36,6 @@
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
-#include <asm/cache.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
 
 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 
-#if defined(CONFIG_PPC_64K_PAGES)
-#define MPP_BUFFER_ORDER       0
-#elif defined(CONFIG_PPC_4K_PAGES)
-#define MPP_BUFFER_ORDER       3
-#endif
-
 static int dynamic_mt_modes = 6;
 module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
@@ -1455,13 +1448,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
        vcore->kvm = kvm;
        INIT_LIST_HEAD(&vcore->preempt_list);
 
-       vcore->mpp_buffer_is_valid = false;
-
-       if (cpu_has_feature(CPU_FTR_ARCH_207S))
-               vcore->mpp_buffer = (void *)__get_free_pages(
-                       GFP_KERNEL|__GFP_ZERO,
-                       MPP_BUFFER_ORDER);
-
        return vcore;
 }
 
@@ -1894,33 +1880,6 @@ static int on_primary_thread(void)
        return 1;
 }
 
-static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
-{
-       phys_addr_t phy_addr, mpp_addr;
-
-       phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
-       mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
-       mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
-       logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
-
-       vc->mpp_buffer_is_valid = true;
-}
-
-static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
-{
-       phys_addr_t phy_addr, mpp_addr;
-
-       phy_addr = virt_to_phys(vc->mpp_buffer);
-       mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
-       /* We must abort any in-progress save operations to ensure
-        * the table is valid so that prefetch engine knows when to
-        * stop prefetching. */
-       logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
-       mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
-}
-
 /*
  * A list of virtual cores for each physical CPU.
  * These are vcores that could run but their runner VCPU tasks are
@@ -2471,14 +2430,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 
        srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
-       if (vc->mpp_buffer_is_valid)
-               kvmppc_start_restoring_l2_cache(vc);
-
        __kvmppc_vcore_entry();
 
-       if (vc->mpp_buffer)
-               kvmppc_start_saving_l2_cache(vc);
-
        srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
        spin_lock(&vc->lock);
@@ -3073,14 +3026,8 @@ static void kvmppc_free_vcores(struct kvm *kvm)
 {
        long int i;
 
-       for (i = 0; i < KVM_MAX_VCORES; ++i) {
-               if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
-                       struct kvmppc_vcore *vc = kvm->arch.vcores[i];
-                       free_pages((unsigned long)vc->mpp_buffer,
-                                  MPP_BUFFER_ORDER);
-               }
+       for (i = 0; i < KVM_MAX_VCORES; ++i)
                kfree(kvm->arch.vcores[i]);
-       }
        kvm->arch.online_vcores = 0;
 }
 
index 13befa35d8a8ecdd31611aadb42c6be206ba743e..c8822af10a587389999473171db475eb5462714b 100644 (file)
@@ -582,13 +582,21 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
  * be when they isi), and we are the only one left.  We rely on our kernel
  * mapping being 0xC0's and the hardware ignoring those two real bits.
  *
+ * This must be called with interrupts disabled.
+ *
+ * Taking the native_tlbie_lock is unsafe here due to the possibility of
+ * lockdep being on. On pre-POWER5 hardware, not taking the lock could
+ * cause a deadlock; on POWER5 and newer, not taking the lock is fine. This only
+ * gets called during boot before secondary CPUs have come up and during
+ * crashdump and all bets are off anyway.
+ *
  * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
  */
 static void native_hpte_clear(void)
 {
        unsigned long vpn = 0;
-       unsigned long slot, slots, flags;
+       unsigned long slot, slots;
        struct hash_pte *hptep = htab_address;
        unsigned long hpte_v;
        unsigned long pteg_count;
@@ -596,13 +604,6 @@ static void native_hpte_clear(void)
 
        pteg_count = htab_hash_mask + 1;
 
-       local_irq_save(flags);
-
-       /* we take the tlbie lock and hold it.  Some hardware will
-        * deadlock if we try to tlbie from two processors at once.
-        */
-       raw_spin_lock(&native_tlbie_lock);
-
        slots = pteg_count * HPTES_PER_GROUP;
 
        for (slot = 0; slot < slots; slot++, hptep++) {
@@ -614,8 +615,8 @@ static void native_hpte_clear(void)
                hpte_v = be64_to_cpu(hptep->v);
 
                /*
-                * Call __tlbie() here rather than tlbie() since we
-                * already hold the native_tlbie_lock.
+                * Call __tlbie() here rather than tlbie() since we can't take the
+                * native_tlbie_lock.
                 */
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
@@ -625,8 +626,6 @@ static void native_hpte_clear(void)
        }
 
        asm volatile("eieio; tlbsync; ptesync":::"memory");
-       raw_spin_unlock(&native_tlbie_lock);
-       local_irq_restore(flags);
 }
 
 /*
index b0382f3f1095589a599c92d1a6eb25904a988304..d1e65ce545b31f427629853a3803a6b0b4af0d9b 100644 (file)
@@ -48,7 +48,7 @@ struct cpu_hw_events {
        unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
        unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
 
-       unsigned int group_flag;
+       unsigned int txn_flags;
        int n_txn_start;
 
        /* BHRB bits */
@@ -1441,7 +1441,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
         * skip the schedulability test here, it will be performed
         * at commit time(->commit_txn) as a whole
         */
-       if (cpuhw->group_flag & PERF_EVENT_TXN)
+       if (cpuhw->txn_flags & PERF_PMU_TXN_ADD)
                goto nocheck;
 
        if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
@@ -1586,13 +1586,22 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags)
  * Start group events scheduling transaction
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
+ *
+ * We only support PERF_PMU_TXN_ADD transactions. Save the
+ * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
+ * transactions.
  */
-static void power_pmu_start_txn(struct pmu *pmu)
+static void power_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
 {
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
+       WARN_ON_ONCE(cpuhw->txn_flags);         /* txn already in flight */
+
+       cpuhw->txn_flags = txn_flags;
+       if (txn_flags & ~PERF_PMU_TXN_ADD)
+               return;
+
        perf_pmu_disable(pmu);
-       cpuhw->group_flag |= PERF_EVENT_TXN;
        cpuhw->n_txn_start = cpuhw->n_events;
 }
 
@@ -1604,8 +1613,15 @@ static void power_pmu_start_txn(struct pmu *pmu)
 static void power_pmu_cancel_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+       unsigned int txn_flags;
+
+       WARN_ON_ONCE(!cpuhw->txn_flags);        /* no txn in flight */
+
+       txn_flags = cpuhw->txn_flags;
+       cpuhw->txn_flags = 0;
+       if (txn_flags & ~PERF_PMU_TXN_ADD)
+               return;
 
-       cpuhw->group_flag &= ~PERF_EVENT_TXN;
        perf_pmu_enable(pmu);
 }
 
@@ -1621,7 +1637,15 @@ static int power_pmu_commit_txn(struct pmu *pmu)
 
        if (!ppmu)
                return -EAGAIN;
+
        cpuhw = this_cpu_ptr(&cpu_hw_events);
+       WARN_ON_ONCE(!cpuhw->txn_flags);        /* no txn in flight */
+
+       if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
+               cpuhw->txn_flags = 0;
+               return 0;
+       }
+
        n = cpuhw->n_events;
        if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
                return -EAGAIN;
@@ -1632,7 +1656,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
        for (i = cpuhw->n_txn_start; i < n; ++i)
                cpuhw->event[i]->hw.config = cpuhw->events[i];
 
-       cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       cpuhw->txn_flags = 0;
        perf_pmu_enable(pmu);
        return 0;
 }
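
The group_flag to txn_flags conversions above, and the matching s390 and sparc hunks later in this diff, converge on one bookkeeping pattern: record the transaction type at start_txn() and only do real work for ADD transactions. A condensed sketch of that pattern (flag value assumed for illustration):

#include <stdio.h>

#define PERF_PMU_TXN_ADD 0x1            /* assumed value, for illustration */

struct cpu_hw_sketch {
        unsigned int txn_flags;
};

static void start_txn(struct cpu_hw_sketch *c, unsigned int flags)
{
        c->txn_flags = flags;
        if (flags & ~PERF_PMU_TXN_ADD)
                return;                 /* not an ADD transaction: nothing to do */
        /* disable the PMU and remember where the group starts */
}

static int commit_txn(struct cpu_hw_sketch *c)
{
        unsigned int flags = c->txn_flags;

        c->txn_flags = 0;
        if (flags & ~PERF_PMU_TXN_ADD)
                return 0;               /* non-ADD transactions succeed trivially */
        /* run the schedulability test for the whole group, re-enable the PMU */
        return 0;
}

int main(void)
{
        struct cpu_hw_sketch c = { 0 };

        start_txn(&c, PERF_PMU_TXN_ADD);
        printf("commit: %d\n", commit_txn(&c));
        return 0;
}
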
index 527c8b98e97ee5cf8d8564dc166e05e92ac6ee85..9f9dfda9ed2c13a927f6c863759a2e780a5e8941 100644 (file)
@@ -142,6 +142,15 @@ static struct attribute_group event_long_desc_group = {
 
 static struct kmem_cache *hv_page_cache;
 
+DEFINE_PER_CPU(int, hv_24x7_txn_flags);
+DEFINE_PER_CPU(int, hv_24x7_txn_err);
+
+struct hv_24x7_hw {
+       struct perf_event *events[255];
+};
+
+DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);
+
 /*
  * request_buffer and result_buffer are not required to be 4k aligned,
  * but are not allowed to cross any 4k boundary. Aligning them to 4k is
@@ -1231,9 +1240,48 @@ static void update_event_count(struct perf_event *event, u64 now)
 static void h_24x7_event_read(struct perf_event *event)
 {
        u64 now;
+       struct hv_24x7_request_buffer *request_buffer;
+       struct hv_24x7_hw *h24x7hw;
+       int txn_flags;
+
+       txn_flags = __this_cpu_read(hv_24x7_txn_flags);
+
+       /*
+        * If in a READ transaction, add this counter to the list of
+        * counters to read during the next HCALL (i.e. commit_txn()).
+        * If not in a READ transaction, go ahead and make the HCALL
+        * to read this counter by itself.
+        */
+
+       if (txn_flags & PERF_PMU_TXN_READ) {
+               int i;
+               int ret;
 
-       now = h_24x7_get_value(event);
-       update_event_count(event, now);
+               if (__this_cpu_read(hv_24x7_txn_err))
+                       return;
+
+               request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
+
+               ret = add_event_to_24x7_request(event, request_buffer);
+               if (ret) {
+                       __this_cpu_write(hv_24x7_txn_err, ret);
+               } else {
+                       /*
+                        * Associate the event with the HCALL request index,
+                        * so ->commit_txn() can quickly find/update count.
+                        */
+                       i = request_buffer->num_requests - 1;
+
+                       h24x7hw = &get_cpu_var(hv_24x7_hw);
+                       h24x7hw->events[i] = event;
+                       put_cpu_var(h24x7hw);
+               }
+
+               put_cpu_var(hv_24x7_reqb);
+       } else {
+               now = h_24x7_get_value(event);
+               update_event_count(event, now);
+       }
 }
 
 static void h_24x7_event_start(struct perf_event *event, int flags)
@@ -1255,6 +1303,117 @@ static int h_24x7_event_add(struct perf_event *event, int flags)
        return 0;
 }
 
+/*
+ * 24x7 counters only support READ transactions. They are
+ * always counting and don't need/support ADD transactions.
+ * Cache the flags, but otherwise ignore transactions that
+ * are not PERF_PMU_TXN_READ.
+ */
+static void h_24x7_event_start_txn(struct pmu *pmu, unsigned int flags)
+{
+       struct hv_24x7_request_buffer *request_buffer;
+       struct hv_24x7_data_result_buffer *result_buffer;
+
+       /* We should not be called if we are already in a txn */
+       WARN_ON_ONCE(__this_cpu_read(hv_24x7_txn_flags));
+
+       __this_cpu_write(hv_24x7_txn_flags, flags);
+       if (flags & ~PERF_PMU_TXN_READ)
+               return;
+
+       request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
+       result_buffer = (void *)get_cpu_var(hv_24x7_resb);
+
+       init_24x7_request(request_buffer, result_buffer);
+
+       put_cpu_var(hv_24x7_resb);
+       put_cpu_var(hv_24x7_reqb);
+}
+
+/*
+ * Clean up transaction state.
+ *
+ * NOTE: Ignore state of request and result buffers for now.
+ *      We will initialize them during the next read/txn.
+ */
+static void reset_txn(void)
+{
+       __this_cpu_write(hv_24x7_txn_flags, 0);
+       __this_cpu_write(hv_24x7_txn_err, 0);
+}
+
+/*
+ * 24x7 counters only support READ transactions. They are always counting
+ * and don't need/support ADD transactions. Clear ->txn_flags but otherwise
+ * ignore transactions that are not of type PERF_PMU_TXN_READ.
+ *
+ * For READ transactions, submit all pending 24x7 requests (i.e. requests
+ * that were queued by h_24x7_event_read()) to the hypervisor and update
+ * the event counts.
+ */
+static int h_24x7_event_commit_txn(struct pmu *pmu)
+{
+       struct hv_24x7_request_buffer *request_buffer;
+       struct hv_24x7_data_result_buffer *result_buffer;
+       struct hv_24x7_result *resb;
+       struct perf_event *event;
+       u64 count;
+       int i, ret, txn_flags;
+       struct hv_24x7_hw *h24x7hw;
+
+       txn_flags = __this_cpu_read(hv_24x7_txn_flags);
+       WARN_ON_ONCE(!txn_flags);
+
+       ret = 0;
+       if (txn_flags & ~PERF_PMU_TXN_READ)
+               goto out;
+
+       ret = __this_cpu_read(hv_24x7_txn_err);
+       if (ret)
+               goto out;
+
+       request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
+       result_buffer = (void *)get_cpu_var(hv_24x7_resb);
+
+       ret = make_24x7_request(request_buffer, result_buffer);
+       if (ret) {
+               log_24x7_hcall(request_buffer, result_buffer, ret);
+               goto put_reqb;
+       }
+
+       h24x7hw = &get_cpu_var(hv_24x7_hw);
+
+       /* Update event counts from hcall */
+       for (i = 0; i < request_buffer->num_requests; i++) {
+               resb = &result_buffer->results[i];
+               count = be64_to_cpu(resb->elements[0].element_data[0]);
+               event = h24x7hw->events[i];
+               h24x7hw->events[i] = NULL;
+               update_event_count(event, count);
+       }
+
+       put_cpu_var(hv_24x7_hw);
+
+put_reqb:
+       put_cpu_var(hv_24x7_resb);
+       put_cpu_var(hv_24x7_reqb);
+out:
+       reset_txn();
+       return ret;
+}
+
+/*
+ * 24x7 counters only support READ transactions. They are always counting
+ * and don't need/support ADD transactions. However, regardless of the type
+ * of transaction, all we need to do is cleanup, so we don't have to check
+ * the type of transaction.
+ */
+static void h_24x7_event_cancel_txn(struct pmu *pmu)
+{
+       WARN_ON_ONCE(!__this_cpu_read(hv_24x7_txn_flags));
+       reset_txn();
+}
+
 static struct pmu h_24x7_pmu = {
        .task_ctx_nr = perf_invalid_context,
 
@@ -1266,6 +1425,9 @@ static struct pmu h_24x7_pmu = {
        .start       = h_24x7_event_start,
        .stop        = h_24x7_event_stop,
        .read        = h_24x7_event_read,
+       .start_txn   = h_24x7_event_start_txn,
+       .commit_txn  = h_24x7_event_commit_txn,
+       .cancel_txn  = h_24x7_event_cancel_txn,
 };
 
 static int hv_24x7_init(void)
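
The new start_txn/read/commit_txn callbacks above batch counter reads: while a PERF_PMU_TXN_READ transaction is open, each ->read() only appends a request, and ->commit_txn() issues a single hypervisor call and distributes the results. A reduced sketch of that control flow (hypothetical helper names, not the real hcall interface; flag value assumed):

#include <stdio.h>

#define PERF_PMU_TXN_READ 0x2           /* assumed value, for illustration */
#define MAX_REQS 255

struct read_txn_sketch {
        int txn_flags;
        int num_requests;
        int event_ids[MAX_REQS];        /* stands in for the request buffer */
};

static void txn_read(struct read_txn_sketch *t, int event_id)
{
        if (t->txn_flags & PERF_PMU_TXN_READ) {
                t->event_ids[t->num_requests++] = event_id;   /* queue only */
                return;
        }
        /* outside a READ transaction: read this one counter immediately */
}

static void txn_commit(struct read_txn_sketch *t)
{
        /* one batched "hcall" for everything queued since start_txn() */
        for (int i = 0; i < t->num_requests; i++)
                printf("updating count for event %d\n", t->event_ids[i]);
        t->txn_flags = 0;
        t->num_requests = 0;
}

int main(void)
{
        struct read_txn_sketch t = { .txn_flags = PERF_PMU_TXN_READ };

        txn_read(&t, 7);
        txn_read(&t, 9);
        txn_commit(&t);
        return 0;
}
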
index 396351db601b42906c83a8dbf592e40b47db376a..7d5e295255b7b25934d51792fb6876c16462d420 100644 (file)
@@ -676,6 +676,9 @@ static u64 power8_bhrb_filter_map(u64 branch_sample_type)
        if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
                return -1;
 
+       if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
+               return -1;
+
        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
                pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
                return pmu_bhrb_filter;
index e0e68a1c0d3c20dd5d618e62cdccba2e429af475..aed7714495c10fd7fa319675f12f4c7baacdd7ed 100644 (file)
@@ -327,7 +327,7 @@ static void axon_msi_shutdown(struct platform_device *device)
        u32 tmp;
 
        pr_devel("axon_msi: disabling %s\n",
-                 msic->irq_domain->of_node->full_name);
+                irq_domain_get_of_node(msic->irq_domain)->full_name);
        tmp  = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
        tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
        msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
index 9d27de62dc62755c4800d46bcc8b4409a81a5115..54ee5743cb724c9b2a28e77607d095da38065d05 100644 (file)
@@ -231,20 +231,23 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
        const u32 *imap, *tmp;
        int imaplen, intsize, unit;
        struct device_node *iic;
+       struct device_node *of_node;
+
+       of_node = irq_domain_get_of_node(pic->host);
 
        /* First, we check whether we have a real "interrupts" in the device
         * tree in case the device-tree is ever fixed
         */
-       virq = irq_of_parse_and_map(pic->host->of_node, 0);
+       virq = irq_of_parse_and_map(of_node, 0);
        if (virq)
                return virq;
 
        /* Now do the horrible hacks */
-       tmp = of_get_property(pic->host->of_node, "#interrupt-cells", NULL);
+       tmp = of_get_property(of_node, "#interrupt-cells", NULL);
        if (tmp == NULL)
                return NO_IRQ;
        intsize = *tmp;
-       imap = of_get_property(pic->host->of_node, "interrupt-map", &imaplen);
+       imap = of_get_property(of_node, "interrupt-map", &imaplen);
        if (imap == NULL || imaplen < (intsize + 1))
                return NO_IRQ;
        iic = of_find_node_by_phandle(imap[intsize]);
index b304a9fe55cc410e2bcbd9693a84bce1d2bf5365..d9af76342d9980791a6dce16db4c2ce394d9c386 100644 (file)
@@ -144,9 +144,11 @@ int mpic_pasemi_msi_init(struct mpic *mpic)
 {
        int rc;
        struct pci_controller *phb;
+       struct device_node *of_node;
 
-       if (!mpic->irqhost->of_node ||
-           !of_device_is_compatible(mpic->irqhost->of_node,
+       of_node = irq_domain_get_of_node(mpic->irqhost);
+       if (!of_node ||
+           !of_device_is_compatible(of_node,
                                     "pasemi,pwrficient-openpic"))
                return -ENODEV;
 
index 2c91ee7800b90e09edb93a35aa162da74071ba8c..6ccfb6c1c707b40e9fd6ba4cb202d70c1faf57cb 100644 (file)
@@ -137,7 +137,7 @@ static void opal_handle_irq_work(struct irq_work *work)
 static int opal_event_match(struct irq_domain *h, struct device_node *node,
                            enum irq_domain_bus_token bus_token)
 {
-       return h->of_node == node;
+       return irq_domain_get_of_node(h) == node;
 }
 
 static int opal_event_xlate(struct irq_domain *h, struct device_node *np,
index 230f3a7cdea45f8d160797fe55eb7b154c9c9dba..4296d55e88f30afa7cb91fd54d06e6b2a532d577 100644 (file)
@@ -487,9 +487,12 @@ int opal_machine_check(struct pt_regs *regs)
         *    PRD component would have already got notified about this
         *    error through other channels.
         *
-        * In any case, let us just fall through. We anyway heading
-        * down to panic path.
+        * If hardware marked this as an unrecoverable MCE, we are
+        * going to panic anyway. Even if it didn't, it's not safe to
+        * continue at this point, so we should explicitly panic.
         */
+
+       panic("PowerNV Unrecovered Machine Check");
        return 0;
 }
 
index 8f70ba681a78b91e6755e3e123c3dc784857e3c6..ca264833ee64d5c7a55035c66c21454d7b4d0b14 100644 (file)
@@ -171,7 +171,26 @@ static void pnv_smp_cpu_kill_self(void)
         * so clear LPCR:PECE1. We keep PECE2 enabled.
         */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
+
+       /*
+        * Hard-disable interrupts, and then clear irq_happened flags
+        * that we can safely ignore while off-line, since they
+        * are for things for which we do no processing when off-line
+        * (or in the case of HMI, all the processing we need to do
+        * is done in lower-level real-mode code).
+        */
+       hard_irq_disable();
+       local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);
+
        while (!generic_check_cpu_restart(cpu)) {
+               /*
+                * Clear IPI flag, since we don't handle IPIs while
+                * offline, except for those when changing micro-threading
+                * mode, which are handled explicitly below, and those
+                * for coming online, which are handled via
+                * generic_check_cpu_restart() calls.
+                */
+               kvmppc_set_host_ipi(cpu, 0);
 
                ppc64_runlatch_off();
 
@@ -196,20 +215,20 @@ static void pnv_smp_cpu_kill_self(void)
                 * having finished executing in a KVM guest, then srr1
                 * contains 0.
                 */
-               if ((srr1 & wmask) == SRR1_WAKEEE) {
+               if (((srr1 & wmask) == SRR1_WAKEEE) ||
+                   (local_paca->irq_happened & PACA_IRQ_EE)) {
                        icp_native_flush_interrupt();
-                       local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
-                       smp_mb();
                } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
                        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
                        asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
-                       kvmppc_set_host_ipi(cpu, 0);
                }
+               local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
+               smp_mb();
 
                if (cpu_core_split_required())
                        continue;
 
-               if (!generic_check_cpu_restart(cpu))
+               if (srr1 && !generic_check_cpu_restart(cpu))
                        DBG("CPU%d Unexpected exit while offline !\n", cpu);
        }
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
index 09787139834ddd8bd01fbce0448ca34297a1cd5c..3db53e8aff9279cfe761ac9f43926559e347eaf4 100644 (file)
@@ -194,11 +194,6 @@ static const struct os_area_db_id os_area_db_id_rtc_diff = {
        .key = OS_AREA_DB_KEY_RTC_DIFF
 };
 
-static const struct os_area_db_id os_area_db_id_video_mode = {
-       .owner = OS_AREA_DB_OWNER_LINUX,
-       .key = OS_AREA_DB_KEY_VIDEO_MODE
-};
-
 #define SECONDS_FROM_1970_TO_2000 946684800LL
 
 /**
index eca0b00794fa567edb3edc2d12650328b4dec486..bffcc7a486a11c9821ded7f18178c336aa58b9b7 100644 (file)
@@ -181,7 +181,8 @@ static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node,
                              enum irq_domain_bus_token bus_token)
 {
        /* Exact match, unless ehv_pic node is NULL */
-       return h->of_node == NULL || h->of_node == node;
+       struct device_node *of_node = irq_domain_get_of_node(h);
+       return of_node == NULL || of_node == node;
 }
 
 static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq,
index 48a576aa47b92455e97a3f437b5cc18b6007f91a..3a2be3676f4353eb6b4e3b237334cd0b3903d37f 100644 (file)
@@ -110,7 +110,7 @@ static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
        int rc, hwirq;
 
        rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
-                             msi_data->irqhost->of_node);
+                             irq_domain_get_of_node(msi_data->irqhost));
        if (rc)
                return rc;
 
index e1a9c2c2d5d357c7e88083d60a6f89c0d04219c2..6f99ed3967fdef3ad382a03758d361e1ee3223db 100644 (file)
@@ -165,7 +165,8 @@ static struct resource pic_edgectrl_iores = {
 static int i8259_host_match(struct irq_domain *h, struct device_node *node,
                            enum irq_domain_bus_token bus_token)
 {
-       return h->of_node == NULL || h->of_node == node;
+       struct device_node *of_node = irq_domain_get_of_node(h);
+       return of_node == NULL || of_node == node;
 }
 
 static int i8259_host_map(struct irq_domain *h, unsigned int virq,
index b1297ab1599b46ef24457ec890b28b69ba4dc0bf..f76ee39cb3377a0a6788ad4fcd31fe3c4614743f 100644 (file)
@@ -675,7 +675,8 @@ static int ipic_host_match(struct irq_domain *h, struct device_node *node,
                           enum irq_domain_bus_token bus_token)
 {
        /* Exact match, unless ipic node is NULL */
-       return h->of_node == NULL || h->of_node == node;
+       struct device_node *of_node = irq_domain_get_of_node(h);
+       return of_node == NULL || of_node == node;
 }
 
 static int ipic_host_map(struct irq_domain *h, unsigned int virq,
index 537e5db85a060518928ee812cdab8dd00a311b06..cecd1156c1852a71b6f0432c77fd1410eda767d1 100644 (file)
@@ -1011,7 +1011,8 @@ static int mpic_host_match(struct irq_domain *h, struct device_node *node,
                           enum irq_domain_bus_token bus_token)
 {
        /* Exact match, unless mpic node is NULL */
-       return h->of_node == NULL || h->of_node == node;
+       struct device_node *of_node = irq_domain_get_of_node(h);
+       return of_node == NULL || of_node == node;
 }
 
 static int mpic_host_map(struct irq_domain *h, unsigned int virq,
index 7dc39f35a4ccdb0d9f213515ccb5b850de3e1b4e..1d48a5385905a8ce96290f10064546a39efed403 100644 (file)
@@ -84,7 +84,7 @@ int mpic_msi_init_allocator(struct mpic *mpic)
        int rc;
 
        rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->num_sources,
-                             mpic->irqhost->of_node);
+                             irq_domain_get_of_node(mpic->irqhost));
        if (rc)
                return rc;
 
index fbcc1f855a7f2ea15937d02f0b0cb4305af5034a..ef36f16f9f6fbc9bdfd0c02e6df29e77fc83817c 100644 (file)
@@ -248,7 +248,8 @@ static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
                            enum irq_domain_bus_token bus_token)
 {
        /* Exact match, unless qe_ic node is NULL */
-       return h->of_node == NULL || h->of_node == node;
+       struct device_node *of_node = irq_domain_get_of_node(h);
+       return of_node == NULL || of_node == node;
 }
 
 static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
index d4788111c16171135422a0ef29e23e2eb866236d..fac6ac9790fad18efc2f587757068f87ca7765fd 100644 (file)
@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o
 
 KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 
index 0c98f1508542c9f900ee2bed1394413b8d5d8d88..ed7da281df66743f0badff631c9183bf318ec9b7 100644 (file)
@@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
index 82083e1fbdc4c6cc9f4ad6a2c0cfbfbcd3af1210..9858b14cde1edccdcda3a217446f547641d98944 100644 (file)
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
index c05c9e0821e3bcd956b929c591e41b5445ac9565..7f14f80717d4975161a696dd2e803d4ee87011d6 100644 (file)
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
index 5ad26dd94d77e83fedeba5c7f71c8eba0ff2ab29..9043d2e1e2ae0b3c01a7b6588bed848f44dd92ff 100644 (file)
@@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 2a0efc63b9e5afb29cb2e6edd109dd9848353b27..dc19ee0c92aaa693d2ad3b8c4c614b3e0e427de7 100644 (file)
@@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn);
 int __node_distance(int a, int b);
 void numa_update_cpu_topology(void);
 
-extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 extern int numa_debug_enabled;
 
 #else
index 27ebde643933a908c1ebb2a75ff723d8d43a65f6..94fc55fc72ce88a18eb73d3f43d5a7895ac6cd9c 100644 (file)
@@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu)
 #define cpumask_of_node cpumask_of_node
 static inline const struct cpumask *cpumask_of_node(int node)
 {
-       return node_to_cpumask_map[node];
+       return &node_to_cpumask_map[node];
 }
 
 /*
index 48c9af7a76831ea63ef6ef92760df02f15c1188c..3aeeb1b562c00ff9c7afe559452fdc2c06457116 100644 (file)
@@ -176,6 +176,7 @@ int main(void)
        DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
        DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
        DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
+       DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset));
        DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
        DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
        DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
index 09b039d7983d802f2674504439e43e21c03d4cae..582fe44ab07cc69aaef1d4f782f6f89364914974 100644 (file)
@@ -733,6 +733,14 @@ ENTRY(psw_idle)
        stg     %r3,__SF_EMPTY(%r15)
        larl    %r1,.Lpsw_idle_lpsw+4
        stg     %r1,__SF_EMPTY+8(%r15)
+#ifdef CONFIG_SMP
+       larl    %r1,smp_cpu_mtid
+       llgf    %r1,0(%r1)
+       ltgr    %r1,%r1
+       jz      .Lpsw_idle_stcctm
+       .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
+.Lpsw_idle_stcctm:
+#endif
        STCK    __CLOCK_IDLE_ENTER(%r2)
        stpt    __TIMER_IDLE_ENTER(%r2)
 .Lpsw_idle_lpsw:
@@ -1159,7 +1167,27 @@ cleanup_critical:
        jhe     1f
        mvc     __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
        mvc     __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1:     # account system time going idle
+1:     # calculate idle cycles
+#ifdef CONFIG_SMP
+       clg     %r9,BASED(.Lcleanup_idle_insn)
+       jl      3f
+       larl    %r1,smp_cpu_mtid
+       llgf    %r1,0(%r1)
+       ltgr    %r1,%r1
+       jz      3f
+       .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
+       larl    %r3,mt_cycles
+       ag      %r3,__LC_PERCPU_OFFSET
+       la      %r4,__SF_EMPTY+16(%r15)
+2:     lg      %r0,0(%r3)
+       slg     %r0,0(%r4)
+       alg     %r0,64(%r4)
+       stg     %r0,0(%r3)
+       la      %r3,8(%r3)
+       la      %r4,8(%r4)
+       brct    %r1,2b
+#endif
+3:     # account system time going idle
        lg      %r9,__LC_STEAL_TIMER
        alg     %r9,__CLOCK_IDLE_ENTER(%r2)
        slg     %r9,__LC_LAST_UPDATE_CLOCK
index a9563409c36ea7bc37bfb405411b9c6ac70c2a50..929c147e07b40c19370e621a4ba96e99c13a994e 100644 (file)
@@ -72,6 +72,7 @@ struct cpu_hw_events {
        atomic_t                ctr_set[CPUMF_CTR_SET_MAX];
        u64                     state, tx_state;
        unsigned int            flags;
+       unsigned int            txn_flags;
 };
 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .ctr_set = {
@@ -82,6 +83,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        },
        .state = 0,
        .flags = 0,
+       .txn_flags = 0,
 };
 
 static int get_counter_set(u64 event)
@@ -538,7 +540,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
         * For group events transaction, the authorization check is
         * done in cpumf_pmu_commit_txn().
         */
-       if (!(cpuhw->flags & PERF_EVENT_TXN))
+       if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
                if (validate_ctr_auth(&event->hw))
                        return -ENOENT;
 
@@ -576,13 +578,22 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
 /*
  * Start group events scheduling transaction.
  * Set flags to perform a single test at commit time.
+ *
+ * We only support PERF_PMU_TXN_ADD transactions. Save the
+ * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
+ * transactions.
  */
-static void cpumf_pmu_start_txn(struct pmu *pmu)
+static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
 {
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
+       WARN_ON_ONCE(cpuhw->txn_flags);         /* txn already in flight */
+
+       cpuhw->txn_flags = txn_flags;
+       if (txn_flags & ~PERF_PMU_TXN_ADD)
+               return;
+
        perf_pmu_disable(pmu);
-       cpuhw->flags |= PERF_EVENT_TXN;
        cpuhw->tx_state = cpuhw->state;
 }
 
@@ -593,11 +604,18 @@ static void cpumf_pmu_start_txn(struct pmu *pmu)
  */
 static void cpumf_pmu_cancel_txn(struct pmu *pmu)
 {
+       unsigned int txn_flags;
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
+       WARN_ON_ONCE(!cpuhw->txn_flags);        /* no txn in flight */
+
+       txn_flags = cpuhw->txn_flags;
+       cpuhw->txn_flags = 0;
+       if (txn_flags & ~PERF_PMU_TXN_ADD)
+               return;
+
        WARN_ON(cpuhw->tx_state != cpuhw->state);
 
-       cpuhw->flags &= ~PERF_EVENT_TXN;
        perf_pmu_enable(pmu);
 }
 
@@ -611,13 +629,20 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu)
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        u64 state;
 
+       WARN_ON_ONCE(!cpuhw->txn_flags);        /* no txn in flight */
+
+       if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
+               cpuhw->txn_flags = 0;
+               return 0;
+       }
+
        /* check if the updated state can be scheduled */
        state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
        state >>= CPUMF_LCCTL_ENABLE_SHIFT;
        if ((state & cpuhw->info.auth_ctl) != state)
                return -ENOENT;
 
-       cpuhw->flags &= ~PERF_EVENT_TXN;
+       cpuhw->txn_flags = 0;
        perf_pmu_enable(pmu);
        return 0;
 }
index c8653435c70d9d203dbe05deed3c96d0aad6cdd9..dafc44f519c340329581c8a5b2fda6fdb6920252 100644 (file)
@@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock);
 static atomic64_t virt_timer_current;
 static atomic64_t virt_timer_elapsed;
 
-static DEFINE_PER_CPU(u64, mt_cycles[32]);
+DEFINE_PER_CPU(u64, mt_cycles[8]);
 static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
@@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed)
        return elapsed >= atomic64_read(&virt_timer_current);
 }
 
+static void update_mt_scaling(void)
+{
+       u64 cycles_new[8], *cycles_old;
+       u64 delta, fac, mult, div;
+       int i;
+
+       stcctm5(smp_cpu_mtid + 1, cycles_new);
+       cycles_old = this_cpu_ptr(mt_cycles);
+       fac = 1;
+       mult = div = 0;
+       for (i = 0; i <= smp_cpu_mtid; i++) {
+               delta = cycles_new[i] - cycles_old[i];
+               div += delta;
+               mult *= i + 1;
+               mult += delta * fac;
+               fac *= i + 1;
+       }
+       div *= fac;
+       if (div > 0) {
+               /* Update scaling factor */
+               __this_cpu_write(mt_scaling_mult, mult);
+               __this_cpu_write(mt_scaling_div, div);
+               memcpy(cycles_old, cycles_new,
+                      sizeof(u64) * (smp_cpu_mtid + 1));
+       }
+       __this_cpu_write(mt_scaling_jiffies, jiffies_64);
+}
+
 /*
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
@@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, clock, user, system, steal;
        u64 user_scaled, system_scaled;
-       int i;
 
        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
@@ -85,34 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
 
-       /* Do MT utilization calculation */
+       /* Update MT utilization calculation */
        if (smp_cpu_mtid &&
-           time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
-               u64 cycles_new[32], *cycles_old;
-               u64 delta, fac, mult, div;
-
-               cycles_old = this_cpu_ptr(mt_cycles);
-               if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
-                       fac = 1;
-                       mult = div = 0;
-                       for (i = 0; i <= smp_cpu_mtid; i++) {
-                               delta = cycles_new[i] - cycles_old[i];
-                               div += delta;
-                               mult *= i + 1;
-                               mult += delta * fac;
-                               fac *= i + 1;
-                       }
-                       div *= fac;
-                       if (div > 0) {
-                               /* Update scaling factor */
-                               __this_cpu_write(mt_scaling_mult, mult);
-                               __this_cpu_write(mt_scaling_div, div);
-                               memcpy(cycles_old, cycles_new,
-                                      sizeof(u64) * (smp_cpu_mtid + 1));
-                       }
-               }
-               __this_cpu_write(mt_scaling_jiffies, jiffies_64);
-       }
+           time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+               update_mt_scaling();
 
        user = S390_lowcore.user_timer - ti->user_timer;
        S390_lowcore.steal_timer -= user;
@@ -181,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk)
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 
+       /* Update MT utilization calculation */
+       if (smp_cpu_mtid &&
+           time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+               update_mt_scaling();
+
        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
index 7de4e2f780d789478d4d700821944f96b3846586..30b2698a28e29a6991a7116da1877e5bdee1963e 100644 (file)
@@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core)
                cpumask_copy(&top->thread_mask, &core->mask);
                cpumask_copy(&top->core_mask, &core_mc(core)->mask);
                cpumask_copy(&top->book_mask, &core_book(core)->mask);
-               cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]);
+               cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
                top->node_id = core_node(core)->id;
        }
 }
@@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa)
 
        /* Clear all node masks */
        for (i = 0; i < MAX_NUMNODES; i++)
-               cpumask_clear(node_to_cpumask_map[i]);
+               cpumask_clear(&node_to_cpumask_map[i]);
 
        /* Rebuild all masks */
        toptree_for_each(core, numa, CORE)
index 09b1d2355bd9849ab583bb52c33eb789b4f9804b..43f32ce60aa3d98af0b7665090fa3eb080d12fa7 100644 (file)
@@ -23,7 +23,7 @@
 pg_data_t *node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(node_data);
 
-cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 EXPORT_SYMBOL(node_to_cpumask_map);
 
 const struct numa_mode numa_mode_plain = {
@@ -144,7 +144,7 @@ void __init numa_setup(void)
 static int __init numa_init_early(void)
 {
        /* Attach all possible CPUs to node 0 for now. */
-       cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask);
+       cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
        return 0;
 }
 early_initcall(numa_init_early);
index 92ffe397b893c553c8504f10525a5e3d9d1e9e34..a05218ff3fe465b6e4812d7655360dc1b495a519 100644 (file)
@@ -13,3 +13,4 @@ generic-y += sections.h
 generic-y += trace_clock.h
 generic-y += xor.h
 generic-y += serial.h
+generic-y += word-at-a-time.h
index 05b9f74ce2d544d3f9d7bede26cdc57c04a54e2c..c399e1c55685178d149249ec4e3c8c0178e01829 100644 (file)
@@ -14,8 +14,8 @@
 
 #define ATOMIC_INIT(i) { (i) }
 
-#define atomic_read(v)         ACCESS_ONCE((v)->counter)
-#define atomic_set(v,i)                ((v)->counter = (i))
+#define atomic_read(v)         READ_ONCE((v)->counter)
+#define atomic_set(v,i)                WRITE_ONCE((v)->counter, (i))
 
 #if defined(CONFIG_GUSA_RB)
 #include <asm/atomic-grb.h>
index fe20d14ae051a5892350185d55ce1adfc352e538..ceb5201a30ed36899010715143b8679cd4a819fb 100644 (file)
@@ -59,6 +59,7 @@ pages_do_alias(unsigned long addr1, unsigned long addr2)
 
 #define clear_page(page)       memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, void *from);
+#define copy_user_page(to, from, vaddr, pg)  __copy_user(to, from, PAGE_SIZE)
 
 struct page;
 struct vm_area_struct;
index 2e48eb8813ffa2fccf6df34ad5cee3bcf1857f94..c90930de76ba8670041598ba0d6461ef439c9539 100644 (file)
@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
+                       .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
+                       .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ctr_crypt,
                        .decrypt        = ctr_crypt,
index 6bf2479a12fbe2a9c82b4275e40ac9e85ac191ed..561a84d93cf682a400a7555862f065f1fb04c84c 100644 (file)
@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
                .blkcipher = {
                        .min_keysize    = CAMELLIA_MIN_KEY_SIZE,
                        .max_keysize    = CAMELLIA_MAX_KEY_SIZE,
+                       .ivsize         = CAMELLIA_BLOCK_SIZE,
                        .setkey         = camellia_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
index dd6a34fa6e19d2e36f5d30de6256655dc0ee2e0c..61af794aa2d31d5df27d0a318ac8b8f9d605637b 100644 (file)
@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
                .blkcipher = {
                        .min_keysize    = DES_KEY_SIZE,
                        .max_keysize    = DES_KEY_SIZE,
+                       .ivsize         = DES_BLOCK_SIZE,
                        .setkey         = des_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
                .blkcipher = {
                        .min_keysize    = DES3_EDE_KEY_SIZE,
                        .max_keysize    = DES3_EDE_KEY_SIZE,
+                       .ivsize         = DES3_EDE_BLOCK_SIZE,
                        .setkey         = des3_ede_set_key,
                        .encrypt        = cbc3_encrypt,
                        .decrypt        = cbc3_decrypt,
index 917084ace49dee70975f195a6c53d1d5c24af4ad..f2fbf9e16fafca66c4aa01145645f1748775cde2 100644 (file)
 #define ATOMIC_INIT(i)         { (i) }
 #define ATOMIC64_INIT(i)       { (i) }
 
-#define atomic_read(v)         ACCESS_ONCE((v)->counter)
-#define atomic64_read(v)       ACCESS_ONCE((v)->counter)
+#define atomic_read(v)         READ_ONCE((v)->counter)
+#define atomic64_read(v)       READ_ONCE((v)->counter)
 
-#define atomic_set(v, i)       (((v)->counter) = i)
-#define atomic64_set(v, i)     (((v)->counter) = i)
+#define atomic_set(v, i)       WRITE_ONCE(((v)->counter), (i))
+#define atomic64_set(v, i)     WRITE_ONCE(((v)->counter), (i))
 
 #define ATOMIC_OP(op)                                                  \
 void atomic_##op(int, atomic_t *);                                     \
index 689db65f85294a95051d08e4f7846151ccc002de..b0da5aedb336c643baf9163c010c6c679ce6bbdc 100644 (file)
@@ -108,7 +108,7 @@ struct cpu_hw_events {
        /* Enabled/disable state.  */
        int                     enabled;
 
-       unsigned int            group_flag;
+       unsigned int            txn_flags;
 };
 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
@@ -1379,7 +1379,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
         * skip the schedulability test here, it will be performed
         * at commit time(->commit_txn) as a whole
         */
-       if (cpuc->group_flag & PERF_EVENT_TXN)
+       if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
                goto nocheck;
 
        if (check_excludes(cpuc->event, n0, 1))
@@ -1494,12 +1494,17 @@ static int sparc_pmu_event_init(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-static void sparc_pmu_start_txn(struct pmu *pmu)
+static void sparc_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
 {
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
+       WARN_ON_ONCE(cpuhw->txn_flags);         /* txn already in flight */
+
+       cpuhw->txn_flags = txn_flags;
+       if (txn_flags & ~PERF_PMU_TXN_ADD)
+               return;
+
        perf_pmu_disable(pmu);
-       cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
 /*
@@ -1510,8 +1515,15 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
 static void sparc_pmu_cancel_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+       unsigned int txn_flags;
+
+       WARN_ON_ONCE(!cpuhw->txn_flags);        /* no txn in flight */
+
+       txn_flags = cpuhw->txn_flags;
+       cpuhw->txn_flags = 0;
+       if (txn_flags & ~PERF_PMU_TXN_ADD)
+               return;
 
-       cpuhw->group_flag &= ~PERF_EVENT_TXN;
        perf_pmu_enable(pmu);
 }
 
@@ -1528,14 +1540,20 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
        if (!sparc_pmu)
                return -EINVAL;
 
-       cpuc = this_cpu_ptr(&cpu_hw_events);
+       WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
+
+       if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
+               cpuc->txn_flags = 0;
+               return 0;
+       }
+
        n = cpuc->n_events;
        if (check_excludes(cpuc->event, 0, n))
                return -EINVAL;
        if (sparc_check_constraints(cpuc->event, cpuc->events, n))
                return -EAGAIN;
 
-       cpuc->group_flag &= ~PERF_EVENT_TXN;
+       cpuc->txn_flags = 0;
        perf_pmu_enable(pmu);
        return 0;
 }
index ee186e13dfe6fde92c9127aa07dccc474d1253d2..f102048d9c0e78a31b6773715a491e85e755a89c 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/string.h>
 
 #include <gxio/iorpc_globals.h>
 #include <gxio/iorpc_mpipe.h>
 /* HACK: Avoid pointless "shadow" warnings. */
 #define link link_shadow
 
-/**
- * strscpy - Copy a C-string into a sized buffer, but only if it fits
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @size: size of destination buffer
- *
- * Use this routine to avoid copying too-long strings.
- * The routine returns the total number of bytes copied
- * (including the trailing NUL) or zero if the buffer wasn't
- * big enough.  To ensure that programmers pay attention
- * to the return code, the destination has a single NUL
- * written at the front (if size is non-zero) when the
- * buffer is not big enough.
- */
-static size_t strscpy(char *dest, const char *src, size_t size)
-{
-       size_t len = strnlen(src, size) + 1;
-       if (len > size) {
-               if (size)
-                       dest[0] = '\0';
-               return 0;
-       }
-       memcpy(dest, src, len);
-       return len;
-}
-
 int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 {
        char file[32];
@@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name)
        if (!context)
                return GXIO_ERR_NO_DEVICE;
 
-       if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+       if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
                return GXIO_ERR_NO_DEVICE;
 
        return gxio_mpipe_info_instance_aux(context, name);
@@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
 
        rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
        if (rv >= 0) {
-               if (strscpy(link_name, name.name, sizeof(name.name)) == 0)
+               if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
                        return GXIO_ERR_INVAL_MEMORY_SIZE;
                memcpy(link_mac, mac.mac, sizeof(mac.mac));
        }
@@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
        _gxio_mpipe_link_name_t name;
        int rv;
 
-       if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+       if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
                return GXIO_ERR_NO_DEVICE;
 
        rv = gxio_mpipe_link_open_aux(context, name, flags);
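
The tile hunks drop the driver-local strscpy() in favour of the shared lib/string.c helper, and the return-value convention changes with it: the local copy returned 0 when the source did not fit, while the shared helper returns a negative errno (-E2BIG), hence the switch from '== 0' to '< 0'. A small self-contained userspace stand-in (demo_strscpy is hypothetical, written only to illustrate the calling convention):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/*
 * Userspace stand-in for the kernel's strscpy(): copy at most size-1 bytes,
 * always NUL-terminate when size > 0, and return the number of bytes copied
 * (excluding the NUL) or -E2BIG if the source did not fit.
 */
static ssize_t demo_strscpy(char *dest, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (len == size) {			/* src plus its NUL does not fit */
		if (size) {
			memcpy(dest, src, size - 1);
			dest[size - 1] = '\0';	/* truncated but terminated */
		}
		return -E2BIG;
	}
	memcpy(dest, src, len + 1);
	return (ssize_t)len;
}

int main(void)
{
	char buf[8];

	if (demo_strscpy(buf, "short", sizeof(buf)) < 0)	/* new-style check */
		return 1;
	printf("copied:    %s\n", buf);

	if (demo_strscpy(buf, "definitely too long", sizeof(buf)) < 0)
		printf("truncated: %s\n", buf);			/* prints "definit" */
	return 0;
}
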
index 70979846076332bf7771d8517afdcc3415518b17..9fc0107a9c5e557f226b62da9a5e086a4f7b4272 100644 (file)
@@ -34,7 +34,7 @@
  */
 static inline int atomic_read(const atomic_t *v)
 {
-       return ACCESS_ONCE(v->counter);
+       return READ_ONCE(v->counter);
 }
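
Several hunks in this pull replace plain reads and writes of atomic_t counters (and the older ACCESS_ONCE()) with READ_ONCE()/WRITE_ONCE(), which force exactly one untorn access and keep the compiler from caching, refetching or merging the value. A userspace sketch of the idea (DEMO_READ_ONCE/DEMO_WRITE_ONCE are simplified stand-ins; the real macros in <linux/compiler.h> handle more cases):

#include <stdio.h>

/* A volatile access tells the compiler: perform exactly one load/store of
 * this object, at this point, at its full width. */
#define DEMO_READ_ONCE(x)	(*(volatile __typeof__(x) *)&(x))
#define DEMO_WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

static int counter;

static int wait_for_counter(void)
{
	/*
	 * With a plain read the compiler may hoist 'counter' into a register
	 * and never reload it; the volatile access forces a fresh load on
	 * every iteration.
	 */
	while (DEMO_READ_ONCE(counter) == 0)
		;
	return DEMO_READ_ONCE(counter);
}

int main(void)
{
	DEMO_WRITE_ONCE(counter, 42);
	printf("%d\n", wait_for_counter());
	return 0;
}
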
 
 /**
index 096a56d6ead429a2a0c06fd282a52e18a1d76a47..51cabc26e387c32fd368d8058563eae117782ff2 100644 (file)
@@ -24,7 +24,7 @@
 
 /* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
 
-#define atomic_set(v, i) ((v)->counter = (i))
+#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
 
 /*
  * The smp_mb() operations throughout are to support the fact that
@@ -82,8 +82,8 @@ static inline void atomic_xor(int i, atomic_t *v)
 
 #define ATOMIC64_INIT(i)       { (i) }
 
-#define atomic64_read(v)               ((v)->counter)
-#define atomic64_set(v, i) ((v)->counter = (i))
+#define atomic64_read(v)       READ_ONCE((v)->counter)
+#define atomic64_set(v, i)     WRITE_ONCE((v)->counter, (i))
 
 static inline void atomic64_add(long i, atomic64_t *v)
 {
index 9e5ce0d7b292160d5f544fcda08c00ea6c04f168..b66a693c2c3453e4f4642fea133890ab268a32d8 100644 (file)
@@ -6,7 +6,7 @@
 struct word_at_a_time { /* unused */ };
 #define WORD_AT_A_TIME_CONSTANTS {}
 
-/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */
+/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */
 static inline unsigned long has_zero(unsigned long val, unsigned long *data,
                                     const struct word_at_a_time *c)
 {
@@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask)
 #endif
 }
 
+#ifdef __BIG_ENDIAN
+#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask)))
+#else
+#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)
+#endif
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
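
zero_bytemask() converts the has_zero() result into a mask that keeps every byte of the word up to the first zero byte, which the word-at-a-time name hashing then ANDs into the final partial word. A worked little-endian example as a small userspace program (ZERO_BYTEMASK_LE simply restates the formula added above; the 0x01-per-zero-byte encoding matches the tile has_zero() comment):

#include <stdio.h>

/* Little-endian variant from the hunk above: 'mask' carries 0x01 in every
 * byte lane that held a zero byte; the lowest set lane marks the first zero
 * byte, so keep that lane's low bit and everything below it. */
#define ZERO_BYTEMASK_LE(mask)	((2ul << __builtin_ctzl(mask)) - 1)

int main(void)
{
	/* Word holding "abc\0....": the NUL sits in byte lane 3, so has_zero()
	 * reports a 0x01 marker at bit 24. */
	unsigned long has_zero_result = 0x01ul << 24;

	/* Prints 0x1ffffff: bytes 0-2 survive in full; the NUL byte and
	 * everything above it contribute nothing after the AND. */
	printf("%#lx\n", ZERO_BYTEMASK_LE(has_zero_result));
	return 0;
}
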
index f0da5a237e94077ced050b6f3d746c89d44bd341..9f1e05e12255b84d94d59f67ecbc7cc9330d889a 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/platform_device.h>
 #include <linux/usb/tilegx.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/types.h>
 
 static u64 ehci_dmamask = DMA_BIT_MASK(32);
index 098ab3333e7cdd5e4cdce7f20b28c7678ed2a852..e3abe6f3156d3fbacca4003c072954619c7e1bb1 100644 (file)
@@ -70,8 +70,8 @@ KBUILD_AFLAGS += $(ARCH_INCLUDE)
 
 USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
                $(ARCH_INCLUDE) $(MODE_INCLUDE) $(filter -I%,$(CFLAGS)) \
-               -D_FILE_OFFSET_BITS=64 -idirafter include \
-               -D__KERNEL__ -D__UM_HOST__
+               -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \
+               -idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__
 
 #This will adjust *FLAGS according to the platform.
 include $(ARCH_DIR)/Makefile-os-$(OS)
index 149ec55f9c46abd97cbb9b69c7a55afa23e23393..904f3ebf4220153f816a1deca118381190f44ec4 100644 (file)
@@ -25,4 +25,5 @@ generic-y += preempt.h
 generic-y += switch_to.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index d8a9fce6ee2e5b10a63405654c3958c7361951fe..98783dd0fa2ea697a50cb65afb661aa7f7bf0322 100644 (file)
@@ -220,7 +220,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
                show_regs(container_of(regs, struct pt_regs, regs));
                panic("Segfault with no mm");
        }
-       else if (!is_user && address < TASK_SIZE) {
+       else if (!is_user && address > PAGE_SIZE && address < TASK_SIZE) {
                show_regs(container_of(regs, struct pt_regs, regs));
                panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx",
                       address, ip);
index e3ee4a51ef63a3ec7e7314aa7f00d59ec93ce37e..3f02d42328127bc6c41b786fd9b10145256cd57f 100644 (file)
@@ -96,7 +96,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
                               "ret = %d\n", -n);
                        ret = n;
                }
-               CATCH_EINTR(waitpid(pid, NULL, __WCLONE));
+               CATCH_EINTR(waitpid(pid, NULL, __WALL));
        }
 
 out_free2:
@@ -129,7 +129,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
                return err;
        }
        if (stack_out == NULL) {
-               CATCH_EINTR(pid = waitpid(pid, &status, __WCLONE));
+               CATCH_EINTR(pid = waitpid(pid, &status, __WALL));
                if (pid < 0) {
                        err = -errno;
                        printk(UM_KERN_ERR "run_helper_thread - wait failed, "
@@ -148,7 +148,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
 int helper_wait(int pid)
 {
        int ret, status;
-       int wflags = __WCLONE;
+       int wflags = __WALL;
 
        CATCH_EINTR(ret = waitpid(pid, &status, wflags));
        if (ret < 0) {
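
The switch from __WCLONE to __WALL matters because __WCLONE only reports "clone" children (those whose exit signal is not SIGCHLD), while __WALL reaps children regardless of how they were created, which is what these UML host helpers need. A minimal userspace illustration (plain fork(), not UML code):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0)
		_exit(0);		/* child: exit immediately */

	/*
	 * __WALL: wait for the child no matter which exit signal it was
	 * created with.  __WCLONE alone would never report a plain fork()ed
	 * child here.
	 */
	if (waitpid(pid, NULL, __WALL) < 0) {
		perror("waitpid");
		return 1;
	}
	puts("child reaped");
	return 0;
}
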
index 1fc7a286dc6f342319ec06a81b53a087b9708ef9..256c45b3ae343c983e667b01404d8fb3e3667b4a 100644 (file)
@@ -62,4 +62,5 @@ generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 328c8352480c5dcfd34d72a70d01d6a57e5bb515..db3622f22b618303a8bc9c3c2f34762f400dbafb 100644 (file)
@@ -1123,8 +1123,10 @@ config X86_REBOOTFIXUPS
          Say N otherwise.
 
 config MICROCODE
-       tristate "CPU microcode loading support"
+       bool "CPU microcode loading support"
+       default y
        depends on CPU_SUP_AMD || CPU_SUP_INTEL
+       depends on BLK_DEV_INITRD
        select FW_LOADER
        ---help---
 
@@ -1166,24 +1168,6 @@ config MICROCODE_OLD_INTERFACE
        def_bool y
        depends on MICROCODE
 
-config MICROCODE_INTEL_EARLY
-       bool
-
-config MICROCODE_AMD_EARLY
-       bool
-
-config MICROCODE_EARLY
-       bool "Early load microcode"
-       depends on MICROCODE=y && BLK_DEV_INITRD
-       select MICROCODE_INTEL_EARLY if MICROCODE_INTEL
-       select MICROCODE_AMD_EARLY if MICROCODE_AMD
-       default y
-       help
-         This option provides functionality to read additional microcode data
-         at the beginning of initrd image. The data tells kernel to load
-         microcode to CPU's as early as possible. No functional change if no
-         microcode data is glued to the initrd, therefore it's safe to say Y.
-
 config X86_MSR
        tristate "/dev/cpu/*/msr - Model-specific register support"
        ---help---
@@ -1308,6 +1292,7 @@ config HIGHMEM
 config X86_PAE
        bool "PAE (Physical Address Extension) Support"
        depends on X86_32 && !HIGHMEM4G
+       select SWIOTLB
        ---help---
          PAE is required for NX support, and furthermore enables
          larger swapspace support for non-overcommit purposes. It
@@ -2042,6 +2027,55 @@ config COMPAT_VDSO
          If unsure, say N: if you are compiling your own kernel, you
          are unlikely to be using a buggy version of glibc.
 
+choice
+       prompt "vsyscall table for legacy applications"
+       depends on X86_64
+       default LEGACY_VSYSCALL_EMULATE
+       help
+         Legacy user code that does not know how to find the vDSO expects
+         to be able to issue three syscalls by calling fixed addresses in
+         kernel space. Since this location is not randomized with ASLR,
+         it can be used to assist security vulnerability exploitation.
+
+         This setting can be changed at boot time via the kernel command
+         line parameter vsyscall=[native|emulate|none].
+
+         On a system with recent enough glibc (2.14 or newer) and no
+         static binaries, you can say None without a performance penalty
+         to improve security.
+
+         If unsure, select "Emulate".
+
+       config LEGACY_VSYSCALL_NATIVE
+               bool "Native"
+               help
+                 Actual executable code is located in the fixed vsyscall
+                 address mapping, implementing time() efficiently. Since
+                 this makes the mapping executable, it can be used during
+                 security vulnerability exploitation (traditionally as
+                 ROP gadgets). This configuration is not recommended.
+
+       config LEGACY_VSYSCALL_EMULATE
+               bool "Emulate"
+               help
+                 The kernel traps and emulates calls into the fixed
+                 vsyscall address mapping. This makes the mapping
+                 non-executable, but it still contains known contents,
+                 which could be used in certain rare security vulnerability
+                 exploits. This configuration is recommended when userspace
+                 still uses the vsyscall area.
+
+       config LEGACY_VSYSCALL_NONE
+               bool "None"
+               help
+                 There will be no vsyscall mapping at all. This will
+                 eliminate any risk of ASLR bypass due to the vsyscall
+                 fixed address mapping. Attempts to use the vsyscalls
+                 will be reported to dmesg, so that either old or
+                 malicious userspace programs can be identified.
+
+endchoice
+
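
To make the new help text concrete: "legacy" users of the vsyscall page call fixed kernel-provided addresses directly instead of going through the vDSO. The sketch below is purely illustrative (the traditional x86-64 slots are gettimeofday at 0xffffffffff600000, time at 0xffffffffff600400 and getcpu at 0xffffffffff600800); it returns normally under the Native or Emulate choices and faults, with a dmesg report, under None:

#include <stdio.h>
#include <time.h>

typedef time_t (*vsyscall_time_t)(time_t *);

int main(void)
{
	/* The historical fixed-address time() vsyscall slot. */
	vsyscall_time_t legacy_time = (vsyscall_time_t)0xffffffffff600400UL;

	printf("time() via fixed vsyscall: %ld\n", (long)legacy_time(NULL));
	return 0;
}
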
 config CMDLINE_BOOL
        bool "Built-in kernel command line"
        ---help---
index 747860c696e1d5a4f7670c3b7b8d6127d2fdeb9f..2dfaa72260b41fcb3c9ef528ceffa9840cfc691d 100644 (file)
@@ -159,6 +159,12 @@ endif
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
 
+# does binutils support CFI?
+cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
+# is .cfi_signal_frame supported too?
+cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
+cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
+
 # does binutils support specific instructions?
 asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
 asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
@@ -166,8 +172,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
 avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
 avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
 
-KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
-KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
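
The probes added above test whether the assembler accepts DWARF CFI directives and, when it does, put -DCONFIG_AS_CFI (plus the sigframe/sections variants) on the compiler and assembler command lines. A short sketch of how such a probe result is typically consumed on the C side (illustrative only; the kernel's real wrappers live in its dwarf2 annotation headers):

/* Emit CFI annotations from inline assembly only when the assembler
 * understands them; otherwise compile them away. */
#ifdef CONFIG_AS_CFI
# define CFI_STARTPROC	".cfi_startproc\n"
# define CFI_ENDPROC	".cfi_endproc\n"
#else
# define CFI_STARTPROC	""
# define CFI_ENDPROC	""
#endif
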
 
index ee1b6d346b983ad196a003dcb47c7a1de439eb3b..583d539a41977a2c7397b9fd38e0839d3c41dbb9 100644 (file)
@@ -624,7 +624,7 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
 static efi_status_t
 __gop_query32(struct efi_graphics_output_protocol_32 *gop32,
              struct efi_graphics_output_mode_info **info,
-             unsigned long *size, u32 *fb_base)
+             unsigned long *size, u64 *fb_base)
 {
        struct efi_graphics_output_protocol_mode_32 *mode;
        efi_status_t status;
@@ -650,7 +650,8 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
        unsigned long nr_gops;
        u16 width, height;
        u32 pixels_per_scan_line;
-       u32 fb_base;
+       u32 ext_lfb_base;
+       u64 fb_base;
        struct efi_pixel_bitmask pixel_info;
        int pixel_format;
        efi_status_t status;
@@ -667,6 +668,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
                bool conout_found = false;
                void *dummy = NULL;
                u32 h = handles[i];
+               u64 current_fb_base;
 
                status = efi_call_early(handle_protocol, h,
                                        proto, (void **)&gop32);
@@ -678,7 +680,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
                if (status == EFI_SUCCESS)
                        conout_found = true;
 
-               status = __gop_query32(gop32, &info, &size, &fb_base);
+               status = __gop_query32(gop32, &info, &size, &current_fb_base);
                if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
                        /*
                         * Systems that use the UEFI Console Splitter may
@@ -692,6 +694,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
                        pixel_format = info->pixel_format;
                        pixel_info = info->pixel_information;
                        pixels_per_scan_line = info->pixels_per_scan_line;
+                       fb_base = current_fb_base;
 
                        /*
                         * Once we've found a GOP supporting ConOut,
@@ -713,6 +716,13 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
        si->lfb_width = width;
        si->lfb_height = height;
        si->lfb_base = fb_base;
+
+       ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
+       if (ext_lfb_base) {
+               si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+               si->ext_lfb_base = ext_lfb_base;
+       }
+
        si->pages = 1;
 
        setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
@@ -727,7 +737,7 @@ out:
 static efi_status_t
 __gop_query64(struct efi_graphics_output_protocol_64 *gop64,
              struct efi_graphics_output_mode_info **info,
-             unsigned long *size, u32 *fb_base)
+             unsigned long *size, u64 *fb_base)
 {
        struct efi_graphics_output_protocol_mode_64 *mode;
        efi_status_t status;
@@ -753,7 +763,8 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
        unsigned long nr_gops;
        u16 width, height;
        u32 pixels_per_scan_line;
-       u32 fb_base;
+       u32 ext_lfb_base;
+       u64 fb_base;
        struct efi_pixel_bitmask pixel_info;
        int pixel_format;
        efi_status_t status;
@@ -770,6 +781,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
                bool conout_found = false;
                void *dummy = NULL;
                u64 h = handles[i];
+               u64 current_fb_base;
 
                status = efi_call_early(handle_protocol, h,
                                        proto, (void **)&gop64);
@@ -781,7 +793,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
                if (status == EFI_SUCCESS)
                        conout_found = true;
 
-               status = __gop_query64(gop64, &info, &size, &fb_base);
+               status = __gop_query64(gop64, &info, &size, &current_fb_base);
                if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
                        /*
                         * Systems that use the UEFI Console Splitter may
@@ -795,6 +807,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
                        pixel_format = info->pixel_format;
                        pixel_info = info->pixel_information;
                        pixels_per_scan_line = info->pixels_per_scan_line;
+                       fb_base = current_fb_base;
 
                        /*
                         * Once we've found a GOP supporting ConOut,
@@ -816,6 +829,13 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
        si->lfb_width = width;
        si->lfb_height = height;
        si->lfb_base = fb_base;
+
+       ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
+       if (ext_lfb_base) {
+               si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+               si->ext_lfb_base = ext_lfb_base;
+       }
+
        si->pages = 1;
 
        setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
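
With these changes the stub can describe a frame buffer located above 4 GiB: the low 32 bits still land in lfb_base, the upper 32 bits go into ext_lfb_base, and VIDEO_CAPABILITY_64BIT_BASE tells consumers the extra field is valid. A small consumer-side sketch (the DEMO_* names and the struct are stand-ins for the real <linux/screen_info.h> definitions):

#include <stdint.h>
#include <stdio.h>

#define DEMO_VIDEO_CAPABILITY_64BIT_BASE	(1u << 1)	/* stand-in flag */

struct demo_screen_info {
	uint32_t lfb_base;	/* low 32 bits of the frame buffer address */
	uint32_t ext_lfb_base;	/* high 32 bits, valid if the flag is set */
	uint16_t capabilities;
};

static uint64_t demo_fb_base(const struct demo_screen_info *si)
{
	uint64_t base = si->lfb_base;

	if (si->capabilities & DEMO_VIDEO_CAPABILITY_64BIT_BASE)
		base |= (uint64_t)si->ext_lfb_base << 32;
	return base;
}

int main(void)
{
	struct demo_screen_info si = {
		.lfb_base	= 0x90000000u,
		.ext_lfb_base	= 0x1u,		/* frame buffer at 0x1_9000_0000 */
		.capabilities	= DEMO_VIDEO_CAPABILITY_64BIT_BASE,
	};

	printf("fb base: %#llx\n", (unsigned long long)demo_fb_base(&si));
	return 0;
}
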
index 2d6b309c8e9a12ac67ddf9d9cb429cb1fe8a7eae..6236b9ec4b764cfd988fb47f08cc000a653fbed4 100644 (file)
@@ -154,7 +154,7 @@ extra_header_fields:
 #else
        .quad   0                               # ImageBase
 #endif
-       .long   CONFIG_PHYSICAL_ALIGN           # SectionAlignment
+       .long   0x20                            # SectionAlignment
        .long   0x20                            # FileAlignment
        .word   0                               # MajorOperatingSystemVersion
        .word   0                               # MinorOperatingSystemVersion
index 80a0e4389c9ad3f5e6e1f6d8bc5292e391801ff2..bacaa13acac544e037571bd292e91f5239256edc 100644 (file)
@@ -554,6 +554,11 @@ static int __init camellia_aesni_init(void)
 {
        const char *feature_name;
 
+       if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+               pr_info("AVX or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+       }
+
        if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
                pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
index 80dcc9261ca31a41b9db930cfb66a9f07b5340d4..a89fdbc1f0beb7e7198c7a625767a2cfe32ca9e3 100644 (file)
 
 #include <asm/desc.h>
 #include <asm/traps.h>
+#include <asm/vdso.h>
+#include <asm/uaccess.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
 
+static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
+{
+       unsigned long top_of_stack =
+               (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
+       return (struct thread_info *)(top_of_stack - THREAD_SIZE);
+}
+
 #ifdef CONFIG_CONTEXT_TRACKING
 /* Called on entry from user mode with IRQs off. */
 __visible void enter_from_user_mode(void)
@@ -66,13 +75,14 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
  */
 unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
 {
+       struct thread_info *ti = pt_regs_to_thread_info(regs);
        unsigned long ret = 0;
        u32 work;
 
-       BUG_ON(regs != task_pt_regs(current));
+       if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
+               BUG_ON(regs != task_pt_regs(current));
 
-       work = ACCESS_ONCE(current_thread_info()->flags) &
-               _TIF_WORK_SYSCALL_ENTRY;
+       work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
 
 #ifdef CONFIG_CONTEXT_TRACKING
        /*
@@ -154,11 +164,12 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
 long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
                                unsigned long phase1_result)
 {
+       struct thread_info *ti = pt_regs_to_thread_info(regs);
        long ret = 0;
-       u32 work = ACCESS_ONCE(current_thread_info()->flags) &
-               _TIF_WORK_SYSCALL_ENTRY;
+       u32 work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
 
-       BUG_ON(regs != task_pt_regs(current));
+       if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
+               BUG_ON(regs != task_pt_regs(current));
 
        /*
         * If we stepped into a sysenter/syscall insn, it trapped in
@@ -207,19 +218,12 @@ long syscall_trace_enter(struct pt_regs *regs)
                return syscall_trace_enter_phase2(regs, arch, phase1_result);
 }
 
-static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
-{
-       unsigned long top_of_stack =
-               (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
-       return (struct thread_info *)(top_of_stack - THREAD_SIZE);
-}
+#define EXIT_TO_USERMODE_LOOP_FLAGS                            \
+       (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |   \
+        _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
 
-/* Called with IRQs disabled. */
-__visible void prepare_exit_to_usermode(struct pt_regs *regs)
+static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 {
-       if (WARN_ON(!irqs_disabled()))
-               local_irq_disable();
-
        /*
         * In order to return to user mode, we need to have IRQs off with
         * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
@@ -229,14 +233,6 @@ __visible void prepare_exit_to_usermode(struct pt_regs *regs)
         * work to clear some of the flags can sleep.
         */
        while (true) {
-               u32 cached_flags =
-                       READ_ONCE(pt_regs_to_thread_info(regs)->flags);
-
-               if (!(cached_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME |
-                                     _TIF_UPROBE | _TIF_NEED_RESCHED |
-                                     _TIF_USER_RETURN_NOTIFY)))
-                       break;
-
                /* We have work to do. */
                local_irq_enable();
 
@@ -260,50 +256,81 @@ __visible void prepare_exit_to_usermode(struct pt_regs *regs)
 
                /* Disable IRQs and retry */
                local_irq_disable();
+
+               cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags);
+
+               if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
+                       break;
+
        }
+}
+
+/* Called with IRQs disabled. */
+__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
+{
+       u32 cached_flags;
+
+       if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
+               local_irq_disable();
+
+       lockdep_sys_exit();
+
+       cached_flags =
+               READ_ONCE(pt_regs_to_thread_info(regs)->flags);
+
+       if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
+               exit_to_usermode_loop(regs, cached_flags);
 
        user_enter();
 }
 
+#define SYSCALL_EXIT_WORK_FLAGS                                \
+       (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |      \
+        _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
+
+static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
+{
+       bool step;
+
+       audit_syscall_exit(regs);
+
+       if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
+               trace_sys_exit(regs, regs->ax);
+
+       /*
+        * If TIF_SYSCALL_EMU is set, we only get here because of
+        * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
+        * We already reported this syscall instruction in
+        * syscall_trace_enter().
+        */
+       step = unlikely(
+               (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
+               == _TIF_SINGLESTEP);
+       if (step || cached_flags & _TIF_SYSCALL_TRACE)
+               tracehook_report_syscall_exit(regs, step);
+}
+
 /*
  * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
  * state such that we can immediately switch to user mode.
  */
-__visible void syscall_return_slowpath(struct pt_regs *regs)
+__visible inline void syscall_return_slowpath(struct pt_regs *regs)
 {
        struct thread_info *ti = pt_regs_to_thread_info(regs);
        u32 cached_flags = READ_ONCE(ti->flags);
-       bool step;
 
        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
 
-       if (WARN(irqs_disabled(), "syscall %ld left IRQs disabled",
-                regs->orig_ax))
+       if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
+           WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
                local_irq_enable();
 
        /*
         * First do one-time work.  If these work items are enabled, we
         * want to run them exactly once per syscall exit with IRQs on.
         */
-       if (cached_flags & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |
-                           _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)) {
-               audit_syscall_exit(regs);
-
-               if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
-                       trace_sys_exit(regs, regs->ax);
-
-               /*
-                * If TIF_SYSCALL_EMU is set, we only get here because of
-                * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
-                * We already reported this syscall instruction in
-                * syscall_trace_enter().
-                */
-               step = unlikely(
-                       (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
-                       == _TIF_SINGLESTEP);
-               if (step || cached_flags & _TIF_SYSCALL_TRACE)
-                       tracehook_report_syscall_exit(regs, step);
-       }
+       if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
+               syscall_slow_exit_work(regs, cached_flags);
 
 #ifdef CONFIG_COMPAT
        /*
@@ -316,3 +343,144 @@ __visible void syscall_return_slowpath(struct pt_regs *regs)
        local_irq_disable();
        prepare_exit_to_usermode(regs);
 }
+
+#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+/*
+ * Does a 32-bit syscall.  Called with IRQs on and does all entry and
+ * exit work and returns with IRQs off.  This function is extremely hot
+ * in workloads that use it, and it's usually called from
+ * do_fast_syscall_32, so forcibly inline it to improve performance.
+ */
+#ifdef CONFIG_X86_32
+/* 32-bit kernels use a trap gate for INT80, and the asm code calls here. */
+__visible
+#else
+/* 64-bit kernels use do_syscall_32_irqs_off() instead. */
+static
+#endif
+__always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
+{
+       struct thread_info *ti = pt_regs_to_thread_info(regs);
+       unsigned int nr = (unsigned int)regs->orig_ax;
+
+#ifdef CONFIG_IA32_EMULATION
+       ti->status |= TS_COMPAT;
+#endif
+
+       if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
+               /*
+                * Subtlety here: if ptrace pokes something larger than
+                * 2^32-1 into orig_ax, this truncates it.  This may or
+                * may not be necessary, but it matches the old asm
+                * behavior.
+                */
+               nr = syscall_trace_enter(regs);
+       }
+
+       if (likely(nr < IA32_NR_syscalls)) {
+               /*
+                * It's possible that a 32-bit syscall implementation
+                * takes a 64-bit parameter but nonetheless assumes that
+                * the high bits are zero.  Make sure we zero-extend all
+                * of the args.
+                */
+               regs->ax = ia32_sys_call_table[nr](
+                       (unsigned int)regs->bx, (unsigned int)regs->cx,
+                       (unsigned int)regs->dx, (unsigned int)regs->si,
+                       (unsigned int)regs->di, (unsigned int)regs->bp);
+       }
+
+       syscall_return_slowpath(regs);
+}
+
+#ifdef CONFIG_X86_64
+/* Handles INT80 on 64-bit kernels */
+__visible void do_syscall_32_irqs_off(struct pt_regs *regs)
+{
+       local_irq_enable();
+       do_syscall_32_irqs_on(regs);
+}
+#endif
+
+/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
+__visible long do_fast_syscall_32(struct pt_regs *regs)
+{
+       /*
+        * Called using the internal vDSO SYSENTER/SYSCALL32 calling
+        * convention.  Adjust regs so it looks like we entered using int80.
+        */
+
+       unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
+               vdso_image_32.sym_int80_landing_pad;
+
+       /*
+        * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
+        * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
+        * Fix it up.
+        */
+       regs->ip = landing_pad;
+
+       /*
+        * Fetch ECX from where the vDSO stashed it.
+        *
+        * WARNING: We are in CONTEXT_USER and RCU isn't paying attention!
+        */
+       local_irq_enable();
+       if (
+#ifdef CONFIG_X86_64
+               /*
+                * Micro-optimization: the pointer we're following is explicitly
+                * 32 bits, so it can't be out of range.
+                */
+               __get_user(*(u32 *)&regs->cx,
+                           (u32 __user __force *)(unsigned long)(u32)regs->sp)
+#else
+               get_user(*(u32 *)&regs->cx,
+                        (u32 __user __force *)(unsigned long)(u32)regs->sp)
+#endif
+               ) {
+
+               /* User code screwed up. */
+               local_irq_disable();
+               regs->ax = -EFAULT;
+#ifdef CONFIG_CONTEXT_TRACKING
+               enter_from_user_mode();
+#endif
+               prepare_exit_to_usermode(regs);
+               return 0;       /* Keep it simple: use IRET. */
+       }
+
+       /* Now this is just like a normal syscall. */
+       do_syscall_32_irqs_on(regs);
+
+#ifdef CONFIG_X86_64
+       /*
+        * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
+        * SYSRETL is available on all 64-bit CPUs, so we don't need to
+        * bother with SYSEXIT.
+        *
+        * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
+        * because the ECX fixup above will ensure that this is essentially
+        * never the case.
+        */
+       return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
+               regs->ip == landing_pad &&
+               (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
+#else
+       /*
+        * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
+        *
+        * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
+        * because the ECX fixup above will ensure that this is essentially
+        * never the case.
+        *
+        * We don't allow syscalls at all from VM86 mode, but we still
+        * need to check VM, because we might be returning from sys_vm86.
+        */
+       return static_cpu_has(X86_FEATURE_SEP) &&
+               regs->cs == __USER_CS && regs->ss == __USER_DS &&
+               regs->ip == landing_pad &&
+               (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
+#endif
+}
+#endif
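
One easy-to-miss detail in do_syscall_32_irqs_on() above is the explicit (unsigned int) cast on every argument: a compat syscall argument arrives in a 64-bit register whose upper half may hold stale data, so it is forcibly zero-extended before the 32-bit syscall body sees it. A tiny userspace demonstration of what the cast changes (demo_sys_body is hypothetical):

#include <stdio.h>

/* Stand-in for a syscall body that only cares about the low 32 bits. */
static long demo_sys_body(unsigned long fd)
{
	return (long)fd;
}

int main(void)
{
	/* Pretend the register still carries garbage high bits from 64-bit code. */
	unsigned long reg = 0xdeadbeef00000003UL;

	printf("without zero-extension: %#lx\n", demo_sys_body(reg));
	printf("with zero-extension:    %#lx\n", demo_sys_body((unsigned int)reg));
	return 0;
}
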
index b2909bf8cf7029b17f6457edcd86b6e04395e94b..3eb572ed3d7ad438d8dfd1627b5b4121314c9f67 100644 (file)
@@ -3,7 +3,7 @@
  *
  * entry_32.S contains the system-call and low-level fault and trap handling routines.
  *
- * Stack layout in 'syscall_exit':
+ * Stack layout while running C code:
  *     ptrace needs to have all registers on the stack.
  *     If the order here is changed, it needs to be
  *     updated in fork.c:copy_process(), signal.c:do_signal(),
 
 #endif /* CONFIG_X86_32_LAZY_GS */
 
-.macro SAVE_ALL
+.macro SAVE_ALL pt_regs_ax=%eax
        cld
        PUSH_GS
        pushl   %fs
        pushl   %es
        pushl   %ds
-       pushl   %eax
+       pushl   \pt_regs_ax
        pushl   %ebp
        pushl   %edi
        pushl   %esi
@@ -211,7 +211,11 @@ ENTRY(ret_from_fork)
        popl    %eax
        pushl   $0x0202                         # Reset kernel eflags
        popfl
-       jmp     syscall_exit
+
+       /* When we fork, we trace the syscall return in the child, too. */
+       movl    %esp, %eax
+       call    syscall_return_slowpath
+       jmp     restore_all
 END(ret_from_fork)
 
 ENTRY(ret_from_kernel_thread)
@@ -224,7 +228,15 @@ ENTRY(ret_from_kernel_thread)
        movl    PT_EBP(%esp), %eax
        call    *PT_EBX(%esp)
        movl    $0, PT_EAX(%esp)
-       jmp     syscall_exit
+
+       /*
+        * Kernel threads return to userspace as if returning from a syscall.
+        * We should check whether anything actually uses this path and, if so,
+        * consider switching it over to ret_from_fork.
+        */
+       movl    %esp, %eax
+       call    syscall_return_slowpath
+       jmp     restore_all
 ENDPROC(ret_from_kernel_thread)
 
 /*
@@ -255,7 +267,6 @@ ret_from_intr:
        jb      resume_kernel                   # not returning to v8086 or userspace
 
 ENTRY(resume_userspace)
-       LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl    %esp, %eax
@@ -276,76 +287,47 @@ need_resched:
 END(resume_kernel)
 #endif
 
-/*
- * SYSENTER_RETURN points to after the SYSENTER instruction
- * in the vsyscall page.  See vsyscall-sysentry.S, which defines
- * the symbol.
- */
-
        # SYSENTER  call handler stub
 ENTRY(entry_SYSENTER_32)
        movl    TSS_sysenter_sp0(%esp), %esp
 sysenter_past_esp:
+       pushl   $__USER_DS              /* pt_regs->ss */
+       pushl   %ecx                    /* pt_regs->cx */
+       pushfl                          /* pt_regs->flags (except IF = 0) */
+       orl     $X86_EFLAGS_IF, (%esp)  /* Fix IF */
+       pushl   $__USER_CS              /* pt_regs->cs */
+       pushl   $0                      /* pt_regs->ip = 0 (placeholder) */
+       pushl   %eax                    /* pt_regs->orig_ax */
+       SAVE_ALL pt_regs_ax=$-ENOSYS    /* save rest */
+
        /*
-        * Interrupts are disabled here, but we can't trace it until
-        * enough kernel state to call TRACE_IRQS_OFF can be called - but
-        * we immediately enable interrupts at that point anyway.
-        */
-       pushl   $__USER_DS
-       pushl   %ebp
-       pushfl
-       orl     $X86_EFLAGS_IF, (%esp)
-       pushl   $__USER_CS
-       /*
-        * Push current_thread_info()->sysenter_return to the stack.
-        * A tiny bit of offset fixup is necessary: TI_sysenter_return
-        * is relative to thread_info, which is at the bottom of the
-        * kernel stack page.  4*4 means the 4 words pushed above;
-        * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
-        * and THREAD_SIZE takes us to the bottom.
+        * User mode is traced as though IRQs are on, and SYSENTER
+        * turned them off.
         */
-       pushl   ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
-
-       pushl   %eax
-       SAVE_ALL
-       ENABLE_INTERRUPTS(CLBR_NONE)
-
-/*
- * Load the potential sixth argument from user stack.
- * Careful about security.
- */
-       cmpl    $__PAGE_OFFSET-3, %ebp
-       jae     syscall_fault
-       ASM_STAC
-1:     movl    (%ebp), %ebp
-       ASM_CLAC
-       movl    %ebp, PT_EBP(%esp)
-       _ASM_EXTABLE(1b, syscall_fault)
+       TRACE_IRQS_OFF
 
-       GET_THREAD_INFO(%ebp)
+       movl    %esp, %eax
+       call    do_fast_syscall_32
+       testl   %eax, %eax
+       jz      .Lsyscall_32_done
 
-       testl   $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
-       jnz     syscall_trace_entry
-sysenter_do_call:
-       cmpl    $(NR_syscalls), %eax
-       jae     sysenter_badsys
-       call    *sys_call_table(, %eax, 4)
-sysenter_after_call:
-       movl    %eax, PT_EAX(%esp)
-       LOCKDEP_SYS_EXIT
-       DISABLE_INTERRUPTS(CLBR_ANY)
-       TRACE_IRQS_OFF
-       movl    TI_flags(%ebp), %ecx
-       testl   $_TIF_ALLWORK_MASK, %ecx
-       jnz     syscall_exit_work_irqs_off
-sysenter_exit:
-/* if something modifies registers it must also disable sysexit */
-       movl    PT_EIP(%esp), %edx
-       movl    PT_OLDESP(%esp), %ecx
-       xorl    %ebp, %ebp
-       TRACE_IRQS_ON
+/* Opportunistic SYSEXIT */
+       TRACE_IRQS_ON                   /* User mode traces as IRQs on. */
+       movl    PT_EIP(%esp), %edx      /* pt_regs->ip */
+       movl    PT_OLDESP(%esp), %ecx   /* pt_regs->sp */
 1:     mov     PT_FS(%esp), %fs
        PTGS_TO_GS
+       popl    %ebx                    /* pt_regs->bx */
+       addl    $2*4, %esp              /* skip pt_regs->cx and pt_regs->dx */
+       popl    %esi                    /* pt_regs->si */
+       popl    %edi                    /* pt_regs->di */
+       popl    %ebp                    /* pt_regs->bp */
+       popl    %eax                    /* pt_regs->ax */
+
+       /*
+        * Return back to the vDSO, which will pop ecx and edx.
+        * Don't bother with DS and ES (they already contain __USER_DS).
+        */
        ENABLE_INTERRUPTS_SYSEXIT
 
 .pushsection .fixup, "ax"
@@ -359,21 +341,18 @@ ENDPROC(entry_SYSENTER_32)
        # system call handler stub
 ENTRY(entry_INT80_32)
        ASM_CLAC
-       pushl   %eax                            # save orig_eax
-       SAVE_ALL
-       GET_THREAD_INFO(%ebp)
-                                               # system call tracing in operation / emulation
-       testl   $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
-       jnz     syscall_trace_entry
-       cmpl    $(NR_syscalls), %eax
-       jae     syscall_badsys
-syscall_call:
-       call    *sys_call_table(, %eax, 4)
-syscall_after_call:
-       movl    %eax, PT_EAX(%esp)              # store the return value
-syscall_exit:
-       LOCKDEP_SYS_EXIT
-       jmp     syscall_exit_work
+       pushl   %eax                    /* pt_regs->orig_ax */
+       SAVE_ALL pt_regs_ax=$-ENOSYS    /* save rest */
+
+       /*
+        * User mode is traced as though IRQs are on.  Unlike the 64-bit
+        * case, INT80 is a trap gate on 32-bit kernels, so interrupts
+        * are already on (unless user code is messing around with iopl).
+        */
+
+       movl    %esp, %eax
+       call    do_syscall_32_irqs_on
+.Lsyscall_32_done:
 
 restore_all:
        TRACE_IRQS_IRET
@@ -450,47 +429,6 @@ ldt_ss:
 #endif
 ENDPROC(entry_INT80_32)
 
-       # perform syscall exit tracing
-       ALIGN
-syscall_trace_entry:
-       movl    $-ENOSYS, PT_EAX(%esp)
-       movl    %esp, %eax
-       call    syscall_trace_enter
-       /* What it returned is what we'll actually use.  */
-       cmpl    $(NR_syscalls), %eax
-       jnae    syscall_call
-       jmp     syscall_exit
-END(syscall_trace_entry)
-
-       # perform syscall exit tracing
-       ALIGN
-syscall_exit_work_irqs_off:
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_ANY)
-
-syscall_exit_work:
-       movl    %esp, %eax
-       call    syscall_return_slowpath
-       jmp     restore_all
-END(syscall_exit_work)
-
-syscall_fault:
-       ASM_CLAC
-       GET_THREAD_INFO(%ebp)
-       movl    $-EFAULT, PT_EAX(%esp)
-       jmp     resume_userspace
-END(syscall_fault)
-
-syscall_badsys:
-       movl    $-ENOSYS, %eax
-       jmp     syscall_after_call
-END(syscall_badsys)
-
-sysenter_badsys:
-       movl    $-ENOSYS, %eax
-       jmp     sysenter_after_call
-END(sysenter_badsys)
-
 .macro FIXUP_ESPFIX_STACK
 /*
  * Switch back for ESPFIX stack to the normal zerobased stack
index 055a01de7c8da6e052cfdebe0be8447b14933c87..53616ca0324440d6cba482c3855433b5a63b4ef9 100644 (file)
@@ -391,20 +391,16 @@ GLOBAL(stub_execveat)
        jmp     return_from_execve
 END(stub_execveat)
 
-#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
+#if defined(CONFIG_X86_X32_ABI)
        .align  8
 GLOBAL(stub_x32_execve)
-GLOBAL(stub32_execve)
        call    compat_sys_execve
        jmp     return_from_execve
-END(stub32_execve)
 END(stub_x32_execve)
        .align  8
 GLOBAL(stub_x32_execveat)
-GLOBAL(stub32_execveat)
        call    compat_sys_execveat
        jmp     return_from_execve
-END(stub32_execveat)
 END(stub_x32_execveat)
 #endif
 
@@ -557,7 +553,6 @@ ret_from_intr:
        jz      retint_kernel
 
        /* Interrupt came from user space */
-       LOCKDEP_SYS_EXIT_IRQ
 GLOBAL(retint_user)
        mov     %rsp,%rdi
        call    prepare_exit_to_usermode
@@ -587,7 +582,7 @@ retint_kernel:
  * At this label, code paths which return to kernel and to user,
  * which come from interrupts/exception and from syscalls, merge.
  */
-restore_regs_and_iret:
+GLOBAL(restore_regs_and_iret)
        RESTORE_EXTRA_REGS
 restore_c_regs_and_iret:
        RESTORE_C_REGS
index a9360d40fb7fd5f518077cc2d9aaa63cfd4853aa..c3201830a85ee8dcddabb0cab864545d99d47fd3 100644 (file)
 #include <linux/linkage.h>
 #include <linux/err.h>
 
-/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
-#include <linux/elf-em.h>
-#define AUDIT_ARCH_I386                (EM_386|__AUDIT_ARCH_LE)
-#define __AUDIT_ARCH_LE                0x40000000
-
-#ifndef CONFIG_AUDITSYSCALL
-# define sysexit_audit         ia32_ret_from_sys_call_irqs_off
-# define sysretl_audit         ia32_ret_from_sys_call_irqs_off
-#endif
-
        .section .entry.text, "ax"
 
 #ifdef CONFIG_PARAVIRT
@@ -58,219 +48,87 @@ ENDPROC(native_usergs_sysret32)
  * with the int 0x80 path.
  */
 ENTRY(entry_SYSENTER_compat)
-       /*
-        * Interrupts are off on entry.
-        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-        * it is too small to ever cause noticeable irq latency.
-        */
+       /* Interrupts are off on entry. */
        SWAPGS_UNSAFE_STACK
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-       ENABLE_INTERRUPTS(CLBR_NONE)
 
-       /* Zero-extending 32-bit regs, do not remove */
-       movl    %ebp, %ebp
+       /*
+        * User tracing code (ptrace or signal handlers) might assume that
+        * the saved RAX contains a 32-bit number when we're invoking a 32-bit
+        * syscall.  Just in case the high bits are nonzero, zero-extend
+        * the syscall number.  (This could almost certainly be deleted
+        * with no ill effects.)
+        */
        movl    %eax, %eax
 
-       movl    ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
-
        /* Construct struct pt_regs on stack */
        pushq   $__USER32_DS            /* pt_regs->ss */
-       pushq   %rbp                    /* pt_regs->sp */
-       pushfq                          /* pt_regs->flags */
+       pushq   %rcx                    /* pt_regs->sp */
+
+       /*
+        * Push flags.  This is nasty.  First, interrupts are currently
+        * off, but we need pt_regs->flags to have IF set.  Second, even
+        * if TF was set when SYSENTER started, it's clear by now.  We fix
+        * that later using TIF_SINGLESTEP.
+        */
+       pushfq                          /* pt_regs->flags (except IF = 0) */
+       orl     $X86_EFLAGS_IF, (%rsp)  /* Fix saved flags */
+       ASM_CLAC                        /* Clear AC after saving FLAGS */
+
        pushq   $__USER32_CS            /* pt_regs->cs */
-       pushq   %r10                    /* pt_regs->ip = thread_info->sysenter_return */
+       xorq    %r8,%r8
+       pushq   %r8                     /* pt_regs->ip = 0 (placeholder) */
        pushq   %rax                    /* pt_regs->orig_ax */
        pushq   %rdi                    /* pt_regs->di */
        pushq   %rsi                    /* pt_regs->si */
        pushq   %rdx                    /* pt_regs->dx */
-       pushq   %rcx                    /* pt_regs->cx */
+       pushq   %rcx                    /* pt_regs->cx (will be overwritten) */
        pushq   $-ENOSYS                /* pt_regs->ax */
+       pushq   %r8                     /* pt_regs->r8  = 0 */
+       pushq   %r8                     /* pt_regs->r9  = 0 */
+       pushq   %r8                     /* pt_regs->r10 = 0 */
+       pushq   %r8                     /* pt_regs->r11 = 0 */
+       pushq   %rbx                    /* pt_regs->rbx */
+       pushq   %rbp                    /* pt_regs->rbp */
+       pushq   %r8                     /* pt_regs->r12 = 0 */
+       pushq   %r8                     /* pt_regs->r13 = 0 */
+       pushq   %r8                     /* pt_regs->r14 = 0 */
+       pushq   %r8                     /* pt_regs->r15 = 0 */
        cld
-       sub     $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
-
-       /*
-        * no need to do an access_ok check here because rbp has been
-        * 32-bit zero extended
-        */
-       ASM_STAC
-1:     movl    (%rbp), %ebp
-       _ASM_EXTABLE(1b, ia32_badarg)
-       ASM_CLAC
 
        /*
         * Sysenter doesn't filter flags, so we need to clear NT
         * ourselves.  To save a few cycles, we can check whether
         * NT was set instead of doing an unconditional popfq.
+        * This needs to happen before enabling interrupts so that
+        * we don't get preempted with NT set.
+        *
+        * NB.: sysenter_fix_flags is a label with the code under it moved
+        * out-of-line as an optimization: NT is unlikely to be set in the
+        * majority of the cases and instead of polluting the I$ unnecessarily,
+        * we're keeping that code behind a branch which will predict as
+        * not-taken and therefore its instructions won't be fetched.
         */
        testl   $X86_EFLAGS_NT, EFLAGS(%rsp)
        jnz     sysenter_fix_flags
 sysenter_flags_fixed:
 
-       orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-       testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz     sysenter_tracesys
-
-sysenter_do_call:
-       /* 32-bit syscall -> 64-bit C ABI argument conversion */
-       movl    %edi, %r8d              /* arg5 */
-       movl    %ebp, %r9d              /* arg6 */
-       xchg    %ecx, %esi              /* rsi:arg2, rcx:arg4 */
-       movl    %ebx, %edi              /* arg1 */
-       movl    %edx, %edx              /* arg3 (zero extension) */
-sysenter_dispatch:
-       cmpq    $(IA32_NR_syscalls-1), %rax
-       ja      1f
-       call    *ia32_sys_call_table(, %rax, 8)
-       movq    %rax, RAX(%rsp)
-1:
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
-       testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz     sysexit_audit
-sysexit_from_sys_call:
        /*
-        * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an
-        * NMI between STI and SYSEXIT has poorly specified behavior,
-        * and an NMI followed by an IRQ with usergs is fatal.  So
-        * we just pretend we're using SYSEXIT but we really use
-        * SYSRETL instead.
-        *
-        * This code path is still called 'sysexit' because it pairs
-        * with 'sysenter' and it uses the SYSENTER calling convention.
+        * User mode is traced as though IRQs are on, and SYSENTER
+        * turned them off.
         */
-       andl    $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-       movl    RIP(%rsp), %ecx         /* User %eip */
-       movq    RAX(%rsp), %rax
-       movl    RSI(%rsp), %esi
-       movl    RDI(%rsp), %edi
-       xorl    %edx, %edx              /* Do not leak kernel information */
-       xorq    %r8, %r8
-       xorq    %r9, %r9
-       xorq    %r10, %r10
-       movl    EFLAGS(%rsp), %r11d     /* User eflags */
-       TRACE_IRQS_ON
-
-       /*
-        * SYSRETL works even on Intel CPUs.  Use it in preference to SYSEXIT,
-        * since it avoids a dicey window with interrupts enabled.
-        */
-       movl    RSP(%rsp), %esp
-
-       /*
-        * USERGS_SYSRET32 does:
-        *  gsbase = user's gs base
-        *  eip = ecx
-        *  rflags = r11
-        *  cs = __USER32_CS
-        *  ss = __USER_DS
-        *
-        * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, which does:
-        *
-        *  pop %ebp
-        *  pop %edx
-        *  pop %ecx
-        *
-        * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
-        * avoid info leaks.  R11 ends up with VDSO32_SYSENTER_RETURN's
-        * address (already known to user code), and R12-R15 are
-        * callee-saved and therefore don't contain any interesting
-        * kernel data.
-        */
-       USERGS_SYSRET32
-
-#ifdef CONFIG_AUDITSYSCALL
-       .macro auditsys_entry_common
-       /*
-        * At this point, registers hold syscall args in the 32-bit syscall ABI:
-        * EAX is syscall number, the 6 args are in EBX,ECX,EDX,ESI,EDI,EBP.
-        *
-        * We want to pass them to __audit_syscall_entry(), which is a 64-bit
-        * C function with 5 parameters, so shuffle them to match what
-        * the function expects: RDI,RSI,RDX,RCX,R8.
-        */
-       movl    %esi, %r8d              /* arg5 (R8 ) <= 4th syscall arg (ESI) */
-       xchg    %ecx, %edx              /* arg4 (RCX) <= 3rd syscall arg (EDX) */
-                                       /* arg3 (RDX) <= 2nd syscall arg (ECX) */
-       movl    %ebx, %esi              /* arg2 (RSI) <= 1st syscall arg (EBX) */
-       movl    %eax, %edi              /* arg1 (RDI) <= syscall number  (EAX) */
-       call    __audit_syscall_entry
-
-       /*
-        * We are going to jump back to the syscall dispatch code.
-        * Prepare syscall args as required by the 64-bit C ABI.
-        * Registers clobbered by __audit_syscall_entry() are
-        * loaded from pt_regs on stack:
-        */
-       movl    ORIG_RAX(%rsp), %eax    /* syscall number */
-       movl    %ebx, %edi              /* arg1 */
-       movl    RCX(%rsp), %esi         /* arg2 */
-       movl    RDX(%rsp), %edx         /* arg3 */
-       movl    RSI(%rsp), %ecx         /* arg4 */
-       movl    RDI(%rsp), %r8d         /* arg5 */
-       .endm
-
-       .macro auditsys_exit exit
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
-       testl   $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz     ia32_ret_from_sys_call
-       movl    %eax, %esi              /* second arg, syscall return value */
-       cmpl    $-MAX_ERRNO, %eax       /* is it an error ? */
-       jbe     1f
-       movslq  %eax, %rsi              /* if error sign extend to 64 bits */
-1:     setbe   %al                     /* 1 if error, 0 if not */
-       movzbl  %al, %edi               /* zero-extend that into %edi */
-       call    __audit_syscall_exit
-       movl    $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
-       DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
-       testl   %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jz      \exit
-       xorl    %eax, %eax              /* Do not leak kernel information */
-       movq    %rax, R11(%rsp)
-       movq    %rax, R10(%rsp)
-       movq    %rax, R9(%rsp)
-       movq    %rax, R8(%rsp)
-       jmp     int_ret_from_sys_call_irqs_off
-       .endm
 
-sysenter_auditsys:
-       auditsys_entry_common
-       movl    %ebp, %r9d              /* reload 6th syscall arg */
-       jmp     sysenter_dispatch
-
-sysexit_audit:
-       auditsys_exit sysexit_from_sys_call
-#endif
+       movq    %rsp, %rdi
+       call    do_fast_syscall_32
+       testl   %eax, %eax
+       jz      .Lsyscall_32_done
+       jmp     sysret32_from_system_call
 
 sysenter_fix_flags:
-       pushq   $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
+       pushq   $X86_EFLAGS_FIXED
        popfq
        jmp     sysenter_flags_fixed
-
-sysenter_tracesys:
-#ifdef CONFIG_AUDITSYSCALL
-       testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jz      sysenter_auditsys
-#endif
-       SAVE_EXTRA_REGS
-       xorl    %eax, %eax              /* Do not leak kernel information */
-       movq    %rax, R11(%rsp)
-       movq    %rax, R10(%rsp)
-       movq    %rax, R9(%rsp)
-       movq    %rax, R8(%rsp)
-       movq    %rsp, %rdi              /* &pt_regs -> arg1 */
-       call    syscall_trace_enter
-
-       /* Reload arg registers from stack. (see sysenter_tracesys) */
-       movl    RCX(%rsp), %ecx
-       movl    RDX(%rsp), %edx
-       movl    RSI(%rsp), %esi
-       movl    RDI(%rsp), %edi
-       movl    %eax, %eax              /* zero extension */
-
-       RESTORE_EXTRA_REGS
-       jmp     sysenter_do_call
 ENDPROC(entry_SYSENTER_compat)
 
 /*
@@ -298,21 +156,14 @@ ENDPROC(entry_SYSENTER_compat)
  * edi  arg5
  * esp  user stack
  * 0(%esp) arg6
- *
- * This is purely a fast path. For anything complicated we use the int 0x80
- * path below. We set up a complete hardware stack frame to share code
- * with the int 0x80 path.
  */
 ENTRY(entry_SYSCALL_compat)
-       /*
-        * Interrupts are off on entry.
-        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-        * it is too small to ever cause noticeable irq latency.
-        */
+       /* Interrupts are off on entry. */
        SWAPGS_UNSAFE_STACK
+
+       /* Stash user ESP and switch to the kernel stack. */
        movl    %esp, %r8d
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-       ENABLE_INTERRUPTS(CLBR_NONE)
 
        /* Zero-extending 32-bit regs, do not remove */
        movl    %eax, %eax
@@ -327,162 +178,67 @@ ENTRY(entry_SYSCALL_compat)
        pushq   %rdi                    /* pt_regs->di */
        pushq   %rsi                    /* pt_regs->si */
        pushq   %rdx                    /* pt_regs->dx */
-       pushq   %rbp                    /* pt_regs->cx */
-       movl    %ebp, %ecx
+       pushq   %rcx                    /* pt_regs->cx (will be overwritten) */
        pushq   $-ENOSYS                /* pt_regs->ax */
-       sub     $(10*8), %rsp           /* pt_regs->r8-11, bp, bx, r12-15 not saved */
+       xorq    %r8,%r8
+       pushq   %r8                     /* pt_regs->r8  = 0 */
+       pushq   %r8                     /* pt_regs->r9  = 0 */
+       pushq   %r8                     /* pt_regs->r10 = 0 */
+       pushq   %r8                     /* pt_regs->r11 = 0 */
+       pushq   %rbx                    /* pt_regs->rbx */
+       pushq   %rbp                    /* pt_regs->rbp */
+       pushq   %r8                     /* pt_regs->r12 = 0 */
+       pushq   %r8                     /* pt_regs->r13 = 0 */
+       pushq   %r8                     /* pt_regs->r14 = 0 */
+       pushq   %r8                     /* pt_regs->r15 = 0 */
 
        /*
-        * No need to do an access_ok check here because r8 has been
-        * 32-bit zero extended:
+        * User mode is traced as though IRQs are on, and SYSCALL
+        * turned them off.
         */
-       ASM_STAC
-1:     movl    (%r8), %r9d
-       _ASM_EXTABLE(1b, ia32_badarg)
-       ASM_CLAC
-       orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-       testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz     cstar_tracesys
-
-cstar_do_call:
-       /* 32-bit syscall -> 64-bit C ABI argument conversion */
-       movl    %edi, %r8d              /* arg5 */
-       /* r9 already loaded */         /* arg6 */
-       xchg    %ecx, %esi              /* rsi:arg2, rcx:arg4 */
-       movl    %ebx, %edi              /* arg1 */
-       movl    %edx, %edx              /* arg3 (zero extension) */
-
-cstar_dispatch:
-       cmpq    $(IA32_NR_syscalls-1), %rax
-       ja      1f
-
-       call    *ia32_sys_call_table(, %rax, 8)
-       movq    %rax, RAX(%rsp)
-1:
-       DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
-       testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz     sysretl_audit
 
-sysretl_from_sys_call:
-       andl    $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-       movl    RDX(%rsp), %edx
-       movl    RSI(%rsp), %esi
-       movl    RDI(%rsp), %edi
-       movl    RIP(%rsp), %ecx
-       movl    EFLAGS(%rsp), %r11d
-       movq    RAX(%rsp), %rax
-       xorq    %r10, %r10
-       xorq    %r9, %r9
-       xorq    %r8, %r8
-       TRACE_IRQS_ON
-       movl    RSP(%rsp), %esp
-       /*
-        * 64-bit->32-bit SYSRET restores eip from ecx,
-        * eflags from r11 (but RF and VM bits are forced to 0),
-        * cs and ss are loaded from MSRs.
-        * (Note: 32-bit->32-bit SYSRET is different: since r11
-        * does not exist, it merely sets eflags.IF=1).
+       movq    %rsp, %rdi
+       call    do_fast_syscall_32
+       testl   %eax, %eax
+       jz      .Lsyscall_32_done
+
+       /* Opportunistic SYSRET */
+sysret32_from_system_call:
+       TRACE_IRQS_ON                   /* User mode traces as IRQs on. */
+       movq    RBX(%rsp), %rbx         /* pt_regs->rbx */
+       movq    RBP(%rsp), %rbp         /* pt_regs->rbp */
+       movq    EFLAGS(%rsp), %r11      /* pt_regs->flags (in r11) */
+       movq    RIP(%rsp), %rcx         /* pt_regs->ip (in rcx) */
+       addq    $RAX, %rsp              /* Skip r8-r15 */
+       popq    %rax                    /* pt_regs->rax */
+       popq    %rdx                    /* Skip pt_regs->cx */
+       popq    %rdx                    /* pt_regs->dx */
+       popq    %rsi                    /* pt_regs->si */
+       popq    %rdi                    /* pt_regs->di */
+
+        /*
+         * USERGS_SYSRET32 does:
+         *  GSBASE = user's GS base
+         *  EIP = ECX
+         *  RFLAGS = R11
+         *  CS = __USER32_CS
+         *  SS = __USER_DS
+         *
+        * ECX will not match pt_regs->cx, but we're returning to a vDSO
+        * trampoline that will fix up RCX, so this is okay.
         *
-        * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
-        * descriptor is not reinitialized.  This means that we must
-        * avoid SYSRET with SS == NULL, which could happen if we schedule,
-        * exit the kernel, and re-enter using an interrupt vector.  (All
-        * interrupt entries on x86_64 set SS to NULL.)  We prevent that
-        * from happening by reloading SS in __switch_to.
-        */
-       USERGS_SYSRET32
-
-#ifdef CONFIG_AUDITSYSCALL
-cstar_auditsys:
-       movl    %r9d, R9(%rsp)          /* register to be clobbered by call */
-       auditsys_entry_common
-       movl    R9(%rsp), %r9d          /* reload 6th syscall arg */
-       jmp     cstar_dispatch
-
-sysretl_audit:
-       auditsys_exit sysretl_from_sys_call
-#endif
-
-cstar_tracesys:
-#ifdef CONFIG_AUDITSYSCALL
-       testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jz      cstar_auditsys
-#endif
-       xchgl   %r9d, %ebp
-       SAVE_EXTRA_REGS
-       xorl    %eax, %eax              /* Do not leak kernel information */
-       movq    %rax, R11(%rsp)
-       movq    %rax, R10(%rsp)
-       movq    %r9, R9(%rsp)
-       movq    %rax, R8(%rsp)
-       movq    %rsp, %rdi              /* &pt_regs -> arg1 */
-       call    syscall_trace_enter
-       movl    R9(%rsp), %r9d
-
-       /* Reload arg registers from stack. (see sysenter_tracesys) */
-       movl    RCX(%rsp), %ecx
-       movl    RDX(%rsp), %edx
-       movl    RSI(%rsp), %esi
-       movl    RDI(%rsp), %edi
-       movl    %eax, %eax              /* zero extension */
-
-       RESTORE_EXTRA_REGS
-       xchgl   %ebp, %r9d
-       jmp     cstar_do_call
+        * R12-R15 are callee-saved, so they contain whatever was in them
+        * when the system call started, which is already known to user
+        * code.  We zero R8-R10 to avoid info leaks.
+         */
+       xorq    %r8, %r8
+       xorq    %r9, %r9
+       xorq    %r10, %r10
+       movq    RSP-ORIG_RAX(%rsp), %rsp
+        USERGS_SYSRET32
 END(entry_SYSCALL_compat)
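
For orientation: the compat SYSCALL stub above now does its real work in C through
do_fast_syscall_32().  The snippet below is a rough, hedged sketch of what that helper
has to do (recover the real arg2 that the vDSO trampoline stashed on the user stack,
dispatch, and tell the asm whether the opportunistic SYSRET is usable); it is a
simplified illustration, not the actual arch/x86/entry/common.c implementation.

    /* Rough sketch only -- not the real do_fast_syscall_32(). */
    #include <linux/errno.h>
    #include <linux/irqflags.h>
    #include <linux/uaccess.h>
    #include <asm/ptrace.h>

    static void do_syscall_32_irqs_on(struct pt_regs *regs);   /* sketched below */

    long do_fast_syscall_32(struct pt_regs *regs)
    {
            u32 saved_arg2;

            /* The real code also points regs->ip at the vDSO's int80_landing_pad. */

            /*
             * The vDSO trampoline pushed the real ECX (arg2) onto the user stack
             * and put the user ESP into ECX before SYSENTER/SYSCALL, so fetch it
             * back before dispatching.
             */
            local_irq_enable();
            if (get_user(saved_arg2,
                         (u32 __user *)(unsigned long)(u32)regs->sp)) {
                    regs->ax = -EFAULT;     /* real code also runs the exit work */
                    return 0;               /* 0: the asm takes the IRET path */
            }
            regs->cx = saved_arg2;

            do_syscall_32_irqs_on(regs);    /* same dispatch as int $0x80 */

            return 1;                       /* nonzero: try the opportunistic SYSRET */
    }
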
 
-ia32_badarg:
-       /*
-        * So far, we've entered kernel mode, set AC, turned on IRQs, and
-        * saved C regs except r8-r11.  We haven't done any of the other
-        * standard entry work, though.  We want to bail, but we shouldn't
-        * treat this as a syscall entry since we don't even know what the
-        * args are.  Instead, treat this as a non-syscall entry, finish
-        * the entry work, and immediately exit after setting AX = -EFAULT.
-        *
-        * We're really just being polite here.  Killing the task outright
-        * would be a reasonable action, too.  Given that the only valid
-        * way to have gotten here is through the vDSO, and we already know
-        * that the stack pointer is bad, the task isn't going to survive
-        * for long no matter what we do.
-        */
-
-       ASM_CLAC                        /* undo STAC */
-       movq    $-EFAULT, RAX(%rsp)     /* return -EFAULT if possible */
-
-       /* Fill in the rest of pt_regs */
-       xorl    %eax, %eax
-       movq    %rax, R11(%rsp)
-       movq    %rax, R10(%rsp)
-       movq    %rax, R9(%rsp)
-       movq    %rax, R8(%rsp)
-       SAVE_EXTRA_REGS
-
-       /* Turn IRQs back off. */
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
-
-       /* Now finish entering normal kernel mode. */
-#ifdef CONFIG_CONTEXT_TRACKING
-       call enter_from_user_mode
-#endif
-
-       /* And exit again. */
-       jmp retint_user
-
-ia32_ret_from_sys_call_irqs_off:
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
-
-ia32_ret_from_sys_call:
-       xorl    %eax, %eax              /* Do not leak kernel information */
-       movq    %rax, R11(%rsp)
-       movq    %rax, R10(%rsp)
-       movq    %rax, R9(%rsp)
-       movq    %rax, R8(%rsp)
-       jmp     int_ret_from_sys_call
-
 /*
  * Emulated IA32 system calls via int 0x80.
  *
@@ -507,14 +263,17 @@ ia32_ret_from_sys_call:
 ENTRY(entry_INT80_compat)
        /*
         * Interrupts are off on entry.
-        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-        * it is too small to ever cause noticeable irq latency.
         */
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        SWAPGS
-       ENABLE_INTERRUPTS(CLBR_NONE)
 
-       /* Zero-extending 32-bit regs, do not remove */
+       /*
+        * User tracing code (ptrace or signal handlers) might assume that
+        * the saved RAX contains a 32-bit number when we're invoking a 32-bit
+        * syscall.  Just in case the high bits are nonzero, zero-extend
+        * the syscall number.  (This could almost certainly be deleted
+        * with no ill effects.)
+        */
        movl    %eax, %eax
 
        /* Construct struct pt_regs on stack (iret frame is already on stack) */
@@ -524,67 +283,37 @@ ENTRY(entry_INT80_compat)
        pushq   %rdx                    /* pt_regs->dx */
        pushq   %rcx                    /* pt_regs->cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
-       pushq   $0                      /* pt_regs->r8 */
-       pushq   $0                      /* pt_regs->r9 */
-       pushq   $0                      /* pt_regs->r10 */
-       pushq   $0                      /* pt_regs->r11 */
+       xorq    %r8,%r8
+       pushq   %r8                     /* pt_regs->r8  = 0 */
+       pushq   %r8                     /* pt_regs->r9  = 0 */
+       pushq   %r8                     /* pt_regs->r10 = 0 */
+       pushq   %r8                     /* pt_regs->r11 = 0 */
+       pushq   %rbx                    /* pt_regs->rbx */
+       pushq   %rbp                    /* pt_regs->rbp */
+       pushq   %r12                    /* pt_regs->r12 */
+       pushq   %r13                    /* pt_regs->r13 */
+       pushq   %r14                    /* pt_regs->r14 */
+       pushq   %r15                    /* pt_regs->r15 */
        cld
-       sub     $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
-
-       orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-       testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz     ia32_tracesys
-
-ia32_do_call:
-       /* 32-bit syscall -> 64-bit C ABI argument conversion */
-       movl    %edi, %r8d              /* arg5 */
-       movl    %ebp, %r9d              /* arg6 */
-       xchg    %ecx, %esi              /* rsi:arg2, rcx:arg4 */
-       movl    %ebx, %edi              /* arg1 */
-       movl    %edx, %edx              /* arg3 (zero extension) */
-       cmpq    $(IA32_NR_syscalls-1), %rax
-       ja      1f
 
-       call    *ia32_sys_call_table(, %rax, 8)
-       movq    %rax, RAX(%rsp)
-1:
-       jmp     int_ret_from_sys_call
-
-ia32_tracesys:
-       SAVE_EXTRA_REGS
-       movq    %rsp, %rdi                      /* &pt_regs -> arg1 */
-       call    syscall_trace_enter
        /*
-        * Reload arg registers from stack in case ptrace changed them.
-        * Don't reload %eax because syscall_trace_enter() returned
-        * the %rax value we should see.  But do truncate it to 32 bits.
-        * If it's -1 to make us punt the syscall, then (u32)-1 is still
-        * an appropriately invalid value.
+        * User mode is traced as though IRQs are on, and the interrupt
+        * gate turned them off.
         */
-       movl    RCX(%rsp), %ecx
-       movl    RDX(%rsp), %edx
-       movl    RSI(%rsp), %esi
-       movl    RDI(%rsp), %edi
-       movl    %eax, %eax              /* zero extension */
-       RESTORE_EXTRA_REGS
-       jmp     ia32_do_call
-END(entry_INT80_compat)
+       TRACE_IRQS_OFF
 
-       .macro PTREGSCALL label, func
-       ALIGN
-GLOBAL(\label)
-       leaq    \func(%rip), %rax
-       jmp     ia32_ptregs_common
-       .endm
+       movq    %rsp, %rdi
+       call    do_syscall_32_irqs_off
+.Lsyscall_32_done:
 
-       PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
-       PTREGSCALL stub32_sigreturn,    sys32_sigreturn
-       PTREGSCALL stub32_fork,         sys_fork
-       PTREGSCALL stub32_vfork,        sys_vfork
+       /* Go back to user mode. */
+       TRACE_IRQS_ON
+       SWAPGS
+       jmp     restore_regs_and_iret
+END(entry_INT80_compat)
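
The INT80 path now defers to do_syscall_32_irqs_off(), which enables interrupts and does
the table-driven dispatch in C.  A hedged sketch of that dispatch follows; the six
zero-extended arguments line up with the six-by-unsigned-long prototype that this series
gives ia32_sys_call_table[] further down, but the bodies are illustrative rather than the
real common.c code.

    /* Rough sketch only -- entry tracing, audit and seccomp hooks omitted. */
    static void do_syscall_32_irqs_on(struct pt_regs *regs)
    {
            unsigned int nr = (unsigned int)regs->orig_ax;

            if (nr < IA32_NR_syscalls) {
                    /* Zero-extend all six registers to match the table prototype. */
                    regs->ax = ia32_sys_call_table[nr](
                            (unsigned int)regs->bx, (unsigned int)regs->cx,
                            (unsigned int)regs->dx, (unsigned int)regs->si,
                            (unsigned int)regs->di, (unsigned int)regs->bp);
            }
            /* Out-of-range numbers keep the -ENOSYS the asm stored in pt_regs->ax. */

            syscall_return_slowpath(regs);  /* signals, tracing and other exit work */
    }

    void do_syscall_32_irqs_off(struct pt_regs *regs)
    {
            local_irq_enable();
            do_syscall_32_irqs_on(regs);
    }
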
 
        ALIGN
 GLOBAL(stub32_clone)
-       leaq    sys_clone(%rip), %rax
        /*
         * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
         * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
@@ -593,12 +322,4 @@ GLOBAL(stub32_clone)
         * so we need to swap arguments here before calling it:
         */
        xchg    %r8, %rcx
-       jmp     ia32_ptregs_common
-
-       ALIGN
-ia32_ptregs_common:
-       SAVE_EXTRA_REGS 8
-       call    *%rax
-       RESTORE_EXTRA_REGS 8
-       ret
-END(ia32_ptregs_common)
+       jmp     sys_clone
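
To make the swap concrete, here are the two argument orders the comment above describes,
written out as illustrative C prototypes (the leading arguments and the names are filled
in for clarity and are not declarations from this tree):

    /* Illustrative prototypes only. */
    long clone_i386  (unsigned long flags, void *child_stack,
                      int *parent_tidptr, unsigned long tls_val, int *child_tidptr);
    long clone_x86_64(unsigned long flags, void *child_stack,
                      int *parent_tidptr, int *child_tidptr, unsigned long tls_val);

    /* The xchg %r8, %rcx above swaps arguments 4 and 5 so that the native
       64-bit sys_clone() sees child_tidptr and tls_val in its own order. */
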
index 8ea34f94e973c41322625743f92fe16bc0ed35fa..9a6649857106a0e0bc6bfe8ad0074562e394a5d1 100644 (file)
@@ -4,24 +4,21 @@
 #include <linux/sys.h>
 #include <linux/cache.h>
 #include <asm/asm-offsets.h>
+#include <asm/syscall.h>
 
 #ifdef CONFIG_IA32_EMULATION
 #define SYM(sym, compat) compat
 #else
 #define SYM(sym, compat) sym
-#define ia32_sys_call_table sys_call_table
-#define __NR_syscall_compat_max __NR_syscall_max
 #endif
 
-#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void SYM(sym, compat)(void) ;
+#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage long SYM(sym, compat)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_32.h>
 #undef __SYSCALL_I386
 
 #define __SYSCALL_I386(nr, sym, compat) [nr] = SYM(sym, compat),
 
-typedef asmlinkage void (*sys_call_ptr_t)(void);
-
-extern asmlinkage void sys_ni_syscall(void);
+extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
 __visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] = {
        /*
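
The double inclusion of asm/syscalls_32.h above works in two passes: the first pass
declares every entry with the new uniform six-argument prototype, and the second pass
emits designated initializers for the table.  A hedged illustration of what the
preprocessor produces for one line of the table (syscall 3, read, taken from the
syscall_32.tbl hunk below):

    /* Hypothetical expansion of: __SYSCALL_I386(3, sys_read, sys_read) */

    /* Pass 1 -- extern declaration with the uniform prototype: */
    extern asmlinkage long sys_read(unsigned long, unsigned long, unsigned long,
                                    unsigned long, unsigned long, unsigned long);

    /* Pass 2 -- designated initializer inside ia32_sys_call_table[]: */
    /*      [3] = sys_read,                                            */
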
index 4ac730b37f0bf4de7ef67fd7f869d9f756e207ed..41283d22be7a92db23e46fb427593a2e878ee37d 100644 (file)
 # define __SYSCALL_X32(nr, sym, compat) /* nothing */
 #endif
 
-#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
+#define __SYSCALL_64(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_64.h>
 #undef __SYSCALL_64
 
 #define __SYSCALL_64(nr, sym, compat) [nr] = sym,
 
-extern void sys_ni_syscall(void);
+extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
 asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
        /*
index 7663c455b9f650f67292e5a2c4664606fb1dbacb..caa2c712d1e70c5895d92cf856b91a02d9123083 100644 (file)
@@ -8,7 +8,7 @@
 #
 0      i386    restart_syscall         sys_restart_syscall
 1      i386    exit                    sys_exit
-2      i386    fork                    sys_fork                        stub32_fork
+2      i386    fork                    sys_fork                        sys_fork
 3      i386    read                    sys_read
 4      i386    write                   sys_write
 5      i386    open                    sys_open                        compat_sys_open
@@ -17,7 +17,7 @@
 8      i386    creat                   sys_creat
 9      i386    link                    sys_link
 10     i386    unlink                  sys_unlink
-11     i386    execve                  sys_execve                      stub32_execve
+11     i386    execve                  sys_execve                      compat_sys_execve
 12     i386    chdir                   sys_chdir
 13     i386    time                    sys_time                        compat_sys_time
 14     i386    mknod                   sys_mknod
 116    i386    sysinfo                 sys_sysinfo                     compat_sys_sysinfo
 117    i386    ipc                     sys_ipc                         compat_sys_ipc
 118    i386    fsync                   sys_fsync
-119    i386    sigreturn               sys_sigreturn                   stub32_sigreturn
+119    i386    sigreturn               sys_sigreturn                   sys32_sigreturn
 120    i386    clone                   sys_clone                       stub32_clone
 121    i386    setdomainname           sys_setdomainname
 122    i386    uname                   sys_newuname
 170    i386    setresgid               sys_setresgid16
 171    i386    getresgid               sys_getresgid16
 172    i386    prctl                   sys_prctl
-173    i386    rt_sigreturn            sys_rt_sigreturn                stub32_rt_sigreturn
+173    i386    rt_sigreturn            sys_rt_sigreturn                sys32_rt_sigreturn
 174    i386    rt_sigaction            sys_rt_sigaction                compat_sys_rt_sigaction
 175    i386    rt_sigprocmask          sys_rt_sigprocmask
 176    i386    rt_sigpending           sys_rt_sigpending               compat_sys_rt_sigpending
 187    i386    sendfile                sys_sendfile                    compat_sys_sendfile
 188    i386    getpmsg
 189    i386    putpmsg
-190    i386    vfork                   sys_vfork                       stub32_vfork
+190    i386    vfork                   sys_vfork                       sys_vfork
 191    i386    ugetrlimit              sys_getrlimit                   compat_sys_getrlimit
 192    i386    mmap2                   sys_mmap_pgoff
 193    i386    truncate64              sys_truncate64                  sys32_truncate64
 355    i386    getrandom               sys_getrandom
 356    i386    memfd_create            sys_memfd_create
 357    i386    bpf                     sys_bpf
-358    i386    execveat                sys_execveat                    stub32_execveat
+358    i386    execveat                sys_execveat                    compat_sys_execveat
 359    i386    socket                  sys_socket
 360    i386    socketpair              sys_socketpair
 361    i386    bind                    sys_bind
index a3d0767a6b294fb1218c305679589e8625f50a97..265c0ed6811800e3b03550cfded0c0cc743794f0 100644 (file)
@@ -19,9 +19,7 @@ obj-y                         += vma.o
 # vDSO images to build
 vdso_img-$(VDSO64-y)           += 64
 vdso_img-$(VDSOX32-y)          += x32
-vdso_img-$(VDSO32-y)           += 32-int80
-vdso_img-$(CONFIG_IA32_EMULATION)      += 32-syscall
-vdso_img-$(VDSO32-y)           += 32-sysenter
+vdso_img-$(VDSO32-y)           += 32
 
 obj-$(VDSO32-y)                        += vdso32-setup.o
 
@@ -69,7 +67,7 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
 
 $(vobjs): KBUILD_CFLAGS += $(CFL)
 
@@ -122,15 +120,6 @@ $(obj)/%.so: $(obj)/%.so.dbg
 $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
        $(call if_changed,vdso)
 
-#
-# Build multiple 32-bit vDSO images to choose from at boot time.
-#
-vdso32.so-$(VDSO32-y)          += int80
-vdso32.so-$(CONFIG_IA32_EMULATION)     += syscall
-vdso32.so-$(VDSO32-y)          += sysenter
-
-vdso32-images                  = $(vdso32.so-y:%=vdso32-%.so)
-
 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
 VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
 
@@ -139,14 +128,12 @@ VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
 override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
 
 targets += vdso32/vdso32.lds
-targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
+targets += vdso32/note.o vdso32/vclock_gettime.o vdso32/system_call.o
 targets += vdso32/vclock_gettime.o
 
-$(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
-
-KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
-$(vdso32-images:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
-$(vdso32-images:%=$(obj)/%.dbg): asflags-$(CONFIG_X86_64) += -m32
+KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO
+$(obj)/vdso32.so.dbg: KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
+$(obj)/vdso32.so.dbg: asflags-$(CONFIG_X86_64) += -m32
 
 KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
@@ -157,13 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-$(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
-$(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
-                                $(obj)/vdso32/vdso32.lds \
-                                $(obj)/vdso32/vclock_gettime.o \
-                                $(obj)/vdso32/note.o \
-                                $(obj)/vdso32/%.o
+$(obj)/vdso32.so.dbg: FORCE \
+                     $(obj)/vdso32/vdso32.lds \
+                     $(obj)/vdso32/vclock_gettime.o \
+                     $(obj)/vdso32/note.o \
+                     $(obj)/vdso32/system_call.o
        $(call if_changed,vdso)
 
 #
@@ -206,4 +193,4 @@ $(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
 PHONY += vdso_install $(vdso_img_insttargets)
 vdso_install: $(vdso_img_insttargets) FORCE
 
-clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80* vdso64* vdso-image-*.c vdsox32.so*
+clean-files := vdso32.so vdso32.so.dbg vdso64* vdso-image-*.c vdsox32.so*
index 8627db24a7f6a25993bc3cb5a53157cab7c7df1a..785d9922b106317e9285dbcde43417417f4911fb 100644 (file)
@@ -98,10 +98,10 @@ struct vdso_sym required_syms[] = {
                "VDSO_FAKE_SECTION_TABLE_END", false
        },
        {"VDSO32_NOTE_MASK", true},
-       {"VDSO32_SYSENTER_RETURN", true},
        {"__kernel_vsyscall", true},
        {"__kernel_sigreturn", true},
        {"__kernel_rt_sigreturn", true},
+       {"int80_landing_pad", true},
 };
 
 __attribute__((format(printf, 1, 2))) __attribute__((noreturn))
index e904c270573bf58054d5fd83f2b9c77b0ee81347..08a317a9ae4b582974ec4af4842461bddb762121 100644 (file)
@@ -48,35 +48,9 @@ __setup("vdso32=", vdso32_setup);
 __setup_param("vdso=", vdso_setup, vdso32_setup, 0);
 #endif
 
-#ifdef CONFIG_X86_64
-
-#define        vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SYSENTER32))
-#define        vdso32_syscall()        (boot_cpu_has(X86_FEATURE_SYSCALL32))
-
-#else  /* CONFIG_X86_32 */
-
-#define vdso32_sysenter()      (boot_cpu_has(X86_FEATURE_SEP))
-#define vdso32_syscall()       (0)
-
-#endif /* CONFIG_X86_64 */
-
-#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
-const struct vdso_image *selected_vdso32;
-#endif
-
 int __init sysenter_setup(void)
 {
-#ifdef CONFIG_COMPAT
-       if (vdso32_syscall())
-               selected_vdso32 = &vdso_image_32_syscall;
-       else
-#endif
-       if (vdso32_sysenter())
-               selected_vdso32 = &vdso_image_32_sysenter;
-       else
-               selected_vdso32 = &vdso_image_32_int80;
-
-       init_vdso_image(selected_vdso32);
+       init_vdso_image(&vdso_image_32);
 
        return 0;
 }
diff --git a/arch/x86/entry/vdso/vdso32/int80.S b/arch/x86/entry/vdso/vdso32/int80.S
deleted file mode 100644 (file)
index b15b7c0..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Code for the vDSO.  This version uses the old int $0x80 method.
- *
- * First get the common code for the sigreturn entry points.
- * This must come first.
- */
-#include "sigreturn.S"
-
-       .text
-       .globl __kernel_vsyscall
-       .type __kernel_vsyscall,@function
-       ALIGN
-__kernel_vsyscall:
-.LSTART_vsyscall:
-       int $0x80
-       ret
-.LEND_vsyscall:
-       .size __kernel_vsyscall,.-.LSTART_vsyscall
-       .previous
-
-       .section .eh_frame,"a",@progbits
-.LSTARTFRAMEDLSI:
-       .long .LENDCIEDLSI-.LSTARTCIEDLSI
-.LSTARTCIEDLSI:
-       .long 0                 /* CIE ID */
-       .byte 1                 /* Version number */
-       .string "zR"            /* NUL-terminated augmentation string */
-       .uleb128 1              /* Code alignment factor */
-       .sleb128 -4             /* Data alignment factor */
-       .byte 8                 /* Return address register column */
-       .uleb128 1              /* Augmentation value length */
-       .byte 0x1b              /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-       .byte 0x0c              /* DW_CFA_def_cfa */
-       .uleb128 4
-       .uleb128 4
-       .byte 0x88              /* DW_CFA_offset, column 0x8 */
-       .uleb128 1
-       .align 4
-.LENDCIEDLSI:
-       .long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
-.LSTARTFDEDLSI:
-       .long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
-       .long .LSTART_vsyscall-.        /* PC-relative start address */
-       .long .LEND_vsyscall-.LSTART_vsyscall
-       .uleb128 0
-       .align 4
-.LENDFDEDLSI:
-       .previous
-
-       /*
-        * Pad out the segment to match the size of the sysenter.S version.
-        */
-VDSO32_vsyscall_eh_frame_size = 0x40
-       .section .data,"aw",@progbits
-       .space VDSO32_vsyscall_eh_frame_size-(.LENDFDEDLSI-.LSTARTFRAMEDLSI), 0
-       .previous
diff --git a/arch/x86/entry/vdso/vdso32/syscall.S b/arch/x86/entry/vdso/vdso32/syscall.S
deleted file mode 100644 (file)
index 6b286bb..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Code for the vDSO.  This version uses the syscall instruction.
- *
- * First get the common code for the sigreturn entry points.
- * This must come first.
- */
-#define SYSCALL_ENTER_KERNEL   syscall
-#include "sigreturn.S"
-
-#include <asm/segment.h>
-
-       .text
-       .globl __kernel_vsyscall
-       .type __kernel_vsyscall,@function
-       ALIGN
-__kernel_vsyscall:
-.LSTART_vsyscall:
-       push    %ebp
-.Lpush_ebp:
-       movl    %ecx, %ebp
-       syscall
-       movl    %ebp, %ecx
-       popl    %ebp
-.Lpop_ebp:
-       ret
-.LEND_vsyscall:
-       .size __kernel_vsyscall,.-.LSTART_vsyscall
-
-       .section .eh_frame,"a",@progbits
-.LSTARTFRAME:
-       .long .LENDCIE-.LSTARTCIE
-.LSTARTCIE:
-       .long 0                 /* CIE ID */
-       .byte 1                 /* Version number */
-       .string "zR"            /* NUL-terminated augmentation string */
-       .uleb128 1              /* Code alignment factor */
-       .sleb128 -4             /* Data alignment factor */
-       .byte 8                 /* Return address register column */
-       .uleb128 1              /* Augmentation value length */
-       .byte 0x1b              /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-       .byte 0x0c              /* DW_CFA_def_cfa */
-       .uleb128 4
-       .uleb128 4
-       .byte 0x88              /* DW_CFA_offset, column 0x8 */
-       .uleb128 1
-       .align 4
-.LENDCIE:
-
-       .long .LENDFDE1-.LSTARTFDE1     /* Length FDE */
-.LSTARTFDE1:
-       .long .LSTARTFDE1-.LSTARTFRAME  /* CIE pointer */
-       .long .LSTART_vsyscall-.        /* PC-relative start address */
-       .long .LEND_vsyscall-.LSTART_vsyscall
-       .uleb128 0                      /* Augmentation length */
-       /* What follows are the instructions for the table generation.
-          We have to record all changes of the stack pointer.  */
-       .byte 0x40 + .Lpush_ebp-.LSTART_vsyscall /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .uleb128 8
-       .byte 0x85, 0x02        /* DW_CFA_offset %ebp -8 */
-       .byte 0x40 + .Lpop_ebp-.Lpush_ebp /* DW_CFA_advance_loc */
-       .byte 0xc5              /* DW_CFA_restore %ebp */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .uleb128 4
-       .align 4
-.LENDFDE1:
-       .previous
-
-       /*
-        * Pad out the segment to match the size of the sysenter.S version.
-        */
-VDSO32_vsyscall_eh_frame_size = 0x40
-       .section .data,"aw",@progbits
-       .space VDSO32_vsyscall_eh_frame_size-(.LENDFDE1-.LSTARTFRAME), 0
-       .previous
diff --git a/arch/x86/entry/vdso/vdso32/sysenter.S b/arch/x86/entry/vdso/vdso32/sysenter.S
deleted file mode 100644 (file)
index e354bce..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Code for the vDSO.  This version uses the sysenter instruction.
- *
- * First get the common code for the sigreturn entry points.
- * This must come first.
- */
-#include "sigreturn.S"
-
-/*
- * The caller puts arg2 in %ecx, which gets pushed. The kernel will use
- * %ecx itself for arg2. The pushing is because the sysexit instruction
- * (found in entry.S) requires that we clobber %ecx with the desired %esp.
- * User code might expect that %ecx is unclobbered though, as it would be
- * for returning via the iret instruction, so we must push and pop.
- *
- * The caller puts arg3 in %edx, which the sysexit instruction requires
- * for %eip. Thus, exactly as for arg2, we must push and pop.
- *
- * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter
- * instruction clobbers %esp, the user's %esp won't even survive entry
- * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
- * arg6 from the stack.
- *
- * You can not use this vsyscall for the clone() syscall because the
- * three words on the parent stack do not get copied to the child.
- */
-       .text
-       .globl __kernel_vsyscall
-       .type __kernel_vsyscall,@function
-       ALIGN
-__kernel_vsyscall:
-.LSTART_vsyscall:
-       push %ecx
-.Lpush_ecx:
-       push %edx
-.Lpush_edx:
-       push %ebp
-.Lenter_kernel:
-       movl %esp,%ebp
-       sysenter
-
-       /* 7: align return point with nop's to make disassembly easier */
-       .space 7,0x90
-
-       /* 14: System call restart point is here! (SYSENTER_RETURN-2) */
-       int $0x80
-       /* 16: System call normal return point is here! */
-VDSO32_SYSENTER_RETURN:        /* Symbol used by sysenter.c via vdso32-syms.h */
-       pop %ebp
-.Lpop_ebp:
-       pop %edx
-.Lpop_edx:
-       pop %ecx
-.Lpop_ecx:
-       ret
-.LEND_vsyscall:
-       .size __kernel_vsyscall,.-.LSTART_vsyscall
-       .previous
-
-       .section .eh_frame,"a",@progbits
-.LSTARTFRAMEDLSI:
-       .long .LENDCIEDLSI-.LSTARTCIEDLSI
-.LSTARTCIEDLSI:
-       .long 0                 /* CIE ID */
-       .byte 1                 /* Version number */
-       .string "zR"            /* NUL-terminated augmentation string */
-       .uleb128 1              /* Code alignment factor */
-       .sleb128 -4             /* Data alignment factor */
-       .byte 8                 /* Return address register column */
-       .uleb128 1              /* Augmentation value length */
-       .byte 0x1b              /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-       .byte 0x0c              /* DW_CFA_def_cfa */
-       .uleb128 4
-       .uleb128 4
-       .byte 0x88              /* DW_CFA_offset, column 0x8 */
-       .uleb128 1
-       .align 4
-.LENDCIEDLSI:
-       .long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
-.LSTARTFDEDLSI:
-       .long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
-       .long .LSTART_vsyscall-.        /* PC-relative start address */
-       .long .LEND_vsyscall-.LSTART_vsyscall
-       .uleb128 0
-       /* What follows are the instructions for the table generation.
-          We have to record all changes of the stack pointer.  */
-       .byte 0x40 + (.Lpush_ecx-.LSTART_vsyscall) /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .byte 0x08              /* RA at offset 8 now */
-       .byte 0x40 + (.Lpush_edx-.Lpush_ecx) /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .byte 0x0c              /* RA at offset 12 now */
-       .byte 0x40 + (.Lenter_kernel-.Lpush_edx) /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .byte 0x10              /* RA at offset 16 now */
-       .byte 0x85, 0x04        /* DW_CFA_offset %ebp -16 */
-       /* Finally the epilogue.  */
-       .byte 0x40 + (.Lpop_ebp-.Lenter_kernel) /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .byte 0x0c              /* RA at offset 12 now */
-       .byte 0xc5              /* DW_CFA_restore %ebp */
-       .byte 0x40 + (.Lpop_edx-.Lpop_ebp) /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .byte 0x08              /* RA at offset 8 now */
-       .byte 0x40 + (.Lpop_ecx-.Lpop_edx) /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .byte 0x04              /* RA at offset 4 now */
-       .align 4
-.LENDFDEDLSI:
-       .previous
-
-       /*
-        * Emit a symbol with the size of this .eh_frame data,
-        * to verify it matches the other versions.
-        */
-VDSO32_vsyscall_eh_frame_size = (.LENDFDEDLSI-.LSTARTFRAMEDLSI)
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
new file mode 100644 (file)
index 0000000..93bd845
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Code for the vDSO.  This version uses int $0x80 as the fallback and
+ * patches in SYSENTER or SYSCALL at boot when the CPU supports them.
+ */
+
+#include <asm/dwarf2.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
+
+/*
+ * First get the common code for the sigreturn entry points.
+ * This must come first.
+ */
+#include "sigreturn.S"
+
+       .text
+       .globl __kernel_vsyscall
+       .type __kernel_vsyscall,@function
+       ALIGN
+__kernel_vsyscall:
+       CFI_STARTPROC
+       /*
+        * Reshuffle regs so that any of the possible entry instructions
+        * will preserve enough state.
+        */
+       pushl   %edx
+       CFI_ADJUST_CFA_OFFSET   4
+       CFI_REL_OFFSET          edx, 0
+       pushl   %ecx
+       CFI_ADJUST_CFA_OFFSET   4
+       CFI_REL_OFFSET          ecx, 0
+       movl    %esp, %ecx
+
+#ifdef CONFIG_X86_64
+       /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
+       ALTERNATIVE_2 "", "sysenter", X86_FEATURE_SYSENTER32, \
+                         "syscall",  X86_FEATURE_SYSCALL32
+#else
+       ALTERNATIVE "", "sysenter", X86_FEATURE_SEP
+#endif
+
+       /* Enter using int $0x80 */
+       movl    (%esp), %ecx
+       int     $0x80
+GLOBAL(int80_landing_pad)
+
+       /* Restore ECX and EDX in case they were clobbered. */
+       popl    %ecx
+       CFI_RESTORE             ecx
+       CFI_ADJUST_CFA_OFFSET   -4
+       popl    %edx
+       CFI_RESTORE             edx
+       CFI_ADJUST_CFA_OFFSET   -4
+       ret
+       CFI_ENDPROC
+
+       .size __kernel_vsyscall,.-__kernel_vsyscall
+       .previous
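
As a usage sketch (an illustration under assumptions, not part of this change): 32-bit
user code reaches __kernel_vsyscall through the AT_SYSINFO auxiliary-vector entry, so
something like the following, built with -m32, exercises the unified entry no matter
which instruction the alternatives patched in:

    /* Hypothetical 32-bit-only illustration; build with: gcc -m32 vsys.c */
    #include <elf.h>          /* AT_SYSINFO */
    #include <stdio.h>
    #include <sys/auxv.h>     /* getauxval() */

    int main(void)
    {
            unsigned long entry = getauxval(AT_SYSINFO);
            long ret;

            if (!entry)
                    return 1;                 /* no vDSO syscall entry exposed */

            /* __NR_getpid is 20 on i386; it takes no arguments. */
            asm volatile("call *%[vsys]"
                         : "=a" (ret)
                         : "a" (20L), [vsys] "rm" (entry)
                         : "ecx", "edx", "memory", "cc");

            printf("getpid() via __kernel_vsyscall = %ld\n", ret);
            return 0;
    }
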
index 434543145d7889f3fbedd2b28e54df9212346af9..64df47148160239af2442b93ff4c32de941aa362 100644 (file)
@@ -180,21 +180,10 @@ up_fail:
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 static int load_vdso32(void)
 {
-       int ret;
-
        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;
 
-       ret = map_vdso(selected_vdso32, false);
-       if (ret)
-               return ret;
-
-       if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
-               current_thread_info()->sysenter_return =
-                       current->mm->context.vdso +
-                       selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
-
-       return 0;
+       return map_vdso(&vdso_image_32, false);
 }
 #endif
 
index b160c0c6baed54c38cc0efbf15ff67075ec869c3..174c2549939d6d184b3c0e01e9d08e23356dacbe 100644 (file)
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
+static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
+#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
+       NATIVE;
+#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
+       NONE;
+#else
+       EMULATE;
+#endif
 
 static int __init vsyscall_setup(char *str)
 {
index a0a19b7ba22dc242d2bb957b71aa617e6c75104e..e6a5c275cd3f62c858a33641e5b68f87909ef84d 100644 (file)
@@ -289,7 +289,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
                /* Return stub is in 32bit vsyscall page */
                if (current->mm->context.vdso)
                        restorer = current->mm->context.vdso +
-                               selected_vdso32->sym___kernel_sigreturn;
+                               vdso_image_32.sym___kernel_sigreturn;
                else
                        restorer = &frame->retcode;
        }
@@ -368,7 +368,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
                        restorer = ksig->ka.sa.sa_restorer;
                else
                        restorer = current->mm->context.vdso +
-                               selected_vdso32->sym___kernel_rt_sigreturn;
+                               vdso_image_32.sym___kernel_rt_sigreturn;
                put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
 
                /*
index 3a45668f6dc38312bc9f5761214f076a144d00d4..94c18ebfd68cafaec66c6fc0b8c31ccdf37641fb 100644 (file)
 #include <asm/mpspec.h>
 #include <asm/realmode.h>
 
+#ifdef CONFIG_ACPI_APEI
+# include <asm/pgtable_types.h>
+#endif
+
 #ifdef CONFIG_ACPI
 extern int acpi_lapic;
 extern int acpi_ioapic;
@@ -147,4 +151,23 @@ extern int x86_acpi_numa_init(void);
 
 #define acpi_unlazy_tlb(x)     leave_mm(x)
 
+#ifdef CONFIG_ACPI_APEI
+static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
+{
+       /*
+        * We currently have no way to look up the EFI memory map
+        * attributes for a region in a consistent way, because the
+        * memmap is discarded after efi_free_boot_services(). So if
+        * you call efi_mem_attributes() during boot and at runtime,
+        * you could theoretically see different attributes.
+        *
+        * Since we have yet to see any x86 platforms that require
+        * anything other than PAGE_KERNEL (some arm64 platforms
+        * require the equivalent of PAGE_KERNEL_NOCACHE), return that
+        * until we know differently.
+        */
+        return PAGE_KERNEL;
+}
+#endif
+
 #endif /* _ASM_X86_ACPI_H */
index 1a5da2e63aeeebc062bd0f5c08b36e4bda32707d..3c56ef1ae068e8c52b829bab6719d47fba0ef8bd 100644 (file)
@@ -81,7 +81,7 @@ static inline struct amd_northbridge *node_to_amd_nb(int node)
        return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
 }
 
-static inline u16 amd_get_node_id(struct pci_dev *pdev)
+static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
 {
        struct pci_dev *misc;
        int i;
index ebf6d5e5668c98b823d4e6b8fe508e4d1b8e4995..a30316bf801ab9da14753425de67670f71feece0 100644 (file)
@@ -115,6 +115,59 @@ static inline bool apic_is_x2apic_enabled(void)
        return msr & X2APIC_ENABLE;
 }
 
+extern void enable_IR_x2apic(void);
+
+extern int get_physical_broadcast(void);
+
+extern int lapic_get_maxlvt(void);
+extern void clear_local_APIC(void);
+extern void disconnect_bsp_APIC(int virt_wire_setup);
+extern void disable_local_APIC(void);
+extern void lapic_shutdown(void);
+extern void sync_Arb_IDs(void);
+extern void init_bsp_APIC(void);
+extern void setup_local_APIC(void);
+extern void init_apic_mappings(void);
+void register_lapic_address(unsigned long address);
+extern void setup_boot_APIC_clock(void);
+extern void setup_secondary_APIC_clock(void);
+extern int APIC_init_uniprocessor(void);
+
+#ifdef CONFIG_X86_64
+static inline int apic_force_enable(unsigned long addr)
+{
+       return -1;
+}
+#else
+extern int apic_force_enable(unsigned long addr);
+#endif
+
+extern int apic_bsp_setup(bool upmode);
+extern void apic_ap_setup(void);
+
+/*
+ * On 32bit this is mach-xxx local
+ */
+#ifdef CONFIG_X86_64
+extern int apic_is_clustered_box(void);
+#else
+static inline int apic_is_clustered_box(void)
+{
+       return 0;
+}
+#endif
+
+extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
+
+#else /* !CONFIG_X86_LOCAL_APIC */
+static inline void lapic_shutdown(void) { }
+#define local_apic_timer_c2_ok         1
+static inline void init_apic_mappings(void) { }
+static inline void disable_local_APIC(void) { }
+# define setup_boot_APIC_clock x86_init_noop
+# define setup_secondary_APIC_clock x86_init_noop
+#endif /* !CONFIG_X86_LOCAL_APIC */
+
 #ifdef CONFIG_X86_X2APIC
 /*
  * Make previous memory operations globally visible before
@@ -186,67 +239,14 @@ static inline int x2apic_enabled(void)
 }
 
 #define x2apic_supported()     (cpu_has_x2apic)
-#else
+#else /* !CONFIG_X86_X2APIC */
 static inline void check_x2apic(void) { }
 static inline void x2apic_setup(void) { }
 static inline int x2apic_enabled(void) { return 0; }
 
 #define x2apic_mode            (0)
 #define        x2apic_supported()      (0)
-#endif
-
-extern void enable_IR_x2apic(void);
-
-extern int get_physical_broadcast(void);
-
-extern int lapic_get_maxlvt(void);
-extern void clear_local_APIC(void);
-extern void disconnect_bsp_APIC(int virt_wire_setup);
-extern void disable_local_APIC(void);
-extern void lapic_shutdown(void);
-extern void sync_Arb_IDs(void);
-extern void init_bsp_APIC(void);
-extern void setup_local_APIC(void);
-extern void init_apic_mappings(void);
-void register_lapic_address(unsigned long address);
-extern void setup_boot_APIC_clock(void);
-extern void setup_secondary_APIC_clock(void);
-extern int APIC_init_uniprocessor(void);
-
-#ifdef CONFIG_X86_64
-static inline int apic_force_enable(unsigned long addr)
-{
-       return -1;
-}
-#else
-extern int apic_force_enable(unsigned long addr);
-#endif
-
-extern int apic_bsp_setup(bool upmode);
-extern void apic_ap_setup(void);
-
-/*
- * On 32bit this is mach-xxx local
- */
-#ifdef CONFIG_X86_64
-extern int apic_is_clustered_box(void);
-#else
-static inline int apic_is_clustered_box(void)
-{
-       return 0;
-}
-#endif
-
-extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
-
-#else /* !CONFIG_X86_LOCAL_APIC */
-static inline void lapic_shutdown(void) { }
-#define local_apic_timer_c2_ok         1
-static inline void init_apic_mappings(void) { }
-static inline void disable_local_APIC(void) { }
-# define setup_boot_APIC_clock x86_init_noop
-# define setup_secondary_APIC_clock x86_init_noop
-#endif /* !CONFIG_X86_LOCAL_APIC */
+#endif /* !CONFIG_X86_X2APIC */
 
 #ifdef CONFIG_X86_64
 #define        SET_APIC_ID(x)          (apic->set_apic_id(x))
index fb52aa644aabb4ed8ff15eb26dca1944648786c0..ae5fb83e6d91c9bf459b02f143a9b583a4515ed7 100644 (file)
@@ -24,7 +24,7 @@
  */
 static __always_inline int atomic_read(const atomic_t *v)
 {
-       return ACCESS_ONCE((v)->counter);
+       return READ_ONCE((v)->counter);
 }
 
 /**
@@ -36,7 +36,7 @@ static __always_inline int atomic_read(const atomic_t *v)
  */
 static __always_inline void atomic_set(atomic_t *v, int i)
 {
-       v->counter = i;
+       WRITE_ONCE(v->counter, i);
 }
 
 /**
index 50e33eff58de7fde09c770e229c9a30fccf49d03..037351022f5483c99a3f0a42dd4646f1eb0a3055 100644 (file)
@@ -18,7 +18,7 @@
  */
 static inline long atomic64_read(const atomic64_t *v)
 {
-       return ACCESS_ONCE((v)->counter);
+       return READ_ONCE((v)->counter);
 }
 
 /**
@@ -30,7 +30,7 @@ static inline long atomic64_read(const atomic64_t *v)
  */
 static inline void atomic64_set(atomic64_t *v, long i)
 {
-       v->counter = i;
+       WRITE_ONCE(v->counter, i);
 }
 
 /**
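
READ_ONCE()/WRITE_ONCE() replace ACCESS_ONCE() here; unlike ACCESS_ONCE(), they also work
correctly for non-scalar types, and for scalars they boil down to the same volatile
access.  A rough, hedged illustration of the scalar case (the real macros in
<linux/compiler.h> do considerably more):

    /* Illustrative only -- not the kernel's actual definitions. */
    #define my_read_once(x)                                          \
            (*(const volatile __typeof__(x) *)&(x))
    #define my_write_once(x, val)                                    \
            (*(volatile __typeof__(x) *)&(x) = (val))

    /* Usage mirrors the hunks above:
     *      return my_read_once(v->counter);
     *      my_write_once(v->counter, i);
     */
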
index e6cf2ad350d15a8e6ca207a2618c77d820aacc22..9727b3b48bd174c8ae8297bd94e897484375618a 100644 (file)
 #define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_HWP                ( 7*32+ 10) /* "hwp" Intel HWP */
-#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
+#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
 #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
 #define X86_FEATURE_HWP_EPP    ( 7*32+13) /* Intel HWP_EPP */
 #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
new file mode 100644 (file)
index 0000000..b7a1ab8
--- /dev/null
@@ -0,0 +1,84 @@
+#ifndef _ASM_X86_DWARF2_H
+#define _ASM_X86_DWARF2_H
+
+#ifndef __ASSEMBLY__
+#warning "asm/dwarf2.h should only be included in pure assembly files"
+#endif
+
+/*
+ * Macros for dwarf2 CFI unwind table entries.
+ * See "as.info" for details on these pseudo ops. Unfortunately
+ * they are only supported in very new binutils, so define them
+ * away for older versions.
+ */
+
+#ifdef CONFIG_AS_CFI
+
+#define CFI_STARTPROC          .cfi_startproc
+#define CFI_ENDPROC            .cfi_endproc
+#define CFI_DEF_CFA            .cfi_def_cfa
+#define CFI_DEF_CFA_REGISTER   .cfi_def_cfa_register
+#define CFI_DEF_CFA_OFFSET     .cfi_def_cfa_offset
+#define CFI_ADJUST_CFA_OFFSET  .cfi_adjust_cfa_offset
+#define CFI_OFFSET             .cfi_offset
+#define CFI_REL_OFFSET         .cfi_rel_offset
+#define CFI_REGISTER           .cfi_register
+#define CFI_RESTORE            .cfi_restore
+#define CFI_REMEMBER_STATE     .cfi_remember_state
+#define CFI_RESTORE_STATE      .cfi_restore_state
+#define CFI_UNDEFINED          .cfi_undefined
+#define CFI_ESCAPE             .cfi_escape
+
+#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
+#define CFI_SIGNAL_FRAME       .cfi_signal_frame
+#else
+#define CFI_SIGNAL_FRAME
+#endif
+
+#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
+#ifndef BUILD_VDSO
+       /*
+        * Emit CFI data in .debug_frame sections, not .eh_frame sections.
+        * The latter we currently just discard since we don't do DWARF
+        * unwinding at runtime.  So only the offline DWARF information is
+        * useful to anyone.  Note we should not use this directive if
+        * vmlinux.lds.S gets changed so it doesn't discard .eh_frame.
+        */
+       .cfi_sections .debug_frame
+#else
+        /*
+         * For the vDSO, emit both runtime unwind information and debug
+         * symbols for the .dbg file.
+         */
+       .cfi_sections .eh_frame, .debug_frame
+#endif
+#endif
+
+#else
+
+/*
+ * Due to the structure of pre-existing code, don't use the assembler line
+ * comment character # to ignore the arguments. Instead, use a dummy macro.
+ */
+.macro cfi_ignore a=0, b=0, c=0, d=0
+.endm
+
+#define CFI_STARTPROC          cfi_ignore
+#define CFI_ENDPROC            cfi_ignore
+#define CFI_DEF_CFA            cfi_ignore
+#define CFI_DEF_CFA_REGISTER   cfi_ignore
+#define CFI_DEF_CFA_OFFSET     cfi_ignore
+#define CFI_ADJUST_CFA_OFFSET  cfi_ignore
+#define CFI_OFFSET             cfi_ignore
+#define CFI_REL_OFFSET         cfi_ignore
+#define CFI_REGISTER           cfi_ignore
+#define CFI_RESTORE            cfi_ignore
+#define CFI_REMEMBER_STATE     cfi_ignore
+#define CFI_RESTORE_STATE      cfi_ignore
+#define CFI_UNDEFINED          cfi_ignore
+#define CFI_ESCAPE             cfi_ignore
+#define CFI_SIGNAL_FRAME       cfi_ignore
+
+#endif
+
+#endif /* _ASM_X86_DWARF2_H */
index ab5f1d447ef981088afc70e92c353b0dbe75f27e..0010c78c4998cf0702299ea2f8a9229e09bb6438 100644 (file)
@@ -86,6 +86,7 @@ extern u64 asmlinkage efi_call(void *fp, ...);
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
                                        u32 type, u64 attribute);
 
+#ifdef CONFIG_KASAN
 /*
  * CONFIG_KASAN may redefine memset to __memset.  __memset function is present
  * only in kernel binary.  Since the EFI stub linked into a separate binary it
@@ -95,6 +96,7 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
 #undef memcpy
 #undef memset
 #undef memmove
+#endif
 
 #endif /* CONFIG_X86_32 */
 
@@ -103,6 +105,7 @@ extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern int __init efi_memblock_x86_reserve_range(void);
 extern pgd_t * __init efi_call_phys_prolog(void);
 extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
+extern void __init efi_print_memmap(void);
 extern void __init efi_unmap_memmap(void);
 extern void __init efi_memory_uc(u64 addr, unsigned long size);
 extern void __init efi_map_region(efi_memory_desc_t *md);
index 141c561f46649209915158211c31b038ee061ac8..1514753fd43553e079696712b48a8d08b6966e98 100644 (file)
@@ -171,11 +171,11 @@ do {                                              \
 static inline void elf_common_init(struct thread_struct *t,
                                   struct pt_regs *regs, const u16 ds)
 {
-       /* Commented-out registers are cleared in stub_execve */
-       /*regs->ax = regs->bx =*/ regs->cx = regs->dx = 0;
-       regs->si = regs->di /*= regs->bp*/ = 0;
+       /* ax gets execve's return value. */
+       /*regs->ax = */ regs->bx = regs->cx = regs->dx = 0;
+       regs->si = regs->di = regs->bp = 0;
        regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
-       /*regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;*/
+       regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
        t->fs = t->gs = 0;
        t->fsindex = t->gsindex = 0;
        t->ds = t->es = ds;
@@ -328,7 +328,7 @@ else                                                                        \
 
 #define VDSO_ENTRY                                                     \
        ((unsigned long)current->mm->context.vdso +                     \
-        selected_vdso32->sym___kernel_vsyscall)
+        vdso_image_32.sym___kernel_vsyscall)
 
 struct linux_binprm;
 
index 5fa9fb0f8809902a8e15f6f8fa1d245d379a6a16..cc285ec4b2c1e94906901b3462d9655b4df28cf1 100644 (file)
 /* hpet memory map physical address */
 extern unsigned long hpet_address;
 extern unsigned long force_hpet_address;
-extern int boot_hpet_disable;
+extern bool boot_hpet_disable;
 extern u8 hpet_blockid;
-extern int hpet_force_user;
-extern u8 hpet_msi_disable;
+extern bool hpet_force_user;
+extern bool hpet_msi_disable;
 extern int is_hpet_enabled(void);
 extern int hpet_enable(void);
 extern void hpet_disable(void);
index b130d59406fb12ab3a75d5a2a8631b202be50ab3..e5f5dc9787d5fa64beac6bca4a4854c05d03dc4a 100644 (file)
@@ -29,11 +29,5 @@ extern void show_trace(struct task_struct *t, struct pt_regs *regs,
 extern void __show_regs(struct pt_regs *regs, int all);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
-#ifdef CONFIG_KEXEC_CORE
-extern int in_crash_kexec;
-#else
-/* no crash dump is ever in progress if no crash kernel can be kexec'd */
-#define in_crash_kexec 0
-#endif
 
 #endif /* _ASM_X86_KDEBUG_H */
index 2beee03820889b6c6b436e884a4e83e023d76f9a..3a36ee704c307414b305e1cac21ec9aba4de5872 100644 (file)
@@ -1226,10 +1226,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
 int kvm_is_in_guest(void);
 
-int __x86_set_memory_region(struct kvm *kvm,
-                           const struct kvm_userspace_memory_region *mem);
-int x86_set_memory_region(struct kvm *kvm,
-                         const struct kvm_userspace_memory_region *mem);
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
 
index 2dbc0bf2b9f3f7848d10025bb4bcfcf6d36a4d2d..2ea4527e462f133398f178a885a4b920892cd3f5 100644 (file)
@@ -123,19 +123,27 @@ struct mca_config {
 };
 
 struct mce_vendor_flags {
-                       /*
-                        * overflow recovery cpuid bit indicates that overflow
-                        * conditions are not fatal
-                        */
-       __u64           overflow_recov  : 1,
-
-                       /*
-                        * SUCCOR stands for S/W UnCorrectable error COntainment
-                        * and Recovery. It indicates support for data poisoning
-                        * in HW and deferred error interrupts.
-                        */
-                       succor          : 1,
-                       __reserved_0    : 62;
+       /*
+        * When set, indicates that overflow conditions are not fatal.
+        */
+       __u64 overflow_recov    : 1,
+
+       /*
+        * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
+        * Recovery. It indicates support for data poisoning in HW and deferred
+        * error interrupts.
+        */
+             succor            : 1,
+
+       /*
+        * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
+        * the register space for each MCA bank and also increases number of
+        * banks. Also, to accommodate the new banks and registers, the MCA
+        * register space is moved to a new MSR range.
+        */
+             smca              : 1,
+
+             __reserved_0      : 61;
 };
 extern struct mce_vendor_flags mce_flags;
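
As a hedged usage sketch (illustrative, not code from this tree): consumers test these
capability bits on the shared mce_flags object, along these lines:

    /* Illustrative only. */
    #include <linux/printk.h>

    static void report_mce_vendor_caps(void)
    {
            if (mce_flags.overflow_recov)
                    pr_info("MCE: overflow conditions are recoverable\n");
            if (mce_flags.succor)
                    pr_info("MCE: deferred error interrupts supported\n");
            if (mce_flags.smca)
                    pr_info("MCE: Scalable MCA register layout in use\n");
    }
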
 
index 9e6278c7140eac3cac2e5841002300c3b903e457..34e62b1dcfce46a606fbe78d7d55131513d859d3 100644 (file)
@@ -27,7 +27,6 @@ struct cpu_signature {
 struct device;
 
 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
-extern bool dis_ucode_ldr;
 
 struct microcode_ops {
        enum ucode_state (*request_microcode_user) (int cpu,
@@ -55,6 +54,12 @@ struct ucode_cpu_info {
 };
 extern struct ucode_cpu_info ucode_cpu_info[];
 
+#ifdef CONFIG_MICROCODE
+int __init microcode_init(void);
+#else
+static inline int __init microcode_init(void) { return 0; }
+#endif
+
 #ifdef CONFIG_MICROCODE_INTEL
 extern struct microcode_ops * __init init_intel_microcode(void);
 #else
@@ -75,7 +80,6 @@ static inline struct microcode_ops * __init init_amd_microcode(void)
 static inline void __exit exit_amd_microcode(void) {}
 #endif
 
-#ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 
 #define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
@@ -150,22 +154,18 @@ static inline unsigned int x86_model(unsigned int sig)
        return model;
 }
 
+#ifdef CONFIG_MICROCODE
 extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
 #else
-static inline void __init load_ucode_bsp(void) {}
-static inline void load_ucode_ap(void) {}
-static inline int __init save_microcode_in_initrd(void)
-{
-       return 0;
-}
-static inline void reload_early_microcode(void) {}
-static inline bool get_builtin_firmware(struct cpio_data *cd, const char *name)
-{
-       return false;
-}
+static inline void __init load_ucode_bsp(void)                 { }
+static inline void load_ucode_ap(void)                         { }
+static inline int __init save_microcode_in_initrd(void)                { return 0; }
+static inline void reload_early_microcode(void)                        { }
+static inline bool
+get_builtin_firmware(struct cpio_data *cd, const char *name)   { return false; }
 #endif
 #endif /* _ASM_X86_MICROCODE_H */
index ac6d328977a67e4fd0ae5674e2d7e8a30539ff92..adfc847a395ec2d94e4d46993379a0a7a77f59e0 100644 (file)
@@ -64,7 +64,7 @@ extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, s
 #define PATCH_MAX_SIZE PAGE_SIZE
 extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
 
-#ifdef CONFIG_MICROCODE_AMD_EARLY
+#ifdef CONFIG_MICROCODE_AMD
 extern void __init load_ucode_amd_bsp(unsigned int family);
 extern void load_ucode_amd_ap(void);
 extern int __init save_microcode_in_initrd_amd(void);
@@ -76,4 +76,5 @@ static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
 void reload_ucode_amd(void) {}
 #endif
 
+extern bool check_current_patch_level(u32 *rev, bool early);
 #endif /* _ASM_X86_MICROCODE_AMD_H */
index 7991c606125d01b137a6f0e1ed16f1e8c1d86015..8559b0102ea1f14dfc840c28df6e1fcc0f813965 100644 (file)
@@ -57,7 +57,7 @@ extern int has_newer_microcode(void *mc, unsigned int csig, int cpf, int rev);
 extern int microcode_sanity_check(void *mc, int print_err);
 extern int find_matching_signature(void *mc, unsigned int csig, int cpf);
 
-#ifdef CONFIG_MICROCODE_INTEL_EARLY
+#ifdef CONFIG_MICROCODE_INTEL
 extern void __init load_ucode_intel_bsp(void);
 extern void load_ucode_intel_ap(void);
 extern void show_ucode_info_early(void);
@@ -71,13 +71,9 @@ static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL;
 static inline void reload_ucode_intel(void) {}
 #endif
 
-#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+#ifdef CONFIG_HOTPLUG_CPU
 extern int save_mc_for_early(u8 *mc);
 #else
-static inline int save_mc_for_early(u8 *mc)
-{
-       return 0;
-}
+static inline int save_mc_for_early(u8 *mc) { return 0; }
 #endif
-
 #endif /* _ASM_X86_MICROCODE_INTEL_H */
index b98b471a3b7e660c35910ec5bde5d3580af3f498..b8c14bb7fc8f37ee004dc10342a99c8579110e1d 100644 (file)
 #define DEBUGCTLMSR_BTS_OFF_USR                (1UL << 10)
 #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
 
+#define MSR_PEBS_FRONTEND              0x000003f7
+
 #define MSR_IA32_POWER_CTL             0x000001fc
 
 #define MSR_IA32_MC0_CTL               0x00000400
index 1c6f7f6212c14066e7522162e16494f2f7feb437..c64373a2d731b2bddfde9656d7f5c1ddc67d676d 100644 (file)
@@ -14,6 +14,7 @@
 #ifndef _ASM_X86_NUMACHIP_NUMACHIP_H
 #define _ASM_X86_NUMACHIP_NUMACHIP_H
 
+extern u8 numachip_system;
 extern int __init pci_numachip_init(void);
 
 #endif /* _ASM_X86_NUMACHIP_NUMACHIP_H */
index 660f843df928c12b1afc0816ae412e86563b1952..29719eecdc2e6119647ae2dc4d5431cea86aa5ac 100644 (file)
 #ifndef _ASM_X86_NUMACHIP_NUMACHIP_CSR_H
 #define _ASM_X86_NUMACHIP_NUMACHIP_CSR_H
 
-#include <linux/numa.h>
-#include <linux/percpu.h>
+#include <linux/smp.h>
 #include <linux/io.h>
-#include <linux/swab.h>
-#include <asm/types.h>
-#include <asm/processor.h>
 
 #define CSR_NODE_SHIFT         16
 #define CSR_NODE_BITS(p)       (((unsigned long)(p)) << CSR_NODE_SHIFT)
 
 /* 32K CSR space, b15 indicates geo/non-geo */
 #define CSR_OFFSET_MASK        0x7fffUL
-
-/* Global CSR space covers all 4K possible nodes with 64K CSR space per node */
-#define NUMACHIP_GCSR_BASE     0x3fff00000000ULL
-#define NUMACHIP_GCSR_LIM      0x3fff0fffffffULL
-#define NUMACHIP_GCSR_SIZE     (NUMACHIP_GCSR_LIM - NUMACHIP_GCSR_BASE + 1)
+#define CSR_G0_NODE_IDS (0x008 + (0 << 12))
+#define CSR_G3_EXT_IRQ_GEN (0x030 + (3 << 12))
 
 /*
  * Local CSR space starts in global CSR space with "nodeid" = 0xfff0, however
 #define NUMACHIP_LCSR_BASE     0x3ffffe000000ULL
 #define NUMACHIP_LCSR_LIM      0x3fffffffffffULL
 #define NUMACHIP_LCSR_SIZE     (NUMACHIP_LCSR_LIM - NUMACHIP_LCSR_BASE + 1)
-
-static inline void *gcsr_address(int node, unsigned long offset)
-{
-       return __va(NUMACHIP_GCSR_BASE | (1UL << 15) |
-               CSR_NODE_BITS(node & CSR_NODE_MASK) | (offset & CSR_OFFSET_MASK));
-}
+#define NUMACHIP_LAPIC_BITS    8
 
 static inline void *lcsr_address(unsigned long offset)
 {
@@ -54,114 +42,57 @@ static inline void *lcsr_address(unsigned long offset)
                CSR_NODE_BITS(0xfff0) | (offset & CSR_OFFSET_MASK));
 }
 
-static inline unsigned int read_gcsr(int node, unsigned long offset)
+static inline unsigned int read_lcsr(unsigned long offset)
 {
-       return swab32(readl(gcsr_address(node, offset)));
+       return swab32(readl(lcsr_address(offset)));
 }
 
-static inline void write_gcsr(int node, unsigned long offset, unsigned int val)
+static inline void write_lcsr(unsigned long offset, unsigned int val)
 {
-       writel(swab32(val), gcsr_address(node, offset));
+       writel(swab32(val), lcsr_address(offset));
 }
 
-static inline unsigned int read_lcsr(unsigned long offset)
+/*
+ * On NumaChip2, local CSR space is 16MB and starts at fixed offset below 4G
+ */
+
+#define NUMACHIP2_LCSR_BASE       0xf0000000UL
+#define NUMACHIP2_LCSR_SIZE       0x1000000UL
+#define NUMACHIP2_APIC_ICR        0x100000
+#define NUMACHIP2_TIMER_DEADLINE  0x200000
+#define NUMACHIP2_TIMER_INT       0x200008
+#define NUMACHIP2_TIMER_NOW       0x200018
+#define NUMACHIP2_TIMER_RESET     0x200020
+
+static inline void __iomem *numachip2_lcsr_address(unsigned long offset)
 {
-       return swab32(readl(lcsr_address(offset)));
+       return (void __iomem *)__va(NUMACHIP2_LCSR_BASE |
+               (offset & (NUMACHIP2_LCSR_SIZE - 1)));
 }
 
-static inline void write_lcsr(unsigned long offset, unsigned int val)
+static inline u32 numachip2_read32_lcsr(unsigned long offset)
 {
-       writel(swab32(val), lcsr_address(offset));
+       return readl(numachip2_lcsr_address(offset));
 }
 
-/* ========================================================================= */
-/*                   CSR_G0_STATE_CLEAR                                      */
-/* ========================================================================= */
-
-#define CSR_G0_STATE_CLEAR (0x000 + (0 << 12))
-union numachip_csr_g0_state_clear {
-       unsigned int v;
-       struct numachip_csr_g0_state_clear_s {
-               unsigned int _state:2;
-               unsigned int _rsvd_2_6:5;
-               unsigned int _lost:1;
-               unsigned int _rsvd_8_31:24;
-       } s;
-};
-
-/* ========================================================================= */
-/*                   CSR_G0_NODE_IDS                                         */
-/* ========================================================================= */
+static inline u64 numachip2_read64_lcsr(unsigned long offset)
+{
+       return readq(numachip2_lcsr_address(offset));
+}
 
-#define CSR_G0_NODE_IDS (0x008 + (0 << 12))
-union numachip_csr_g0_node_ids {
-       unsigned int v;
-       struct numachip_csr_g0_node_ids_s {
-               unsigned int _initialid:16;
-               unsigned int _nodeid:12;
-               unsigned int _rsvd_28_31:4;
-       } s;
-};
-
-/* ========================================================================= */
-/*                   CSR_G3_EXT_IRQ_GEN                                      */
-/* ========================================================================= */
+static inline void numachip2_write32_lcsr(unsigned long offset, u32 val)
+{
+       writel(val, numachip2_lcsr_address(offset));
+}
 
-#define CSR_G3_EXT_IRQ_GEN (0x030 + (3 << 12))
-union numachip_csr_g3_ext_irq_gen {
-       unsigned int v;
-       struct numachip_csr_g3_ext_irq_gen_s {
-               unsigned int _vector:8;
-               unsigned int _msgtype:3;
-               unsigned int _index:5;
-               unsigned int _destination_apic_id:16;
-       } s;
-};
-
-/* ========================================================================= */
-/*                   CSR_G3_EXT_IRQ_STATUS                                   */
-/* ========================================================================= */
-
-#define CSR_G3_EXT_IRQ_STATUS (0x034 + (3 << 12))
-union numachip_csr_g3_ext_irq_status {
-       unsigned int v;
-       struct numachip_csr_g3_ext_irq_status_s {
-               unsigned int _result:32;
-       } s;
-};
-
-/* ========================================================================= */
-/*                   CSR_G3_EXT_IRQ_DEST                                     */
-/* ========================================================================= */
-
-#define CSR_G3_EXT_IRQ_DEST (0x038 + (3 << 12))
-union numachip_csr_g3_ext_irq_dest {
-       unsigned int v;
-       struct numachip_csr_g3_ext_irq_dest_s {
-               unsigned int _irq:8;
-               unsigned int _rsvd_8_31:24;
-       } s;
-};
-
-/* ========================================================================= */
-/*                   CSR_G3_NC_ATT_MAP_SELECT                                */
-/* ========================================================================= */
-
-#define CSR_G3_NC_ATT_MAP_SELECT (0x7fc + (3 << 12))
-union numachip_csr_g3_nc_att_map_select {
-       unsigned int v;
-       struct numachip_csr_g3_nc_att_map_select_s {
-               unsigned int _upper_address_bits:4;
-               unsigned int _select_ram:4;
-               unsigned int _rsvd_8_31:24;
-       } s;
-};
-
-/* ========================================================================= */
-/*                   CSR_G3_NC_ATT_MAP_SELECT_0-255                          */
-/* ========================================================================= */
-
-#define CSR_G3_NC_ATT_MAP_SELECT_0 (0x800 + (3 << 12))
+static inline void numachip2_write64_lcsr(unsigned long offset, u64 val)
+{
+       writeq(val, numachip2_lcsr_address(offset));
+}
 
-#endif /* _ASM_X86_NUMACHIP_NUMACHIP_CSR_H */
+static inline unsigned int numachip2_timer(void)
+{
+       return (smp_processor_id() % 48) << 6;
+}
 
+#endif /* _ASM_X86_NUMACHIP_NUMACHIP_CSR_H */
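For context (illustrative only, names invented): the NumaChip1 helpers above byte-swap around readl()/writel() because those CSRs are big-endian, whereas the new numachip2_*_lcsr() accessors are plain little-endian MMIO. A sketch of the byte-swapped pattern:

        /* Read a 32-bit big-endian device register on a little-endian CPU. */
        static inline u32 example_read_be32(void __iomem *reg)
        {
                return swab32(readl(reg));      /* readl() returns the raw LE value */
        }
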
index b12f81022a6b2c574f9c037e616f90373502c24e..01bcde84d3e40fb36e16625f0c1024f1291bb9ae 100644 (file)
@@ -30,12 +30,9 @@ static __always_inline void preempt_count_set(int pc)
 /*
  * must be macros to avoid header recursion hell
  */
-#define init_task_preempt_count(p) do { \
-       task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
-} while (0)
+#define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-       task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
        per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
 } while (0)
 
index 19577dd325fa04443089897863cf59944a78354b..b55f309605549d6226ed6c64023eafc88c67112b 100644 (file)
@@ -556,12 +556,12 @@ static inline unsigned int cpuid_edx(unsigned int op)
 }
 
 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
+static __always_inline void rep_nop(void)
 {
        asm volatile("rep; nop" ::: "memory");
 }
 
-static inline void cpu_relax(void)
+static __always_inline void cpu_relax(void)
 {
        rep_nop();
 }
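Illustrative aside: marking rep_nop()/cpu_relax() __always_inline guarantees the PAUSE hint is emitted inline even in tight spin loops. A typical busy-wait, with a hypothetical flag, looks like:

        /* 'ready' is a made-up flag set by another CPU */
        while (!READ_ONCE(ready))
                cpu_relax();    /* REP; NOP (PAUSE) eases the busy loop */
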
index 655e07a48f6cfa9c09114108f7d8b4cb466fc705..67f08230103aa5ab5f9f74ce90622577b76caef4 100644 (file)
@@ -41,6 +41,7 @@ struct pvclock_wall_clock {
 
 #define PVCLOCK_TSC_STABLE_BIT (1 << 0)
 #define PVCLOCK_GUEST_STOPPED  (1 << 1)
+/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */
 #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2)
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PVCLOCK_ABI_H */
index e4661196994e86b189da7f6c2e84841d3dcdcb9b..ff8b9a17dc4b2d354971fb52b60b69e6fbb0e903 100644 (file)
@@ -27,12 +27,11 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
    function. */
 
 #define __HAVE_ARCH_MEMCPY 1
+extern void *memcpy(void *to, const void *from, size_t len);
 extern void *__memcpy(void *to, const void *from, size_t len);
 
 #ifndef CONFIG_KMEMCHECK
-#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
-extern void *memcpy(void *to, const void *from, size_t len);
-#else
+#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
 #define memcpy(dst, src, len)                                  \
 ({                                                             \
        size_t __len = (len);                                   \
index d7f3b3b78ac313ca8a871d3a53b3d118850a657d..751bf4b7bf114da12231a56f4217c2583ddeafb2 100644 (file)
@@ -79,12 +79,12 @@ do {                                                                        \
 #else /* CONFIG_X86_32 */
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
+#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
 
 #define __EXTRA_CLOBBER  \
        , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-         "r12", "r13", "r14", "r15"
+         "r12", "r13", "r14", "r15", "flags"
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary                                                          \
@@ -100,7 +100,11 @@ do {                                                                       \
 #define __switch_canary_iparam
 #endif /* CC_STACKPROTECTOR */
 
-/* Save restore flags to clear handle leaking NT */
+/*
+ * There is no need to save or restore flags, because flags are always
+ * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
+ * has no effect.
+ */
 #define switch_to(prev, next, last) \
        asm volatile(SAVE_CONTEXT                                         \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
index d6a756ae04c8b999ed379159aa03029b17a38139..999b7cd2e78c7ffe21526fd2b9715b3486216907 100644 (file)
 #include <asm/thread_info.h>   /* for TS_COMPAT */
 #include <asm/unistd.h>
 
-typedef void (*sys_call_ptr_t)(void);
+typedef asmlinkage long (*sys_call_ptr_t)(unsigned long, unsigned long,
+                                         unsigned long, unsigned long,
+                                         unsigned long, unsigned long);
 extern const sys_call_ptr_t sys_call_table[];
 
+#if defined(CONFIG_X86_32)
+#define ia32_sys_call_table sys_call_table
+#define __NR_syscall_compat_max __NR_syscall_max
+#define IA32_NR_syscalls NR_syscalls
+#endif
+
+#if defined(CONFIG_IA32_EMULATION)
+extern const sys_call_ptr_t ia32_sys_call_table[];
+#endif
+
 /*
  * Only the low 32 bits of orig_ax are meaningful, so we return int.
  * This importantly ignores the high bits on 64-bit, so comparisons
index 8afdc3e44247c3a53cc5513ddb0d8f2f84340a4c..c7b551028740f18a5360070a6ccb5dc9ad714c5e 100644 (file)
@@ -57,9 +57,7 @@ struct thread_info {
        __u32                   flags;          /* low level flags */
        __u32                   status;         /* thread synchronous flags */
        __u32                   cpu;            /* current CPU */
-       int                     saved_preempt_count;
        mm_segment_t            addr_limit;
-       void __user             *sysenter_return;
        unsigned int            sig_on_uaccess_error:1;
        unsigned int            uaccess_err:1;  /* uaccess failed */
 };
@@ -69,7 +67,6 @@ struct thread_info {
        .task           = &tsk,                 \
        .flags          = 0,                    \
        .cpu            = 0,                    \
-       .saved_preempt_count = INIT_PREEMPT_COUNT,      \
        .addr_limit     = KERNEL_DS,            \
 }
 
index a8df874f3e8825b0ea6bde91909b8b3d7b380ba7..09b1b0ab94b7653f7ed7019eb7e35b518582f825 100644 (file)
@@ -51,13 +51,13 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
         * limit, not add it to the address).
         */
        if (__builtin_constant_p(size))
-               return addr > limit - size;
+               return unlikely(addr > limit - size);
 
        /* Arbitrary sizes? Be careful about overflow */
        addr += size;
-       if (addr < size)
+       if (unlikely(addr < size))
                return true;
-       return addr > limit;
+       return unlikely(addr > limit);
 }
 
 #define __range_not_ok(addr, size, limit)                              \
@@ -182,7 +182,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
                     : "=a" (__ret_gu), "=r" (__val_gu)                 \
                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
        (x) = (__force __typeof__(*(ptr))) __val_gu;                    \
-       __ret_gu;                                                       \
+       __builtin_expect(__ret_gu, 0);                                  \
 })
 
 #define __put_user_x(size, x, ptr, __ret_pu)                   \
@@ -278,7 +278,7 @@ extern void __put_user_8(void);
                __put_user_x(X, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        }                                                       \
-       __ret_pu;                                               \
+       __builtin_expect(__ret_pu, 0);                          \
 })
 
 #define __put_user_size(x, ptr, size, retval, errret)                  \
@@ -401,7 +401,7 @@ do {                                                                        \
 ({                                                             \
        int __pu_err;                                           \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
-       __pu_err;                                               \
+       __builtin_expect(__pu_err, 0);                          \
 })
 
 #define __get_user_nocheck(x, ptr, size)                               \
@@ -410,7 +410,7 @@ do {                                                                        \
        unsigned long __gu_val;                                         \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
-       __gu_err;                                                       \
+       __builtin_expect(__gu_err, 0);                                  \
 })
 
 /* FIXME: this hack is definitely wrong -AK */
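Background sketch (callee name invented): wrapping the returned error code in __builtin_expect(err, 0), as the get_user/put_user hunks above do, tells the compiler the failure path is cold so the success path becomes the straight-line fall-through:

        static int example_caller(void *dst, const void *src, size_t len)
        {
                int err = example_copy(dst, src, len);  /* hypothetical callee */

                if (__builtin_expect(err, 0))   /* err is almost always 0 */
                        return err;             /* cold failure path */
                return 0;
        }
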
index 8021bd28c0f13277a79cdfce15c7b842cb0dcd7e..756de9190aecad5d7939ee7189703a9177f4da25 100644 (file)
@@ -26,7 +26,7 @@ struct vdso_image {
        long sym___kernel_sigreturn;
        long sym___kernel_rt_sigreturn;
        long sym___kernel_vsyscall;
-       long sym_VDSO32_SYSENTER_RETURN;
+       long sym_int80_landing_pad;
 };
 
 #ifdef CONFIG_X86_64
@@ -38,13 +38,7 @@ extern const struct vdso_image vdso_image_x32;
 #endif
 
 #if defined CONFIG_X86_32 || defined CONFIG_COMPAT
-extern const struct vdso_image vdso_image_32_int80;
-#ifdef CONFIG_COMPAT
-extern const struct vdso_image vdso_image_32_syscall;
-#endif
-extern const struct vdso_image vdso_image_32_sysenter;
-
-extern const struct vdso_image *selected_vdso32;
+extern const struct vdso_image vdso_image_32;
 #endif
 
 extern void __init init_vdso_image(const struct vdso_image *image);
index 83aea8055119e2f26beb1c909536609d30e88943..4c20dd333412db5b367d0625e9b7cf69a7891493 100644 (file)
@@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc)
        return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
 }
 
-static inline int
+static inline long
 HYPERVISOR_memory_op(unsigned int cmd, void *arg)
 {
-       return _hypercall2(int, memory_op, cmd, arg);
+       return _hypercall2(long, memory_op, cmd, arg);
 }
 
 static inline int
index b0ae1c4dc79142d9284d14e76ac181f1c271ad64..217909b4d6f56d84892655f680f974eaf83ec78e 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __ASM_X86_BITSPERLONG_H
 #define __ASM_X86_BITSPERLONG_H
 
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__ILP32__)
 # define __BITS_PER_LONG 64
 #else
 # define __BITS_PER_LONG 32
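Illustration (not from the patch): the x32 ABI defines both __x86_64__ and __ILP32__, so 'long' stays 32-bit even though the CPU runs in 64-bit mode; the hunk above makes __BITS_PER_LONG reflect that. A small compile-time check:

        #if defined(__x86_64__) && defined(__ILP32__)
        _Static_assert(sizeof(long) == 4, "x32: long is 32-bit");
        #endif
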
index 76880ede9a359b3d4f433aff9beaaea6a17543db..03429da2fa80145bbe11e56587e2c7868f467d5d 100644 (file)
@@ -2,7 +2,7 @@
 #define _UAPI_ASM_X86_MCE_H
 
 #include <linux/types.h>
-#include <asm/ioctls.h>
+#include <linux/ioctl.h>
 
 /* Fields are zero when not available */
 struct mce {
index 24e94ce454e2363e6ad14ae2e4cff6c4ac492ab4..2f69e3b184f62c2f7e87c6956bc58fbadb711965 100644 (file)
@@ -1431,7 +1431,7 @@ enum {
 };
 static int x2apic_state;
 
-static inline void __x2apic_disable(void)
+static void __x2apic_disable(void)
 {
        u64 msr;
 
@@ -1447,7 +1447,7 @@ static inline void __x2apic_disable(void)
        printk_once(KERN_INFO "x2apic disabled\n");
 }
 
-static inline void __x2apic_enable(void)
+static void __x2apic_enable(void)
 {
        u64 msr;
 
@@ -1807,7 +1807,7 @@ int apic_version[MAX_LOCAL_APIC];
 /*
  * This interrupt should _never_ happen with our APIC/SMP architecture
  */
-static inline void __smp_spurious_interrupt(u8 vector)
+static void __smp_spurious_interrupt(u8 vector)
 {
        u32 v;
 
@@ -1848,7 +1848,7 @@ __visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
 /*
  * This interrupt should never happen with our APIC/SMP architecture
  */
-static inline void __smp_error_interrupt(struct pt_regs *regs)
+static void __smp_error_interrupt(struct pt_regs *regs)
 {
        u32 v;
        u32 i = 0;
index b548fd3b764bd10f6f3b1240b2c774c07e7d6275..38dd5efdd04c33aa58b2b20b19596bcd12e3e4af 100644 (file)
  *
  */
 
-#include <linux/errno.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ctype.h>
 #include <linux/init.h>
-#include <linux/hardirq.h>
-#include <linux/delay.h>
 
 #include <asm/numachip/numachip.h>
 #include <asm/numachip/numachip_csr.h>
-#include <asm/smp.h>
-#include <asm/apic.h>
 #include <asm/ipi.h>
 #include <asm/apic_flat_64.h>
 #include <asm/pgtable.h>
+#include <asm/pci_x86.h>
 
-static int numachip_system __read_mostly;
+u8 numachip_system __read_mostly;
+static const struct apic apic_numachip1;
+static const struct apic apic_numachip2;
+static void (*numachip_apic_icr_write)(int apicid, unsigned int val) __read_mostly;
 
-static const struct apic apic_numachip;
-
-static unsigned int get_apic_id(unsigned long x)
+static unsigned int numachip1_get_apic_id(unsigned long x)
 {
        unsigned long value;
        unsigned int id = (x >> 24) & 0xff;
@@ -47,7 +38,7 @@ static unsigned int get_apic_id(unsigned long x)
        return id;
 }
 
-static unsigned long set_apic_id(unsigned int id)
+static unsigned long numachip1_set_apic_id(unsigned int id)
 {
        unsigned long x;
 
@@ -55,9 +46,17 @@ static unsigned long set_apic_id(unsigned int id)
        return x;
 }
 
-static unsigned int read_xapic_id(void)
+static unsigned int numachip2_get_apic_id(unsigned long x)
+{
+       u64 mcfg;
+
+       rdmsrl(MSR_FAM10H_MMIO_CONF_BASE, mcfg);
+       return ((mcfg >> (28 - 8)) & 0xfff00) | (x >> 24);
+}
+
+static unsigned long numachip2_set_apic_id(unsigned int id)
 {
-       return get_apic_id(apic_read(APIC_ID));
+       return id << 24;
 }
 
 static int numachip_apic_id_valid(int apicid)
@@ -68,7 +67,7 @@ static int numachip_apic_id_valid(int apicid)
 
 static int numachip_apic_id_registered(void)
 {
-       return physid_isset(read_xapic_id(), phys_cpu_present_map);
+       return 1;
 }
 
 static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
@@ -76,36 +75,48 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
        return initial_apic_id >> index_msb;
 }
 
-static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static void numachip1_apic_icr_write(int apicid, unsigned int val)
 {
-       union numachip_csr_g3_ext_irq_gen int_gen;
-
-       int_gen.s._destination_apic_id = phys_apicid;
-       int_gen.s._vector = 0;
-       int_gen.s._msgtype = APIC_DM_INIT >> 8;
-       int_gen.s._index = 0;
-
-       write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v);
+       write_lcsr(CSR_G3_EXT_IRQ_GEN, (apicid << 16) | val);
+}
 
-       int_gen.s._msgtype = APIC_DM_STARTUP >> 8;
-       int_gen.s._vector = start_rip >> 12;
+static void numachip2_apic_icr_write(int apicid, unsigned int val)
+{
+       numachip2_write32_lcsr(NUMACHIP2_APIC_ICR, (apicid << 12) | val);
+}
 
-       write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v);
+static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+{
+       numachip_apic_icr_write(phys_apicid, APIC_DM_INIT);
+       numachip_apic_icr_write(phys_apicid, APIC_DM_STARTUP |
+               (start_rip >> 12));
 
        return 0;
 }
 
 static void numachip_send_IPI_one(int cpu, int vector)
 {
-       union numachip_csr_g3_ext_irq_gen int_gen;
-       int apicid = per_cpu(x86_cpu_to_apicid, cpu);
-
-       int_gen.s._destination_apic_id = apicid;
-       int_gen.s._vector = vector;
-       int_gen.s._msgtype = (vector == NMI_VECTOR ? APIC_DM_NMI : APIC_DM_FIXED) >> 8;
-       int_gen.s._index = 0;
+       int local_apicid, apicid = per_cpu(x86_cpu_to_apicid, cpu);
+       unsigned int dmode;
+
+       preempt_disable();
+       local_apicid = __this_cpu_read(x86_cpu_to_apicid);
+
+       /* Send via local APIC where non-local part matches */
+       if (!((apicid ^ local_apicid) >> NUMACHIP_LAPIC_BITS)) {
+               unsigned long flags;
+
+               local_irq_save(flags);
+               __default_send_IPI_dest_field(apicid, vector,
+                       APIC_DEST_PHYSICAL);
+               local_irq_restore(flags);
+               preempt_enable();
+               return;
+       }
+       preempt_enable();
 
-       write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v);
+       dmode = (vector == NMI_VECTOR) ? APIC_DM_NMI : APIC_DM_FIXED;
+       numachip_apic_icr_write(apicid, dmode | vector);
 }
 
 static void numachip_send_IPI_mask(const struct cpumask *mask, int vector)
@@ -149,9 +160,14 @@ static void numachip_send_IPI_self(int vector)
        apic_write(APIC_SELF_IPI, vector);
 }
 
-static int __init numachip_probe(void)
+static int __init numachip1_probe(void)
 {
-       return apic == &apic_numachip;
+       return apic == &apic_numachip1;
+}
+
+static int __init numachip2_probe(void)
+{
+       return apic == &apic_numachip2;
 }
 
 static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
@@ -172,34 +188,118 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 
 static int __init numachip_system_init(void)
 {
-       if (!numachip_system)
+       /* Map the LCSR area and set up the apic_icr_write function */
+       switch (numachip_system) {
+       case 1:
+               init_extra_mapping_uc(NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_SIZE);
+               numachip_apic_icr_write = numachip1_apic_icr_write;
+               x86_init.pci.arch_init = pci_numachip_init;
+               break;
+       case 2:
+               init_extra_mapping_uc(NUMACHIP2_LCSR_BASE, NUMACHIP2_LCSR_SIZE);
+               numachip_apic_icr_write = numachip2_apic_icr_write;
+
+               /* Use MCFG config cycles rather than locked CF8 cycles */
+               raw_pci_ops = &pci_mmcfg;
+               break;
+       default:
                return 0;
-
-       init_extra_mapping_uc(NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_SIZE);
-       init_extra_mapping_uc(NUMACHIP_GCSR_BASE, NUMACHIP_GCSR_SIZE);
+       }
 
        x86_cpuinit.fixup_cpu_id = fixup_cpu_id;
-       x86_init.pci.arch_init = pci_numachip_init;
 
        return 0;
 }
 early_initcall(numachip_system_init);
 
-static int numachip_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int numachip1_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
-       if (!strncmp(oem_id, "NUMASC", 6)) {
-               numachip_system = 1;
-               return 1;
-       }
+       if ((strncmp(oem_id, "NUMASC", 6) != 0) ||
+           (strncmp(oem_table_id, "NCONNECT", 8) != 0))
+               return 0;
 
-       return 0;
+       numachip_system = 1;
+
+       return 1;
+}
+
+static int numachip2_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       if ((strncmp(oem_id, "NUMASC", 6) != 0) ||
+           (strncmp(oem_table_id, "NCONECT2", 8) != 0))
+               return 0;
+
+       numachip_system = 2;
+
+       return 1;
+}
+
+/* APIC IPIs are queued */
+static void numachip_apic_wait_icr_idle(void)
+{
 }
 
-static const struct apic apic_numachip __refconst = {
+/* APIC NMI IPIs are queued */
+static u32 numachip_safe_apic_wait_icr_idle(void)
+{
+       return 0;
+}
 
+static const struct apic apic_numachip1 __refconst = {
        .name                           = "NumaConnect system",
-       .probe                          = numachip_probe,
-       .acpi_madt_oem_check            = numachip_acpi_madt_oem_check,
+       .probe                          = numachip1_probe,
+       .acpi_madt_oem_check            = numachip1_acpi_madt_oem_check,
+       .apic_id_valid                  = numachip_apic_id_valid,
+       .apic_id_registered             = numachip_apic_id_registered,
+
+       .irq_delivery_mode              = dest_Fixed,
+       .irq_dest_mode                  = 0, /* physical */
+
+       .target_cpus                    = online_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = 0,
+       .check_apicid_used              = NULL,
+
+       .vector_allocation_domain       = default_vector_allocation_domain,
+       .init_apic_ldr                  = flat_init_apic_ldr,
+
+       .ioapic_phys_id_map             = NULL,
+       .setup_apic_routing             = NULL,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .phys_pkg_id                    = numachip_phys_pkg_id,
+
+       .get_apic_id                    = numachip1_get_apic_id,
+       .set_apic_id                    = numachip1_set_apic_id,
+       .apic_id_mask                   = 0xffU << 24,
+
+       .cpu_mask_to_apicid_and         = default_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = numachip_send_IPI_mask,
+       .send_IPI_mask_allbutself       = numachip_send_IPI_mask_allbutself,
+       .send_IPI_allbutself            = numachip_send_IPI_allbutself,
+       .send_IPI_all                   = numachip_send_IPI_all,
+       .send_IPI_self                  = numachip_send_IPI_self,
+
+       .wakeup_secondary_cpu           = numachip_wakeup_secondary,
+       .inquire_remote_apic            = NULL, /* REMRD not supported */
+
+       .read                           = native_apic_mem_read,
+       .write                          = native_apic_mem_write,
+       .eoi_write                      = native_apic_mem_write,
+       .icr_read                       = native_apic_icr_read,
+       .icr_write                      = native_apic_icr_write,
+       .wait_icr_idle                  = numachip_apic_wait_icr_idle,
+       .safe_wait_icr_idle             = numachip_safe_apic_wait_icr_idle,
+};
+
+apic_driver(apic_numachip1);
+
+static const struct apic apic_numachip2 __refconst = {
+       .name                           = "NumaConnect2 system",
+       .probe                          = numachip2_probe,
+       .acpi_madt_oem_check            = numachip2_acpi_madt_oem_check,
        .apic_id_valid                  = numachip_apic_id_valid,
        .apic_id_registered             = numachip_apic_id_registered,
 
@@ -221,8 +321,8 @@ static const struct apic apic_numachip __refconst = {
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .phys_pkg_id                    = numachip_phys_pkg_id,
 
-       .get_apic_id                    = get_apic_id,
-       .set_apic_id                    = set_apic_id,
+       .get_apic_id                    = numachip2_get_apic_id,
+       .set_apic_id                    = numachip2_set_apic_id,
        .apic_id_mask                   = 0xffU << 24,
 
        .cpu_mask_to_apicid_and         = default_cpu_mask_to_apicid_and,
@@ -241,8 +341,8 @@ static const struct apic apic_numachip __refconst = {
        .eoi_write                      = native_apic_mem_write,
        .icr_read                       = native_apic_icr_read,
        .icr_write                      = native_apic_icr_write,
-       .wait_icr_idle                  = native_apic_wait_icr_idle,
-       .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+       .wait_icr_idle                  = numachip_apic_wait_icr_idle,
+       .safe_wait_icr_idle             = numachip_safe_apic_wait_icr_idle,
 };
-apic_driver(apic_numachip);
 
+apic_driver(apic_numachip2);
index 5c60bb16262203ab63720ee349384180e9e7d8fb..f25321894ad294278c20d5ba7b4076ce2353a8fb 100644 (file)
@@ -529,7 +529,7 @@ static void __eoi_ioapic_pin(int apic, int pin, int vector)
        }
 }
 
-void eoi_ioapic_pin(int vector, struct mp_chip_data *data)
+static void eoi_ioapic_pin(int vector, struct mp_chip_data *data)
 {
        unsigned long flags;
        struct irq_pin_list *entry;
@@ -2547,7 +2547,9 @@ void __init setup_ioapic_dest(void)
                        mask = apic->target_cpus();
 
                chip = irq_data_get_irq_chip(idata);
-               chip->irq_set_affinity(idata, mask, false);
+               /* Might be lapic_chip for irq 0 */
+               if (chip->irq_set_affinity)
+                       chip->irq_set_affinity(idata, mask, false);
        }
 }
 #endif
@@ -2907,6 +2909,7 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
        struct irq_data *irq_data;
        struct mp_chip_data *data;
        struct irq_alloc_info *info = arg;
+       unsigned long flags;
 
        if (!info || nr_irqs > 1)
                return -EINVAL;
@@ -2939,11 +2942,14 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
 
        cfg = irqd_cfg(irq_data);
        add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);
+
+       local_irq_save(flags);
        if (info->ioapic_entry)
                mp_setup_entry(cfg, data, info->ioapic_entry);
        mp_register_handler(virq, data->trigger);
        if (virq < nr_legacy_irqs())
                legacy_pic->mask(virq);
+       local_irq_restore(flags);
 
        apic_printk(APIC_VERBOSE, KERN_DEBUG
                    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n",
index 8e3d22a1af94094b749abca95187b073403d628c..95a18e25d5bfc5ce14f9e930eef09389c336c8d6 100644 (file)
@@ -53,9 +53,6 @@ void common(void) {
        OFFSET(IA32_SIGCONTEXT_sp, sigcontext_ia32, sp);
        OFFSET(IA32_SIGCONTEXT_ip, sigcontext_ia32, ip);
 
-       BLANK();
-       OFFSET(TI_sysenter_return, thread_info, sysenter_return);
-
        BLANK();
        OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
 #endif
index 4eb065c6bed26b38a2804ffcab57f9e09274a7ec..58031303e30488c540d609f95d0693918c09fa64 100644 (file)
@@ -41,6 +41,7 @@ obj-$(CONFIG_CPU_SUP_INTEL)           += perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_rapl.o perf_event_intel_cqm.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_pt.o perf_event_intel_bts.o
+obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_cstate.o
 
 obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \
                                           perf_event_intel_uncore_snb.o \
index be4febc58b9443372e4df5037e357789de5184fe..e38d338a64475a82feb198c7169e54b8d6d7100a 100644 (file)
@@ -157,7 +157,7 @@ struct _cpuid4_info_regs {
        struct amd_northbridge *nb;
 };
 
-unsigned short                 num_cache_leaves;
+static unsigned short num_cache_leaves;
 
 /* AMD doesn't have CPUID4. Emulate it here to report the same
    information to the user.  This makes some assumptions about the machine:
@@ -326,7 +326,7 @@ static void amd_calc_l3_indices(struct amd_northbridge *nb)
  *
  * @returns: the disabled index if used or negative value if slot free.
  */
-int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
+static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
 {
        unsigned int reg = 0;
 
@@ -403,8 +403,8 @@ static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
  *
  * @return: 0 on success, error status on failure
  */
-int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
-                           unsigned long index)
+static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
+                           unsigned slot, unsigned long index)
 {
        int ret = 0;
 
index 9d014b82a124c82fdfbd6c952f03c7624d99972f..c5b0d562dbf55064685c78b5d0fa6280748086d1 100644 (file)
@@ -1586,6 +1586,8 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
                winchip_mcheck_init(c);
                return 1;
                break;
+       default:
+               return 0;
        }
 
        return 0;
@@ -1605,6 +1607,8 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
                mce_amd_feature_init(c);
                mce_flags.overflow_recov = !!(ebx & BIT(0));
                mce_flags.succor         = !!(ebx & BIT(1));
+               mce_flags.smca           = !!(ebx & BIT(3));
+
                break;
                }
 
@@ -2042,7 +2046,7 @@ int __init mcheck_init(void)
  * Disable machine checks on suspend and shutdown. We can't really handle
  * them later.
  */
-static int mce_disable_error_reporting(void)
+static void mce_disable_error_reporting(void)
 {
        int i;
 
@@ -2052,17 +2056,32 @@ static int mce_disable_error_reporting(void)
                if (b->init)
                        wrmsrl(MSR_IA32_MCx_CTL(i), 0);
        }
-       return 0;
+       return;
+}
+
+static void vendor_disable_error_reporting(void)
+{
+       /*
+        * Don't clear on Intel CPUs. Some of these MSRs are socket-wide.
+        * Disabling them for just a single offlined CPU is bad, since it will
+        * inhibit reporting for all shared resources on the socket like the
+        * last level cache (LLC), the integrated memory controller (iMC), etc.
+        */
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+               return;
+
+       mce_disable_error_reporting();
 }
 
 static int mce_syscore_suspend(void)
 {
-       return mce_disable_error_reporting();
+       vendor_disable_error_reporting();
+       return 0;
 }
 
 static void mce_syscore_shutdown(void)
 {
-       mce_disable_error_reporting();
+       vendor_disable_error_reporting();
 }
 
 /*
@@ -2342,19 +2361,14 @@ static void mce_device_remove(unsigned int cpu)
 static void mce_disable_cpu(void *h)
 {
        unsigned long action = *(unsigned long *)h;
-       int i;
 
        if (!mce_available(raw_cpu_ptr(&cpu_info)))
                return;
 
        if (!(action & CPU_TASKS_FROZEN))
                cmci_clear();
-       for (i = 0; i < mca_cfg.banks; i++) {
-               struct mce_bank *b = &mce_banks[i];
 
-               if (b->init)
-                       wrmsrl(MSR_IA32_MCx_CTL(i), 0);
-       }
+       vendor_disable_error_reporting();
 }
 
 static void mce_reenable_cpu(void *h)
index 1af51b1586d7f7ae0bfa65ec377634b2d8fa39c2..2c5aaf8c2e2f3dcc94d348dfe91da6d7d5000ae9 100644 (file)
@@ -503,14 +503,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
                return;
        }
 
-       /* Check whether a vector already exists */
-       if (h & APIC_VECTOR_MASK) {
-               printk(KERN_DEBUG
-                      "CPU%d: Thermal LVT vector (%#x) already installed\n",
-                      cpu, (h & APIC_VECTOR_MASK));
-               return;
-       }
-
        /* early Pentium M models use different method for enabling TM2 */
        if (cpu_has(c, X86_FEATURE_TM2)) {
                if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
index 285c85427c323c8e33abc8a4dfc0436123143de7..220b1a50851345632c3f5c1ca4f0e3cbb40d02a0 100644 (file)
@@ -2,6 +2,3 @@ microcode-y                             := core.o
 obj-$(CONFIG_MICROCODE)                        += microcode.o
 microcode-$(CONFIG_MICROCODE_INTEL)    += intel.o intel_lib.o
 microcode-$(CONFIG_MICROCODE_AMD)      += amd.o
-obj-$(CONFIG_MICROCODE_EARLY)          += core_early.o
-obj-$(CONFIG_MICROCODE_INTEL_EARLY)    += intel_early.o
-obj-$(CONFIG_MICROCODE_AMD_EARLY)      += amd_early.o
index 12829c3ced3c549c982a3072f825c2ed1b1c7758..2233f8a766156891a52b9a7658f04efeaf4f86c8 100644 (file)
@@ -1,5 +1,9 @@
 /*
  *  AMD CPU Microcode Update Driver for Linux
+ *
+ *  This driver allows to upgrade microcode on F10h AMD
+ *  CPUs and later.
+ *
  *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
  *
  *  Author: Peter Oruba <peter.oruba@amd.com>
  *  Based on work by:
  *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
  *
- *  Maintainers:
- *  Andreas Herrmann <herrmann.der.user@googlemail.com>
- *  Borislav Petkov <bp@alien8.de>
+ *  early loader:
+ *  Copyright (C) 2013 Advanced Micro Devices, Inc.
  *
- *  This driver allows to upgrade microcode on F10h AMD
- *  CPUs and later.
+ *  Author: Jacob Shin <jacob.shin@amd.com>
+ *  Fixes: Borislav Petkov <bp@suse.de>
  *
  *  Licensed under the terms of the GNU General Public
  *  License version 2. See file COPYING for details.
  */
+#define pr_fmt(fmt) "microcode: " fmt
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
+#include <linux/earlycpio.h>
 #include <linux/firmware.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
+#include <linux/initrd.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/pci.h>
 
+#include <asm/microcode_amd.h>
 #include <asm/microcode.h>
 #include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/cpu.h>
 #include <asm/msr.h>
-#include <asm/microcode_amd.h>
-
-MODULE_DESCRIPTION("AMD Microcode Update Driver");
-MODULE_AUTHOR("Peter Oruba");
-MODULE_LICENSE("GPL v2");
 
 static struct equiv_cpu_entry *equiv_cpu_table;
 
@@ -47,6 +48,432 @@ struct ucode_patch {
 
 static LIST_HEAD(pcache);
 
+/*
+ * This points to the current valid container of microcode patches which we will
+ * save from the initrd before jettisoning its contents.
+ */
+static u8 *container;
+static size_t container_size;
+
+static u32 ucode_new_rev;
+u8 amd_ucode_patch[PATCH_MAX_SIZE];
+static u16 this_equiv_id;
+
+static struct cpio_data ucode_cpio;
+
+/*
+ * Microcode patch container file is prepended to the initrd in cpio format.
+ * See Documentation/x86/early-microcode.txt
+ */
+static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
+
+static struct cpio_data __init find_ucode_in_initrd(void)
+{
+       long offset = 0;
+       char *path;
+       void *start;
+       size_t size;
+
+#ifdef CONFIG_X86_32
+       struct boot_params *p;
+
+       /*
+        * On 32-bit, early load occurs before paging is turned on so we need
+        * to use physical addresses.
+        */
+       p       = (struct boot_params *)__pa_nodebug(&boot_params);
+       path    = (char *)__pa_nodebug(ucode_path);
+       start   = (void *)p->hdr.ramdisk_image;
+       size    = p->hdr.ramdisk_size;
+#else
+       path    = ucode_path;
+       start   = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
+       size    = boot_params.hdr.ramdisk_size;
+#endif
+
+       return find_cpio_data(path, start, size, &offset);
+}
+
+static size_t compute_container_size(u8 *data, u32 total_size)
+{
+       size_t size = 0;
+       u32 *header = (u32 *)data;
+
+       if (header[0] != UCODE_MAGIC ||
+           header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
+           header[2] == 0)                            /* size */
+               return size;
+
+       size = header[2] + CONTAINER_HDR_SZ;
+       total_size -= size;
+       data += size;
+
+       while (total_size) {
+               u16 patch_size;
+
+               header = (u32 *)data;
+
+               if (header[0] != UCODE_UCODE_TYPE)
+                       break;
+
+               /*
+                * Sanity-check patch size.
+                */
+               patch_size = header[1];
+               if (patch_size > PATCH_MAX_SIZE)
+                       break;
+
+               size       += patch_size + SECTION_HDR_SIZE;
+               data       += patch_size + SECTION_HDR_SIZE;
+               total_size -= patch_size + SECTION_HDR_SIZE;
+       }
+
+       return size;
+}
+
+/*
+ * Early load occurs before we can vmalloc(). So we look for the microcode
+ * patch container file in initrd, traverse equivalent cpu table, look for a
+ * matching microcode patch, and update, all in initrd memory in place.
+ * When vmalloc() is available for use later -- on 64-bit during first AP load,
+ * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
+ * load_microcode_amd() to save equivalent cpu table and microcode patches in
+ * kernel heap memory.
+ */
+static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
+{
+       struct equiv_cpu_entry *eq;
+       size_t *cont_sz;
+       u32 *header;
+       u8  *data, **cont;
+       u8 (*patch)[PATCH_MAX_SIZE];
+       u16 eq_id = 0;
+       int offset, left;
+       u32 rev, eax, ebx, ecx, edx;
+       u32 *new_rev;
+
+#ifdef CONFIG_X86_32
+       new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
+       cont_sz = (size_t *)__pa_nodebug(&container_size);
+       cont    = (u8 **)__pa_nodebug(&container);
+       patch   = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
+#else
+       new_rev = &ucode_new_rev;
+       cont_sz = &container_size;
+       cont    = &container;
+       patch   = &amd_ucode_patch;
+#endif
+
+       data   = ucode;
+       left   = size;
+       header = (u32 *)data;
+
+       /* find equiv cpu table */
+       if (header[0] != UCODE_MAGIC ||
+           header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
+           header[2] == 0)                            /* size */
+               return;
+
+       eax = 0x00000001;
+       ecx = 0;
+       native_cpuid(&eax, &ebx, &ecx, &edx);
+
+       while (left > 0) {
+               eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
+
+               *cont = data;
+
+               /* Advance past the container header */
+               offset = header[2] + CONTAINER_HDR_SZ;
+               data  += offset;
+               left  -= offset;
+
+               eq_id = find_equiv_id(eq, eax);
+               if (eq_id) {
+                       this_equiv_id = eq_id;
+                       *cont_sz = compute_container_size(*cont, left + offset);
+
+                       /*
+                        * truncate how much we need to iterate over in the
+                        * ucode update loop below
+                        */
+                       left = *cont_sz - offset;
+                       break;
+               }
+
+               /*
+                * support multiple container files appended together. if this
+                * one does not have a matching equivalent cpu entry, we fast
+                * forward to the next container file.
+                */
+               while (left > 0) {
+                       header = (u32 *)data;
+                       if (header[0] == UCODE_MAGIC &&
+                           header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
+                               break;
+
+                       offset = header[1] + SECTION_HDR_SIZE;
+                       data  += offset;
+                       left  -= offset;
+               }
+
+               /* mark where the next microcode container file starts */
+               offset    = data - (u8 *)ucode;
+               ucode     = data;
+       }
+
+       if (!eq_id) {
+               *cont = NULL;
+               *cont_sz = 0;
+               return;
+       }
+
+       if (check_current_patch_level(&rev, true))
+               return;
+
+       while (left > 0) {
+               struct microcode_amd *mc;
+
+               header = (u32 *)data;
+               if (header[0] != UCODE_UCODE_TYPE || /* type */
+                   header[1] == 0)                  /* size */
+                       break;
+
+               mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);
+
+               if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {
+
+                       if (!__apply_microcode_amd(mc)) {
+                               rev = mc->hdr.patch_id;
+                               *new_rev = rev;
+
+                               if (save_patch)
+                                       memcpy(patch, mc,
+                                              min_t(u32, header[1], PATCH_MAX_SIZE));
+                       }
+               }
+
+               offset  = header[1] + SECTION_HDR_SIZE;
+               data   += offset;
+               left   -= offset;
+       }
+}
+
+static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
+                                             unsigned int family)
+{
+#ifdef CONFIG_X86_64
+       char fw_name[36] = "amd-ucode/microcode_amd.bin";
+
+       if (family >= 0x15)
+               snprintf(fw_name, sizeof(fw_name),
+                        "amd-ucode/microcode_amd_fam%.2xh.bin", family);
+
+       return get_builtin_firmware(cp, fw_name);
+#else
+       return false;
+#endif
+}
+
+void __init load_ucode_amd_bsp(unsigned int family)
+{
+       struct cpio_data cp;
+       void **data;
+       size_t *size;
+
+#ifdef CONFIG_X86_32
+       data =  (void **)__pa_nodebug(&ucode_cpio.data);
+       size = (size_t *)__pa_nodebug(&ucode_cpio.size);
+#else
+       data = &ucode_cpio.data;
+       size = &ucode_cpio.size;
+#endif
+
+       cp = find_ucode_in_initrd();
+       if (!cp.data) {
+               if (!load_builtin_amd_microcode(&cp, family))
+                       return;
+       }
+
+       *data = cp.data;
+       *size = cp.size;
+
+       apply_ucode_in_initrd(cp.data, cp.size, true);
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * On 32-bit, since AP's early load occurs before paging is turned on, we
+ * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
+ * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
+ * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
+ * which is used upon resume from suspend.
+ */
+void load_ucode_amd_ap(void)
+{
+       struct microcode_amd *mc;
+       size_t *usize;
+       void **ucode;
+
+       mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
+       if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
+               __apply_microcode_amd(mc);
+               return;
+       }
+
+       ucode = (void *)__pa_nodebug(&container);
+       usize = (size_t *)__pa_nodebug(&container_size);
+
+       if (!*ucode || !*usize)
+               return;
+
+       apply_ucode_in_initrd(*ucode, *usize, false);
+}
+
+static void __init collect_cpu_sig_on_bsp(void *arg)
+{
+       unsigned int cpu = smp_processor_id();
+       struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+
+       uci->cpu_sig.sig = cpuid_eax(0x00000001);
+}
+
+static void __init get_bsp_sig(void)
+{
+       unsigned int bsp = boot_cpu_data.cpu_index;
+       struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
+
+       if (!uci->cpu_sig.sig)
+               smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+}
+#else
+void load_ucode_amd_ap(void)
+{
+       unsigned int cpu = smp_processor_id();
+       struct equiv_cpu_entry *eq;
+       struct microcode_amd *mc;
+       u32 rev, eax;
+       u16 eq_id;
+
+       /* Exit if called on the BSP. */
+       if (!cpu)
+               return;
+
+       if (!container)
+               return;
+
+       /*
+        * 64-bit runs with paging enabled, thus early==false.
+        */
+       if (check_current_patch_level(&rev, false))
+               return;
+
+       eax = cpuid_eax(0x00000001);
+       eq  = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
+
+       eq_id = find_equiv_id(eq, eax);
+       if (!eq_id)
+               return;
+
+       if (eq_id == this_equiv_id) {
+               mc = (struct microcode_amd *)amd_ucode_patch;
+
+               if (mc && rev < mc->hdr.patch_id) {
+                       if (!__apply_microcode_amd(mc))
+                               ucode_new_rev = mc->hdr.patch_id;
+               }
+
+       } else {
+               if (!ucode_cpio.data)
+                       return;
+
+               /*
+                * AP has a different equivalence ID than BSP, looks like
+                * mixed-steppings silicon so go through the ucode blob anew.
+                */
+               apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
+       }
+}
+#endif
+
+int __init save_microcode_in_initrd_amd(void)
+{
+       unsigned long cont;
+       int retval = 0;
+       enum ucode_state ret;
+       u8 *cont_va;
+       u32 eax;
+
+       if (!container)
+               return -EINVAL;
+
+#ifdef CONFIG_X86_32
+       get_bsp_sig();
+       cont    = (unsigned long)container;
+       cont_va = __va(container);
+#else
+       /*
+        * We need the physical address of the container for both bitness since
+        * boot_params.hdr.ramdisk_image is a physical address.
+        */
+       cont    = __pa(container);
+       cont_va = container;
+#endif
+
+       /*
+        * Take into account the fact that the ramdisk might get relocated and
+        * therefore we need to recompute the container's position in virtual
+        * memory space.
+        */
+       if (relocated_ramdisk)
+               container = (u8 *)(__va(relocated_ramdisk) +
+                            (cont - boot_params.hdr.ramdisk_image));
+       else
+               container = cont_va;
+
+       if (ucode_new_rev)
+               pr_info("microcode: updated early to new patch_level=0x%08x\n",
+                       ucode_new_rev);
+
+       eax   = cpuid_eax(0x00000001);
+       eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+
+       ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
+       if (ret != UCODE_OK)
+               retval = -EINVAL;
+
+       /*
+        * This will be freed any msec now, stash patches for the current
+        * family and switch to patch cache for cpu hotplug, etc later.
+        */
+       container = NULL;
+       container_size = 0;
+
+       return retval;
+}
+
+void reload_ucode_amd(void)
+{
+       struct microcode_amd *mc;
+       u32 rev;
+
+       /*
+        * early==false because this is a syscore ->resume path and by
+        * that time paging is long enabled.
+        */
+       if (check_current_patch_level(&rev, false))
+               return;
+
+       mc = (struct microcode_amd *)amd_ucode_patch;
+
+       if (mc && rev < mc->hdr.patch_id) {
+               if (!__apply_microcode_amd(mc)) {
+                       ucode_new_rev = mc->hdr.patch_id;
+                       pr_info("microcode: reload patch_level=0x%08x\n",
+                               ucode_new_rev);
+               }
+       }
+}
 static u16 __find_equiv_id(unsigned int cpu)
 {
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -177,6 +604,53 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
        return patch_size;
 }
 
+/*
+ * Those patch levels cannot be updated to newer ones and thus should be final.
+ */
+static u32 final_levels[] = {
+       0x01000098,
+       0x0100009f,
+       0x010000af,
+       0, /* T-101 terminator */
+};
+
+/*
+ * Check the current patch level on this CPU.
+ *
+ * @rev: Use it to return the patch level. It is set to 0 in the case of
+ * error.
+ *
+ * Returns:
+ *  - true: if update should stop
+ *  - false: otherwise
+ */
+bool check_current_patch_level(u32 *rev, bool early)
+{
+       u32 lvl, dummy, i;
+       bool ret = false;
+       u32 *levels;
+
+       native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
+
+       if (IS_ENABLED(CONFIG_X86_32) && early)
+               levels = (u32 *)__pa_nodebug(&final_levels);
+       else
+               levels = final_levels;
+
+       for (i = 0; levels[i]; i++) {
+               if (lvl == levels[i]) {
+                       lvl = 0;
+                       ret = true;
+                       break;
+               }
+       }
+
+       if (rev)
+               *rev = lvl;
+
+       return ret;
+}
+
 int __apply_microcode_amd(struct microcode_amd *mc_amd)
 {
        u32 rev, dummy;
@@ -197,7 +671,7 @@ int apply_microcode_amd(int cpu)
        struct microcode_amd *mc_amd;
        struct ucode_cpu_info *uci;
        struct ucode_patch *p;
-       u32 rev, dummy;
+       u32 rev;
 
        BUG_ON(raw_smp_processor_id() != cpu);
 
@@ -210,7 +684,8 @@ int apply_microcode_amd(int cpu)
        mc_amd  = p->data;
        uci->mc = p->data;
 
-       rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+       if (check_current_patch_level(&rev, false))
+               return -1;
 
        /* need to apply patch? */
        if (rev >= mc_amd->hdr.patch_id) {
@@ -387,7 +862,7 @@ enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t s
        if (ret != UCODE_OK)
                cleanup();
 
-#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
+#ifdef CONFIG_X86_32
        /* save BSP's matching patch for early load */
        if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
                struct ucode_patch *p = find_patch(cpu);
@@ -475,7 +950,7 @@ static struct microcode_ops microcode_amd_ops = {
 
 struct microcode_ops * __init init_amd_microcode(void)
 {
-       struct cpuinfo_x86 *c = &cpu_data(0);
+       struct cpuinfo_x86 *c = &boot_cpu_data;
 
        if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
                pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
deleted file mode 100644 (file)
index e8a215a..0000000
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
- *
- * Author: Jacob Shin <jacob.shin@amd.com>
- * Fixes: Borislav Petkov <bp@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/earlycpio.h>
-#include <linux/initrd.h>
-
-#include <asm/cpu.h>
-#include <asm/setup.h>
-#include <asm/microcode_amd.h>
-
-/*
- * This points to the current valid container of microcode patches which we will
- * save from the initrd before jettisoning its contents.
- */
-static u8 *container;
-static size_t container_size;
-
-static u32 ucode_new_rev;
-u8 amd_ucode_patch[PATCH_MAX_SIZE];
-static u16 this_equiv_id;
-
-static struct cpio_data ucode_cpio;
-
-/*
- * Microcode patch container file is prepended to the initrd in cpio format.
- * See Documentation/x86/early-microcode.txt
- */
-static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
-
-static struct cpio_data __init find_ucode_in_initrd(void)
-{
-       long offset = 0;
-       char *path;
-       void *start;
-       size_t size;
-
-#ifdef CONFIG_X86_32
-       struct boot_params *p;
-
-       /*
-        * On 32-bit, early load occurs before paging is turned on so we need
-        * to use physical addresses.
-        */
-       p       = (struct boot_params *)__pa_nodebug(&boot_params);
-       path    = (char *)__pa_nodebug(ucode_path);
-       start   = (void *)p->hdr.ramdisk_image;
-       size    = p->hdr.ramdisk_size;
-#else
-       path    = ucode_path;
-       start   = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
-       size    = boot_params.hdr.ramdisk_size;
-#endif
-
-       return find_cpio_data(path, start, size, &offset);
-}
-
-static size_t compute_container_size(u8 *data, u32 total_size)
-{
-       size_t size = 0;
-       u32 *header = (u32 *)data;
-
-       if (header[0] != UCODE_MAGIC ||
-           header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
-           header[2] == 0)                            /* size */
-               return size;
-
-       size = header[2] + CONTAINER_HDR_SZ;
-       total_size -= size;
-       data += size;
-
-       while (total_size) {
-               u16 patch_size;
-
-               header = (u32 *)data;
-
-               if (header[0] != UCODE_UCODE_TYPE)
-                       break;
-
-               /*
-                * Sanity-check patch size.
-                */
-               patch_size = header[1];
-               if (patch_size > PATCH_MAX_SIZE)
-                       break;
-
-               size       += patch_size + SECTION_HDR_SIZE;
-               data       += patch_size + SECTION_HDR_SIZE;
-               total_size -= patch_size + SECTION_HDR_SIZE;
-       }
-
-       return size;
-}
-
-/*
- * Early load occurs before we can vmalloc(). So we look for the microcode
- * patch container file in initrd, traverse equivalent cpu table, look for a
- * matching microcode patch, and update, all in initrd memory in place.
- * When vmalloc() is available for use later -- on 64-bit during first AP load,
- * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
- * load_microcode_amd() to save equivalent cpu table and microcode patches in
- * kernel heap memory.
- */
-static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
-{
-       struct equiv_cpu_entry *eq;
-       size_t *cont_sz;
-       u32 *header;
-       u8  *data, **cont;
-       u8 (*patch)[PATCH_MAX_SIZE];
-       u16 eq_id = 0;
-       int offset, left;
-       u32 rev, eax, ebx, ecx, edx;
-       u32 *new_rev;
-
-#ifdef CONFIG_X86_32
-       new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
-       cont_sz = (size_t *)__pa_nodebug(&container_size);
-       cont    = (u8 **)__pa_nodebug(&container);
-       patch   = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
-#else
-       new_rev = &ucode_new_rev;
-       cont_sz = &container_size;
-       cont    = &container;
-       patch   = &amd_ucode_patch;
-#endif
-
-       data   = ucode;
-       left   = size;
-       header = (u32 *)data;
-
-       /* find equiv cpu table */
-       if (header[0] != UCODE_MAGIC ||
-           header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
-           header[2] == 0)                            /* size */
-               return;
-
-       eax = 0x00000001;
-       ecx = 0;
-       native_cpuid(&eax, &ebx, &ecx, &edx);
-
-       while (left > 0) {
-               eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
-
-               *cont = data;
-
-               /* Advance past the container header */
-               offset = header[2] + CONTAINER_HDR_SZ;
-               data  += offset;
-               left  -= offset;
-
-               eq_id = find_equiv_id(eq, eax);
-               if (eq_id) {
-                       this_equiv_id = eq_id;
-                       *cont_sz = compute_container_size(*cont, left + offset);
-
-                       /*
-                        * truncate how much we need to iterate over in the
-                        * ucode update loop below
-                        */
-                       left = *cont_sz - offset;
-                       break;
-               }
-
-               /*
-                * support multiple container files appended together. if this
-                * one does not have a matching equivalent cpu entry, we fast
-                * forward to the next container file.
-                */
-               while (left > 0) {
-                       header = (u32 *)data;
-                       if (header[0] == UCODE_MAGIC &&
-                           header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
-                               break;
-
-                       offset = header[1] + SECTION_HDR_SIZE;
-                       data  += offset;
-                       left  -= offset;
-               }
-
-               /* mark where the next microcode container file starts */
-               offset    = data - (u8 *)ucode;
-               ucode     = data;
-       }
-
-       if (!eq_id) {
-               *cont = NULL;
-               *cont_sz = 0;
-               return;
-       }
-
-       /* find ucode and update if needed */
-
-       native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
-
-       while (left > 0) {
-               struct microcode_amd *mc;
-
-               header = (u32 *)data;
-               if (header[0] != UCODE_UCODE_TYPE || /* type */
-                   header[1] == 0)                  /* size */
-                       break;
-
-               mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);
-
-               if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {
-
-                       if (!__apply_microcode_amd(mc)) {
-                               rev = mc->hdr.patch_id;
-                               *new_rev = rev;
-
-                               if (save_patch)
-                                       memcpy(patch, mc,
-                                              min_t(u32, header[1], PATCH_MAX_SIZE));
-                       }
-               }
-
-               offset  = header[1] + SECTION_HDR_SIZE;
-               data   += offset;
-               left   -= offset;
-       }
-}
-
-static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
-                                             unsigned int family)
-{
-#ifdef CONFIG_X86_64
-       char fw_name[36] = "amd-ucode/microcode_amd.bin";
-
-       if (family >= 0x15)
-               snprintf(fw_name, sizeof(fw_name),
-                        "amd-ucode/microcode_amd_fam%.2xh.bin", family);
-
-       return get_builtin_firmware(cp, fw_name);
-#else
-       return false;
-#endif
-}
-
-void __init load_ucode_amd_bsp(unsigned int family)
-{
-       struct cpio_data cp;
-       void **data;
-       size_t *size;
-
-#ifdef CONFIG_X86_32
-       data =  (void **)__pa_nodebug(&ucode_cpio.data);
-       size = (size_t *)__pa_nodebug(&ucode_cpio.size);
-#else
-       data = &ucode_cpio.data;
-       size = &ucode_cpio.size;
-#endif
-
-       cp = find_ucode_in_initrd();
-       if (!cp.data) {
-               if (!load_builtin_amd_microcode(&cp, family))
-                       return;
-       }
-
-       *data = cp.data;
-       *size = cp.size;
-
-       apply_ucode_in_initrd(cp.data, cp.size, true);
-}
-
-#ifdef CONFIG_X86_32
-/*
- * On 32-bit, since AP's early load occurs before paging is turned on, we
- * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
- * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
- * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
- * which is used upon resume from suspend.
- */
-void load_ucode_amd_ap(void)
-{
-       struct microcode_amd *mc;
-       size_t *usize;
-       void **ucode;
-
-       mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
-       if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
-               __apply_microcode_amd(mc);
-               return;
-       }
-
-       ucode = (void *)__pa_nodebug(&container);
-       usize = (size_t *)__pa_nodebug(&container_size);
-
-       if (!*ucode || !*usize)
-               return;
-
-       apply_ucode_in_initrd(*ucode, *usize, false);
-}
-
-static void __init collect_cpu_sig_on_bsp(void *arg)
-{
-       unsigned int cpu = smp_processor_id();
-       struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-
-       uci->cpu_sig.sig = cpuid_eax(0x00000001);
-}
-
-static void __init get_bsp_sig(void)
-{
-       unsigned int bsp = boot_cpu_data.cpu_index;
-       struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
-
-       if (!uci->cpu_sig.sig)
-               smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
-}
-#else
-void load_ucode_amd_ap(void)
-{
-       unsigned int cpu = smp_processor_id();
-       struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-       struct equiv_cpu_entry *eq;
-       struct microcode_amd *mc;
-       u32 rev, eax;
-       u16 eq_id;
-
-       /* Exit if called on the BSP. */
-       if (!cpu)
-               return;
-
-       if (!container)
-               return;
-
-       rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
-
-       uci->cpu_sig.rev = rev;
-       uci->cpu_sig.sig = eax;
-
-       eax = cpuid_eax(0x00000001);
-       eq  = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
-
-       eq_id = find_equiv_id(eq, eax);
-       if (!eq_id)
-               return;
-
-       if (eq_id == this_equiv_id) {
-               mc = (struct microcode_amd *)amd_ucode_patch;
-
-               if (mc && rev < mc->hdr.patch_id) {
-                       if (!__apply_microcode_amd(mc))
-                               ucode_new_rev = mc->hdr.patch_id;
-               }
-
-       } else {
-               if (!ucode_cpio.data)
-                       return;
-
-               /*
-                * AP has a different equivalence ID than BSP, looks like
-                * mixed-steppings silicon so go through the ucode blob anew.
-                */
-               apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
-       }
-}
-#endif
-
-int __init save_microcode_in_initrd_amd(void)
-{
-       unsigned long cont;
-       int retval = 0;
-       enum ucode_state ret;
-       u8 *cont_va;
-       u32 eax;
-
-       if (!container)
-               return -EINVAL;
-
-#ifdef CONFIG_X86_32
-       get_bsp_sig();
-       cont    = (unsigned long)container;
-       cont_va = __va(container);
-#else
-       /*
-        * We need the physical address of the container for both bitness since
-        * boot_params.hdr.ramdisk_image is a physical address.
-        */
-       cont    = __pa(container);
-       cont_va = container;
-#endif
-
-       /*
-        * Take into account the fact that the ramdisk might get relocated and
-        * therefore we need to recompute the container's position in virtual
-        * memory space.
-        */
-       if (relocated_ramdisk)
-               container = (u8 *)(__va(relocated_ramdisk) +
-                            (cont - boot_params.hdr.ramdisk_image));
-       else
-               container = cont_va;
-
-       if (ucode_new_rev)
-               pr_info("microcode: updated early to new patch_level=0x%08x\n",
-                       ucode_new_rev);
-
-       eax   = cpuid_eax(0x00000001);
-       eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
-
-       ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
-       if (ret != UCODE_OK)
-               retval = -EINVAL;
-
-       /*
-        * This will be freed any msec now, stash patches for the current
-        * family and switch to patch cache for cpu hotplug, etc later.
-        */
-       container = NULL;
-       container_size = 0;
-
-       return retval;
-}
-
-void reload_ucode_amd(void)
-{
-       struct microcode_amd *mc;
-       u32 rev, eax;
-
-       rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
-
-       mc = (struct microcode_amd *)amd_ucode_patch;
-
-       if (mc && rev < mc->hdr.patch_id) {
-               if (!__apply_microcode_amd(mc)) {
-                       ucode_new_rev = mc->hdr.patch_id;
-                       pr_info("microcode: reload patch_level=0x%08x\n",
-                               ucode_new_rev);
-               }
-       }
-}
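
The deleted amd_early.c above parses AMD's microcode container format by hand: a 12-byte container header (magic, equivalence-table type, equivalence-table size), the equivalence table itself, and then a sequence of patch sections, each introduced by an 8-byte section header (type, payload size); real images may also be several such containers concatenated, which the fast-forward loop in apply_ucode_in_initrd() handles. The following user-space sketch walks a single container; the constants and the equiv_cpu_entry layout are assumptions modelled on asm/microcode_amd.h of this series, and compute_container_size()/apply_ucode_in_initrd() above remain the authoritative parsers.

/*
 * Illustrative walker for an AMD microcode container blob (for example the
 * contents of kernel/x86/microcode/AuthenticAMD.bin). Constants are assumed
 * from asm/microcode_amd.h; this is a sketch, not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define UCODE_MAGIC                 0x00414d44
#define UCODE_EQUIV_CPU_TABLE_TYPE  0x00000000
#define UCODE_UCODE_TYPE            0x00000001
#define CONTAINER_HDR_SZ            12   /* magic + type + table size   */
#define SECTION_HDR_SIZE            8    /* section type + payload size */

struct equiv_cpu_entry {
	uint32_t installed_cpu;          /* CPUID(1).EAX signature */
	uint32_t fixed_errata_mask;
	uint32_t fixed_errata_compare;
	uint16_t equiv_cpu;              /* matched against hdr.processor_rev_id */
	uint16_t res;
};

static void walk_container(const uint8_t *data, size_t left)
{
	const uint32_t *hdr = (const uint32_t *)data;
	size_t off;

	if (left < CONTAINER_HDR_SZ || hdr[0] != UCODE_MAGIC ||
	    hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE || hdr[2] == 0)
		return;

	printf("equiv table: %u bytes (%zu entries)\n",
	       hdr[2], hdr[2] / sizeof(struct equiv_cpu_entry));

	/* Skip the container header and the equivalence table it describes. */
	off = CONTAINER_HDR_SZ + hdr[2];
	if (off > left)
		return;
	data += off;
	left -= off;

	/* Every following section is one patch: u32 type, u32 size, payload. */
	while (left >= SECTION_HDR_SIZE) {
		hdr = (const uint32_t *)data;
		if (hdr[0] != UCODE_UCODE_TYPE)
			break;

		printf("patch section: %u payload bytes\n", hdr[1]);

		off = SECTION_HDR_SIZE + hdr[1];
		if (off > left)
			break;
		data += off;
		left -= off;
	}
}

int main(int argc, char **argv)
{
	static uint8_t buf[1 << 20];
	size_t len;
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;

	len = fread(buf, 1, sizeof(buf), f);
	fclose(f);
	walk_container(buf, len);
	return 0;
}
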
index 9e3f3c7dd5d76124b12b167dc5c4e59129256b63..7fc27f1cca586a1752d95fa73c612db03db78437 100644 (file)
@@ -5,6 +5,12 @@
  *           2006      Shaohua Li <shaohua.li@intel.com>
  *           2013-2015 Borislav Petkov <bp@alien8.de>
  *
+ * X86 CPU microcode early update for Linux:
+ *
+ *     Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
+ *                        H Peter Anvin <hpa@zytor.com>
+ *               (C) 2015 Borislav Petkov <bp@alien8.de>
+ *
 * This driver allows upgrading microcode on x86 processors.
  *
  * This program is free software; you can redistribute it and/or
  * 2 of the License, or (at your option) any later version.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt) "microcode: " fmt
 
 #include <linux/platform_device.h>
+#include <linux/syscore_ops.h>
 #include <linux/miscdevice.h>
 #include <linux/capability.h>
+#include <linux/firmware.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/cpu.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
-#include <linux/syscore_ops.h>
 
-#include <asm/microcode.h>
-#include <asm/processor.h>
+#include <asm/microcode_intel.h>
 #include <asm/cpu_device_id.h>
+#include <asm/microcode_amd.h>
 #include <asm/perf_event.h>
+#include <asm/microcode.h>
+#include <asm/processor.h>
+#include <asm/cmdline.h>
 
-MODULE_DESCRIPTION("Microcode Update Driver");
-MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
-MODULE_LICENSE("GPL");
-
-#define MICROCODE_VERSION      "2.00"
+#define MICROCODE_VERSION      "2.01"
 
 static struct microcode_ops    *microcode_ops;
 
-bool dis_ucode_ldr;
-module_param(dis_ucode_ldr, bool, 0);
+static bool dis_ucode_ldr;
+
+static int __init disable_loader(char *str)
+{
+       dis_ucode_ldr = true;
+       return 1;
+}
+__setup("dis_ucode_ldr", disable_loader);
 
 /*
  * Synchronization.
@@ -68,6 +79,150 @@ struct cpu_info_ctx {
        int                     err;
 };
 
+static bool __init check_loader_disabled_bsp(void)
+{
+#ifdef CONFIG_X86_32
+       const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
+       const char *opt     = "dis_ucode_ldr";
+       const char *option  = (const char *)__pa_nodebug(opt);
+       bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
+
+#else /* CONFIG_X86_64 */
+       const char *cmdline = boot_command_line;
+       const char *option  = "dis_ucode_ldr";
+       bool *res = &dis_ucode_ldr;
+#endif
+
+       if (cmdline_find_option_bool(cmdline, option))
+               *res = true;
+
+       return *res;
+}
+
+extern struct builtin_fw __start_builtin_fw[];
+extern struct builtin_fw __end_builtin_fw[];
+
+bool get_builtin_firmware(struct cpio_data *cd, const char *name)
+{
+#ifdef CONFIG_FW_LOADER
+       struct builtin_fw *b_fw;
+
+       for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
+               if (!strcmp(name, b_fw->name)) {
+                       cd->size = b_fw->size;
+                       cd->data = b_fw->data;
+                       return true;
+               }
+       }
+#endif
+       return false;
+}
+
+void __init load_ucode_bsp(void)
+{
+       int vendor;
+       unsigned int family;
+
+       if (check_loader_disabled_bsp())
+               return;
+
+       if (!have_cpuid_p())
+               return;
+
+       vendor = x86_vendor();
+       family = x86_family();
+
+       switch (vendor) {
+       case X86_VENDOR_INTEL:
+               if (family >= 6)
+                       load_ucode_intel_bsp();
+               break;
+       case X86_VENDOR_AMD:
+               if (family >= 0x10)
+                       load_ucode_amd_bsp(family);
+               break;
+       default:
+               break;
+       }
+}
+
+static bool check_loader_disabled_ap(void)
+{
+#ifdef CONFIG_X86_32
+       return *((bool *)__pa_nodebug(&dis_ucode_ldr));
+#else
+       return dis_ucode_ldr;
+#endif
+}
+
+void load_ucode_ap(void)
+{
+       int vendor, family;
+
+       if (check_loader_disabled_ap())
+               return;
+
+       if (!have_cpuid_p())
+               return;
+
+       vendor = x86_vendor();
+       family = x86_family();
+
+       switch (vendor) {
+       case X86_VENDOR_INTEL:
+               if (family >= 6)
+                       load_ucode_intel_ap();
+               break;
+       case X86_VENDOR_AMD:
+               if (family >= 0x10)
+                       load_ucode_amd_ap();
+               break;
+       default:
+               break;
+       }
+}
+
+int __init save_microcode_in_initrd(void)
+{
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+
+       switch (c->x86_vendor) {
+       case X86_VENDOR_INTEL:
+               if (c->x86 >= 6)
+                       save_microcode_in_initrd_intel();
+               break;
+       case X86_VENDOR_AMD:
+               if (c->x86 >= 0x10)
+                       save_microcode_in_initrd_amd();
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+void reload_early_microcode(void)
+{
+       int vendor, family;
+
+       vendor = x86_vendor();
+       family = x86_family();
+
+       switch (vendor) {
+       case X86_VENDOR_INTEL:
+               if (family >= 6)
+                       reload_ucode_intel();
+               break;
+       case X86_VENDOR_AMD:
+               if (family >= 0x10)
+                       reload_ucode_amd();
+               break;
+       default:
+               break;
+       }
+}
+
 static void collect_cpu_info_local(void *arg)
 {
        struct cpu_info_ctx *ctx = arg;
@@ -210,9 +365,6 @@ static void __exit microcode_dev_exit(void)
 {
        misc_deregister(&microcode_dev);
 }
-
-MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
-MODULE_ALIAS("devname:cpu/microcode");
 #else
 #define microcode_dev_init()   0
 #define microcode_dev_exit()   do { } while (0)
@@ -463,20 +615,6 @@ static struct notifier_block mc_cpu_notifier = {
        .notifier_call  = mc_cpu_callback,
 };
 
-#ifdef MODULE
-/* Autoload on Intel and AMD systems */
-static const struct x86_cpu_id __initconst microcode_id[] = {
-#ifdef CONFIG_MICROCODE_INTEL
-       { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, },
-#endif
-#ifdef CONFIG_MICROCODE_AMD
-       { X86_VENDOR_AMD, X86_FAMILY_ANY, X86_MODEL_ANY, },
-#endif
-       {}
-};
-MODULE_DEVICE_TABLE(x86cpu, microcode_id);
-#endif
-
 static struct attribute *cpu_root_microcode_attrs[] = {
        &dev_attr_reload.attr,
        NULL
@@ -487,9 +625,9 @@ static struct attribute_group cpu_root_microcode_group = {
        .attrs = cpu_root_microcode_attrs,
 };
 
-static int __init microcode_init(void)
+int __init microcode_init(void)
 {
-       struct cpuinfo_x86 *c = &cpu_data(0);
+       struct cpuinfo_x86 *c = &boot_cpu_data;
        int error;
 
        if (paravirt_enabled() || dis_ucode_ldr)
@@ -560,35 +698,3 @@ static int __init microcode_init(void)
        return error;
 
 }
-module_init(microcode_init);
-
-static void __exit microcode_exit(void)
-{
-       struct cpuinfo_x86 *c = &cpu_data(0);
-
-       microcode_dev_exit();
-
-       unregister_hotcpu_notifier(&mc_cpu_notifier);
-       unregister_syscore_ops(&mc_syscore_ops);
-
-       sysfs_remove_group(&cpu_subsys.dev_root->kobj,
-                          &cpu_root_microcode_group);
-
-       get_online_cpus();
-       mutex_lock(&microcode_mutex);
-
-       subsys_interface_unregister(&mc_cpu_interface);
-
-       mutex_unlock(&microcode_mutex);
-       put_online_cpus();
-
-       platform_device_unregister(microcode_pdev);
-
-       microcode_ops = NULL;
-
-       if (c->x86_vendor == X86_VENDOR_AMD)
-               exit_amd_microcode();
-
-       pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n");
-}
-module_exit(microcode_exit);
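
All of the family checks added above (family >= 6 for Intel, family >= 0x10 for AMD), as well as the intel-ucode/FF-MM-SS built-in firmware names constructed further down, are derived from the CPUID leaf 1 signature. A minimal sketch of that decoding follows; it assumes the standard CPUID(1).EAX layout (stepping in bits 3:0, base model in 7:4, base family in 11:8, extended model in 19:16, extended family in 27:20) and only approximates the kernel's x86_family()/x86_model() helpers, which remain the authoritative versions.

/* Decode a CPUID(1).EAX signature into family/model/stepping.
 * A sketch of what x86_family()/x86_model() compute; the real helpers
 * live in the kernel's asm headers. */
#include <stdio.h>

static unsigned int sig_family(unsigned int sig)
{
	unsigned int fam = (sig >> 8) & 0xf;

	if (fam == 0xf)                  /* extended family applies at 0xf only */
		fam += (sig >> 20) & 0xff;
	return fam;
}

static unsigned int sig_model(unsigned int sig)
{
	unsigned int fam   = sig_family(sig);
	unsigned int model = (sig >> 4) & 0xf;

	if (fam >= 6)                    /* extended model used from family 6 on */
		model += ((sig >> 16) & 0xf) << 4;
	return model;
}

int main(void)
{
	unsigned int sig = 0x000306c3;   /* example: family 6, model 0x3c, stepping 3 */

	/* Same naming scheme as load_builtin_intel_microcode() below. */
	printf("intel-ucode/%02x-%02x-%02x\n",
	       sig_family(sig), sig_model(sig), sig & 0xf);
	return 0;
}
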
diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c
deleted file mode 100644 (file)
index 8ebc421..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- *     X86 CPU microcode early update for Linux
- *
- *     Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
- *                        H Peter Anvin <hpa@zytor.com>
- *               (C) 2015 Borislav Petkov <bp@alien8.de>
- *
- *     This driver allows to early upgrade microcode on Intel processors
- *     belonging to IA-32 family - PentiumPro, Pentium II,
- *     Pentium III, Xeon, Pentium 4, etc.
- *
- *     Reference: Section 9.11 of Volume 3, IA-32 Intel Architecture
- *     Software Developer's Manual.
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/firmware.h>
-#include <asm/microcode.h>
-#include <asm/microcode_intel.h>
-#include <asm/microcode_amd.h>
-#include <asm/processor.h>
-#include <asm/cmdline.h>
-
-static bool __init check_loader_disabled_bsp(void)
-{
-#ifdef CONFIG_X86_32
-       const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
-       const char *opt     = "dis_ucode_ldr";
-       const char *option  = (const char *)__pa_nodebug(opt);
-       bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
-
-#else /* CONFIG_X86_64 */
-       const char *cmdline = boot_command_line;
-       const char *option  = "dis_ucode_ldr";
-       bool *res = &dis_ucode_ldr;
-#endif
-
-       if (cmdline_find_option_bool(cmdline, option))
-               *res = true;
-
-       return *res;
-}
-
-extern struct builtin_fw __start_builtin_fw[];
-extern struct builtin_fw __end_builtin_fw[];
-
-bool get_builtin_firmware(struct cpio_data *cd, const char *name)
-{
-#ifdef CONFIG_FW_LOADER
-       struct builtin_fw *b_fw;
-
-       for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
-               if (!strcmp(name, b_fw->name)) {
-                       cd->size = b_fw->size;
-                       cd->data = b_fw->data;
-                       return true;
-               }
-       }
-#endif
-       return false;
-}
-
-void __init load_ucode_bsp(void)
-{
-       int vendor;
-       unsigned int family;
-
-       if (check_loader_disabled_bsp())
-               return;
-
-       if (!have_cpuid_p())
-               return;
-
-       vendor = x86_vendor();
-       family = x86_family();
-
-       switch (vendor) {
-       case X86_VENDOR_INTEL:
-               if (family >= 6)
-                       load_ucode_intel_bsp();
-               break;
-       case X86_VENDOR_AMD:
-               if (family >= 0x10)
-                       load_ucode_amd_bsp(family);
-               break;
-       default:
-               break;
-       }
-}
-
-static bool check_loader_disabled_ap(void)
-{
-#ifdef CONFIG_X86_32
-       return *((bool *)__pa_nodebug(&dis_ucode_ldr));
-#else
-       return dis_ucode_ldr;
-#endif
-}
-
-void load_ucode_ap(void)
-{
-       int vendor, family;
-
-       if (check_loader_disabled_ap())
-               return;
-
-       if (!have_cpuid_p())
-               return;
-
-       vendor = x86_vendor();
-       family = x86_family();
-
-       switch (vendor) {
-       case X86_VENDOR_INTEL:
-               if (family >= 6)
-                       load_ucode_intel_ap();
-               break;
-       case X86_VENDOR_AMD:
-               if (family >= 0x10)
-                       load_ucode_amd_ap();
-               break;
-       default:
-               break;
-       }
-}
-
-int __init save_microcode_in_initrd(void)
-{
-       struct cpuinfo_x86 *c = &boot_cpu_data;
-
-       switch (c->x86_vendor) {
-       case X86_VENDOR_INTEL:
-               if (c->x86 >= 6)
-                       save_microcode_in_initrd_intel();
-               break;
-       case X86_VENDOR_AMD:
-               if (c->x86 >= 0x10)
-                       save_microcode_in_initrd_amd();
-               break;
-       default:
-               break;
-       }
-
-       return 0;
-}
-
-void reload_early_microcode(void)
-{
-       int vendor, family;
-
-       vendor = x86_vendor();
-       family = x86_family();
-
-       switch (vendor) {
-       case X86_VENDOR_INTEL:
-               if (family >= 6)
-                       reload_ucode_intel();
-               break;
-       case X86_VENDOR_AMD:
-               if (family >= 0x10)
-                       reload_ucode_amd();
-               break;
-       default:
-               break;
-       }
-}
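
Both the AMD and the Intel early paths locate their blob with find_cpio_data(), scanning the uncompressed cpio archive that is prepended to the initrd for kernel/x86/microcode/AuthenticAMD.bin or GenuineIntel.bin. As a rough illustration of what that scan involves, here is a user-space helper for the newc ("070701") cpio format; the field offsets are assumptions based on that format, and lib/earlycpio.c is the kernel's real parser.

/* Minimal sketch of a newc cpio member lookup, similar in spirit to
 * find_cpio_data(). Only the essential bounds checks are included. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALIGN4(x) (((x) + 3) & ~(size_t)3)

static unsigned long hex8(const char *p)
{
	char tmp[9];

	memcpy(tmp, p, 8);
	tmp[8] = '\0';
	return strtoul(tmp, NULL, 16);
}

/* Return a pointer to the named member's data and store its size, or NULL. */
static const void *find_member(const char *buf, size_t len,
			       const char *path, size_t *size)
{
	size_t off = 0;

	while (off + 110 <= len) {
		const char *hdr  = buf + off;
		const char *name = hdr + 110;
		unsigned long filesize, namesize;

		if (memcmp(hdr, "070701", 6) && memcmp(hdr, "070702", 6))
			break;                          /* not a newc header */

		filesize = hex8(hdr + 6 + 6 * 8);       /* c_filesize field  */
		namesize = hex8(hdr + 6 + 11 * 8);      /* c_namesize field  */

		if (!strcmp(name, "TRAILER!!!"))
			break;

		off = ALIGN4(off + 110 + namesize);     /* data is 4-byte aligned */
		if (!strcmp(name, path)) {
			*size = filesize;
			return buf + off;
		}
		off = ALIGN4(off + filesize);           /* next member header */
	}
	return NULL;
}

Called with the raw initrd image and "kernel/x86/microcode/GenuineIntel.bin", such a lookup would return the same blob that scan_microcode() below hands to get_matching_model_microcode().
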
index 969dc17eb1b4b86775d5496bb6ebe9ba67110b4c..ce47402eb2f90a70f44b9902687133da3acbad47 100644 (file)
  * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
  *              2006 Shaohua Li <shaohua.li@intel.com>
  *
+ * Intel CPU microcode early update for Linux
+ *
+ * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
+ *                   H Peter Anvin <hpa@zytor.com>
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/*
+ * This needs to be before all headers so that pr_debug in printk.h doesn't turn
+ * printk calls into no_printk().
+ *
+ *#define DEBUG
+ */
+#define pr_fmt(fmt) "microcode: " fmt
 
+#include <linux/earlycpio.h>
 #include <linux/firmware.h>
 #include <linux/uaccess.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/vmalloc.h>
+#include <linux/initrd.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/mm.h>
 
 #include <asm/microcode_intel.h>
 #include <asm/processor.h>
+#include <asm/tlbflush.h>
+#include <asm/setup.h>
 #include <asm/msr.h>
 
-MODULE_DESCRIPTION("Microcode Update Driver");
-MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
-MODULE_LICENSE("GPL");
+static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
+static struct mc_saved_data {
+       unsigned int mc_saved_count;
+       struct microcode_intel **mc_saved;
+} mc_saved_data;
+
+static enum ucode_state
+load_microcode_early(struct microcode_intel **saved,
+                    unsigned int num_saved, struct ucode_cpu_info *uci)
+{
+       struct microcode_intel *ucode_ptr, *new_mc = NULL;
+       struct microcode_header_intel *mc_hdr;
+       int new_rev, ret, i;
+
+       new_rev = uci->cpu_sig.rev;
+
+       for (i = 0; i < num_saved; i++) {
+               ucode_ptr = saved[i];
+               mc_hdr    = (struct microcode_header_intel *)ucode_ptr;
+
+               ret = has_newer_microcode(ucode_ptr,
+                                         uci->cpu_sig.sig,
+                                         uci->cpu_sig.pf,
+                                         new_rev);
+               if (!ret)
+                       continue;
+
+               new_rev = mc_hdr->rev;
+               new_mc  = ucode_ptr;
+       }
+
+       if (!new_mc)
+               return UCODE_NFOUND;
+
+       uci->mc = (struct microcode_intel *)new_mc;
+       return UCODE_OK;
+}
+
+static inline void
+copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd,
+                 unsigned long off, int num_saved)
+{
+       int i;
+
+       for (i = 0; i < num_saved; i++)
+               mc_saved[i] = (struct microcode_intel *)(initrd[i] + off);
+}
+
+#ifdef CONFIG_X86_32
+static void
+microcode_phys(struct microcode_intel **mc_saved_tmp,
+              struct mc_saved_data *mc_saved_data)
+{
+       int i;
+       struct microcode_intel ***mc_saved;
+
+       mc_saved = (struct microcode_intel ***)
+                  __pa_nodebug(&mc_saved_data->mc_saved);
+       for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
+               struct microcode_intel *p;
+
+               p = *(struct microcode_intel **)
+                       __pa_nodebug(mc_saved_data->mc_saved + i);
+               mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
+       }
+}
+#endif
+
+static enum ucode_state
+load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
+              unsigned long initrd_start, struct ucode_cpu_info *uci)
+{
+       struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
+       unsigned int count = mc_saved_data->mc_saved_count;
+
+       if (!mc_saved_data->mc_saved) {
+               copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count);
+
+               return load_microcode_early(mc_saved_tmp, count, uci);
+       } else {
+#ifdef CONFIG_X86_32
+               microcode_phys(mc_saved_tmp, mc_saved_data);
+               return load_microcode_early(mc_saved_tmp, count, uci);
+#else
+               return load_microcode_early(mc_saved_data->mc_saved,
+                                                   count, uci);
+#endif
+       }
+}
+
+/*
+ * Given a CPU signature and a microcode patch, this function checks whether
+ * the patch's family and model match the CPU's.
+ */
+static enum ucode_state
+matching_model_microcode(struct microcode_header_intel *mc_header,
+                       unsigned long sig)
+{
+       unsigned int fam, model;
+       unsigned int fam_ucode, model_ucode;
+       struct extended_sigtable *ext_header;
+       unsigned long total_size = get_totalsize(mc_header);
+       unsigned long data_size = get_datasize(mc_header);
+       int ext_sigcount, i;
+       struct extended_signature *ext_sig;
+
+       fam   = __x86_family(sig);
+       model = x86_model(sig);
+
+       fam_ucode   = __x86_family(mc_header->sig);
+       model_ucode = x86_model(mc_header->sig);
+
+       if (fam == fam_ucode && model == model_ucode)
+               return UCODE_OK;
+
+       /* Look for ext. headers: */
+       if (total_size <= data_size + MC_HEADER_SIZE)
+               return UCODE_NFOUND;
+
+       ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
+       ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
+       ext_sigcount = ext_header->count;
+
+       for (i = 0; i < ext_sigcount; i++) {
+               fam_ucode   = __x86_family(ext_sig->sig);
+               model_ucode = x86_model(ext_sig->sig);
+
+               if (fam == fam_ucode && model == model_ucode)
+                       return UCODE_OK;
+
+               ext_sig++;
+       }
+       return UCODE_NFOUND;
+}
+
+static int
+save_microcode(struct mc_saved_data *mc_saved_data,
+              struct microcode_intel **mc_saved_src,
+              unsigned int mc_saved_count)
+{
+       int i, j;
+       struct microcode_intel **saved_ptr;
+       int ret;
+
+       if (!mc_saved_count)
+               return -EINVAL;
+
+       /*
+        * Copy new microcode data.
+        */
+       saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL);
+       if (!saved_ptr)
+               return -ENOMEM;
+
+       for (i = 0; i < mc_saved_count; i++) {
+               struct microcode_header_intel *mc_hdr;
+               struct microcode_intel *mc;
+               unsigned long size;
+
+               if (!mc_saved_src[i]) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               mc     = mc_saved_src[i];
+               mc_hdr = &mc->hdr;
+               size   = get_totalsize(mc_hdr);
+
+               saved_ptr[i] = kmalloc(size, GFP_KERNEL);
+               if (!saved_ptr[i]) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               memcpy(saved_ptr[i], mc, size);
+       }
+
+       /*
+        * Point to newly saved microcode.
+        */
+       mc_saved_data->mc_saved = saved_ptr;
+       mc_saved_data->mc_saved_count = mc_saved_count;
+
+       return 0;
+
+err:
+       for (j = 0; j <= i; j++)
+               kfree(saved_ptr[j]);
+       kfree(saved_ptr);
+
+       return ret;
+}
+
+/*
+ * A microcode patch in ucode_ptr is saved into mc_saved
+ * - if it has a matching signature and a newer revision than an existing
+ *   patch in mc_saved,
+ * - or if it is a newly discovered microcode patch.
+ *
+ * The microcode patch should match the CPU's model.
+ *
+ * Returns: The updated number @num_saved of saved microcode patches.
+ */
+static unsigned int _save_mc(struct microcode_intel **mc_saved,
+                            u8 *ucode_ptr, unsigned int num_saved)
+{
+       struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
+       unsigned int sig, pf;
+       int found = 0, i;
+
+       mc_hdr = (struct microcode_header_intel *)ucode_ptr;
+
+       for (i = 0; i < num_saved; i++) {
+               mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
+               sig          = mc_saved_hdr->sig;
+               pf           = mc_saved_hdr->pf;
+
+               if (!find_matching_signature(ucode_ptr, sig, pf))
+                       continue;
+
+               found = 1;
+
+               if (mc_hdr->rev <= mc_saved_hdr->rev)
+                       continue;
+
+               /*
+                * Found an older ucode saved earlier. Replace it with
+                * this newer one.
+                */
+               mc_saved[i] = (struct microcode_intel *)ucode_ptr;
+               break;
+       }
+
+       /* Newly detected microcode, save it to memory. */
+       if (i >= num_saved && !found)
+               mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;
+
+       return num_saved;
+}
+
+/*
+ * Get microcode matching the BSP's model. Only CPUs with the same model as
+ * the BSP can be present in the platform.
+ */
+static enum ucode_state __init
+get_matching_model_microcode(int cpu, unsigned long start,
+                            void *data, size_t size,
+                            struct mc_saved_data *mc_saved_data,
+                            unsigned long *mc_saved_in_initrd,
+                            struct ucode_cpu_info *uci)
+{
+       u8 *ucode_ptr = data;
+       unsigned int leftover = size;
+       enum ucode_state state = UCODE_OK;
+       unsigned int mc_size;
+       struct microcode_header_intel *mc_header;
+       struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
+       unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
+       int i;
+
+       while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
+
+               if (leftover < sizeof(mc_header))
+                       break;
+
+               mc_header = (struct microcode_header_intel *)ucode_ptr;
+
+               mc_size = get_totalsize(mc_header);
+               if (!mc_size || mc_size > leftover ||
+                       microcode_sanity_check(ucode_ptr, 0) < 0)
+                       break;
+
+               leftover -= mc_size;
+
+               /*
+                * Since APs with the same family and model as the BSP may be
+                * present in the platform, we need to find and save microcode
+                * patches with the same family and model as the BSP.
+                */
+               if (matching_model_microcode(mc_header, uci->cpu_sig.sig) !=
+                        UCODE_OK) {
+                       ucode_ptr += mc_size;
+                       continue;
+               }
+
+               mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count);
+
+               ucode_ptr += mc_size;
+       }
+
+       if (leftover) {
+               state = UCODE_ERROR;
+               goto out;
+       }
+
+       if (mc_saved_count == 0) {
+               state = UCODE_NFOUND;
+               goto out;
+       }
+
+       for (i = 0; i < mc_saved_count; i++)
+               mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start;
+
+       mc_saved_data->mc_saved_count = mc_saved_count;
+out:
+       return state;
+}
+
+static int collect_cpu_info_early(struct ucode_cpu_info *uci)
+{
+       unsigned int val[2];
+       unsigned int family, model;
+       struct cpu_signature csig;
+       unsigned int eax, ebx, ecx, edx;
+
+       csig.sig = 0;
+       csig.pf = 0;
+       csig.rev = 0;
+
+       memset(uci, 0, sizeof(*uci));
+
+       eax = 0x00000001;
+       ecx = 0;
+       native_cpuid(&eax, &ebx, &ecx, &edx);
+       csig.sig = eax;
+
+       family = __x86_family(csig.sig);
+       model  = x86_model(csig.sig);
+
+       if ((model >= 5) || (family > 6)) {
+               /* get processor flags from MSR 0x17 */
+               native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
+               csig.pf = 1 << ((val[1] >> 18) & 7);
+       }
+       native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+
+       /* As documented in the SDM: Do a CPUID 1 here */
+       sync_core();
+
+       /* get the current revision from MSR 0x8B */
+       native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+
+       csig.rev = val[1];
+
+       uci->cpu_sig = csig;
+       uci->valid = 1;
+
+       return 0;
+}
+
+static void show_saved_mc(void)
+{
+#ifdef DEBUG
+       int i, j;
+       unsigned int sig, pf, rev, total_size, data_size, date;
+       struct ucode_cpu_info uci;
+
+       if (mc_saved_data.mc_saved_count == 0) {
+               pr_debug("no microcode data saved.\n");
+               return;
+       }
+       pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count);
+
+       collect_cpu_info_early(&uci);
+
+       sig = uci.cpu_sig.sig;
+       pf = uci.cpu_sig.pf;
+       rev = uci.cpu_sig.rev;
+       pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
+
+       for (i = 0; i < mc_saved_data.mc_saved_count; i++) {
+               struct microcode_header_intel *mc_saved_header;
+               struct extended_sigtable *ext_header;
+               int ext_sigcount;
+               struct extended_signature *ext_sig;
+
+               mc_saved_header = (struct microcode_header_intel *)
+                                 mc_saved_data.mc_saved[i];
+               sig = mc_saved_header->sig;
+               pf = mc_saved_header->pf;
+               rev = mc_saved_header->rev;
+               total_size = get_totalsize(mc_saved_header);
+               data_size = get_datasize(mc_saved_header);
+               date = mc_saved_header->date;
+
+               pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
+                        i, sig, pf, rev, total_size,
+                        date & 0xffff,
+                        date >> 24,
+                        (date >> 16) & 0xff);
+
+               /* Look for ext. headers: */
+               if (total_size <= data_size + MC_HEADER_SIZE)
+                       continue;
+
+               ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
+               ext_sigcount = ext_header->count;
+               ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
+
+               for (j = 0; j < ext_sigcount; j++) {
+                       sig = ext_sig->sig;
+                       pf = ext_sig->pf;
+
+                       pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
+                                j, sig, pf);
+
+                       ext_sig++;
+               }
+
+       }
+#endif
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_MUTEX(x86_cpu_microcode_mutex);
+/*
+ * Save this mc into mc_saved_data so that it will be loaded early when a CPU
+ * is hot added or resumes.
+ *
+ * Please make sure this mc is a valid microcode patch before calling
+ * this function.
+ */
+int save_mc_for_early(u8 *mc)
+{
+       struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
+       unsigned int mc_saved_count_init;
+       unsigned int mc_saved_count;
+       struct microcode_intel **mc_saved;
+       int ret = 0;
+       int i;
+
+       /*
+        * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
+        * hotplug.
+        */
+       mutex_lock(&x86_cpu_microcode_mutex);
+
+       mc_saved_count_init = mc_saved_data.mc_saved_count;
+       mc_saved_count = mc_saved_data.mc_saved_count;
+       mc_saved = mc_saved_data.mc_saved;
+
+       if (mc_saved && mc_saved_count)
+               memcpy(mc_saved_tmp, mc_saved,
+                      mc_saved_count * sizeof(struct microcode_intel *));
+       /*
+        * Save the microcode patch mc in mc_save_tmp structure if it's a newer
+        * Save the microcode patch mc in the mc_saved_tmp structure if it's a newer
+        */
+       mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count);
+
+       /*
+        * Save mc_saved_tmp in the global mc_saved_data.
+        */
+       ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count);
+       if (ret) {
+               pr_err("Cannot save microcode patch.\n");
+               goto out;
+       }
+
+       show_saved_mc();
+
+       /*
+        * Free old saved microcode data.
+        */
+       if (mc_saved) {
+               for (i = 0; i < mc_saved_count_init; i++)
+                       kfree(mc_saved[i]);
+               kfree(mc_saved);
+       }
+
+out:
+       mutex_unlock(&x86_cpu_microcode_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(save_mc_for_early);
+#endif
+
+static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
+{
+#ifdef CONFIG_X86_64
+       unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
+       unsigned int family, model, stepping;
+       char name[30];
+
+       native_cpuid(&eax, &ebx, &ecx, &edx);
+
+       family   = __x86_family(eax);
+       model    = x86_model(eax);
+       stepping = eax & 0xf;
+
+       sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping);
+
+       return get_builtin_firmware(cp, name);
+#else
+       return false;
+#endif
+}
+
+static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
+static __init enum ucode_state
+scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
+              unsigned long start, unsigned long size,
+              struct ucode_cpu_info *uci)
+{
+       struct cpio_data cd;
+       long offset = 0;
+#ifdef CONFIG_X86_32
+       char *p = (char *)__pa_nodebug(ucode_name);
+#else
+       char *p = ucode_name;
+#endif
+
+       cd.data = NULL;
+       cd.size = 0;
+
+       cd = find_cpio_data(p, (void *)start, size, &offset);
+       if (!cd.data) {
+               if (!load_builtin_intel_microcode(&cd))
+                       return UCODE_ERROR;
+       }
+
+       return get_matching_model_microcode(0, start, cd.data, cd.size,
+                                           mc_saved_data, initrd, uci);
+}
+
+/*
+ * Print ucode update info.
+ */
+static void
+print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
+{
+       int cpu = smp_processor_id();
+
+       pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
+               cpu,
+               uci->cpu_sig.rev,
+               date & 0xffff,
+               date >> 24,
+               (date >> 16) & 0xff);
+}
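
The arithmetic in print_ucode_info() (and in show_saved_mc() earlier) unpacks the Intel microcode header's date word, which carries the month in bits 31:24, the day in bits 23:16 and the four-digit year in bits 15:0 as packed hex/BCD digits. A tiny self-contained illustration, using a made-up header value:

/* Unpack a microcode header date word the same way print_ucode_info() does. */
#include <stdio.h>

int main(void)
{
	unsigned int date = 0x06112015;  /* hypothetical value: June 11, 2015 */

	printf("date = %04x-%02x-%02x\n",
	       date & 0xffff,            /* year:  2015 */
	       date >> 24,               /* month: 06   */
	       (date >> 16) & 0xff);     /* day:   11   */
	return 0;
}
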
+
+#ifdef CONFIG_X86_32
+
+static int delay_ucode_info;
+static int current_mc_date;
+
+/*
+ * Print the early-updated ucode info once printk() works. This is the delayed info dump.
+ */
+void show_ucode_info_early(void)
+{
+       struct ucode_cpu_info uci;
+
+       if (delay_ucode_info) {
+               collect_cpu_info_early(&uci);
+               print_ucode_info(&uci, current_mc_date);
+               delay_ucode_info = 0;
+       }
+}
+
+/*
+ * At this point, we cannot call printk() yet. Keep microcode patch number in
+ * mc_saved_data.mc_saved and delay printing microcode info in
+ * show_ucode_info_early() until printk() works.
+ */
+static void print_ucode(struct ucode_cpu_info *uci)
+{
+       struct microcode_intel *mc_intel;
+       int *delay_ucode_info_p;
+       int *current_mc_date_p;
+
+       mc_intel = uci->mc;
+       if (mc_intel == NULL)
+               return;
+
+       delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
+       current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
+
+       *delay_ucode_info_p = 1;
+       *current_mc_date_p = mc_intel->hdr.date;
+}
+#else
+
+/*
+ * Flush the global TLB. We only do this on x86_64, where paging has already
+ * been enabled and PGE should be enabled as well.
+ */
+static inline void flush_tlb_early(void)
+{
+       __native_flush_tlb_global_irq_disabled();
+}
+
+static inline void print_ucode(struct ucode_cpu_info *uci)
+{
+       struct microcode_intel *mc_intel;
+
+       mc_intel = uci->mc;
+       if (mc_intel == NULL)
+               return;
+
+       print_ucode_info(uci, mc_intel->hdr.date);
+}
+#endif
+
+static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
+{
+       struct microcode_intel *mc_intel;
+       unsigned int val[2];
+
+       mc_intel = uci->mc;
+       if (mc_intel == NULL)
+               return 0;
+
+       /* write microcode via MSR 0x79 */
+       native_wrmsr(MSR_IA32_UCODE_WRITE,
+             (unsigned long) mc_intel->bits,
+             (unsigned long) mc_intel->bits >> 16 >> 16);
+       native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+
+       /* As documented in the SDM: Do a CPUID 1 here */
+       sync_core();
+
+       /* get the current revision from MSR 0x8B */
+       native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+       if (val[1] != mc_intel->hdr.rev)
+               return -1;
+
+#ifdef CONFIG_X86_64
+       /* Flush the global TLB. This is a precaution. */
+       flush_tlb_early();
+#endif
+       uci->cpu_sig.rev = val[1];
+
+       if (early)
+               print_ucode(uci);
+       else
+               print_ucode_info(uci, mc_intel->hdr.date);
+
+       return 0;
+}
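
One detail worth noting in apply_microcode_early() above: the high half of the microcode data address is produced with two 16-bit shifts rather than a single shift by 32, because shifting a 32-bit unsigned long by its full width is undefined in C, whereas the double shift safely yields 0 on 32-bit kernels and the upper 32 bits on 64-bit ones. A small stand-alone demonstration of the idiom:

/* Why '>> 16 >> 16' instead of '>> 32': a shift by the full width of the
 * type is undefined behaviour, two half-width shifts are not. */
#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x12345678UL;      /* stand-in for the mc->bits address */
	unsigned long hi = addr >> 16 >> 16;    /* 0 on 32-bit, high word on 64-bit  */
	unsigned long lo = (unsigned int)addr;  /* low 32 bits either way            */

	printf("lo=%#lx hi=%#lx\n", lo, hi);
	return 0;
}
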
+
+/*
+ * This function converts microcode patch offsets previously stored in
+ * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data.
+ */
+int __init save_microcode_in_initrd_intel(void)
+{
+       unsigned int count = mc_saved_data.mc_saved_count;
+       struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
+       int ret = 0;
+
+       if (count == 0)
+               return ret;
+
+       copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
+       ret = save_microcode(&mc_saved_data, mc_saved, count);
+       if (ret)
+               pr_err("Cannot save microcode patches from initrd.\n");
+
+       show_saved_mc();
+
+       return ret;
+}
+
+static void __init
+_load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
+                     unsigned long *initrd,
+                     unsigned long start, unsigned long size)
+{
+       struct ucode_cpu_info uci;
+       enum ucode_state ret;
+
+       collect_cpu_info_early(&uci);
+
+       ret = scan_microcode(mc_saved_data, initrd, start, size, &uci);
+       if (ret != UCODE_OK)
+               return;
+
+       ret = load_microcode(mc_saved_data, initrd, start, &uci);
+       if (ret != UCODE_OK)
+               return;
+
+       apply_microcode_early(&uci, true);
+}
+
+void __init load_ucode_intel_bsp(void)
+{
+       u64 start, size;
+#ifdef CONFIG_X86_32
+       struct boot_params *p;
+
+       p       = (struct boot_params *)__pa_nodebug(&boot_params);
+       start   = p->hdr.ramdisk_image;
+       size    = p->hdr.ramdisk_size;
+
+       _load_ucode_intel_bsp(
+                       (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+                       (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
+                       start, size);
+#else
+       start   = boot_params.hdr.ramdisk_image + PAGE_OFFSET;
+       size    = boot_params.hdr.ramdisk_size;
+
+       _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
+#endif
+}
+
+void load_ucode_intel_ap(void)
+{
+       struct mc_saved_data *mc_saved_data_p;
+       struct ucode_cpu_info uci;
+       unsigned long *mc_saved_in_initrd_p;
+       unsigned long initrd_start_addr;
+       enum ucode_state ret;
+#ifdef CONFIG_X86_32
+       unsigned long *initrd_start_p;
+
+       mc_saved_in_initrd_p =
+               (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
+       mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
+       initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+       initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
+#else
+       mc_saved_data_p = &mc_saved_data;
+       mc_saved_in_initrd_p = mc_saved_in_initrd;
+       initrd_start_addr = initrd_start;
+#endif
+
+       /*
+        * If there is no valid ucode previously saved in memory, no need to
+        * update ucode on this AP.
+        */
+       if (mc_saved_data_p->mc_saved_count == 0)
+               return;
+
+       collect_cpu_info_early(&uci);
+       ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
+                            initrd_start_addr, &uci);
+
+       if (ret != UCODE_OK)
+               return;
+
+       apply_microcode_early(&uci, true);
+}
+
+void reload_ucode_intel(void)
+{
+       struct ucode_cpu_info uci;
+       enum ucode_state ret;
+
+       if (!mc_saved_data.mc_saved_count)
+               return;
+
+       collect_cpu_info_early(&uci);
+
+       ret = load_microcode_early(mc_saved_data.mc_saved,
+                                  mc_saved_data.mc_saved_count, &uci);
+       if (ret != UCODE_OK)
+               return;
+
+       apply_microcode_early(&uci, false);
+}
 
 static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 {
@@ -264,7 +1041,7 @@ static struct microcode_ops microcode_intel_ops = {
 
 struct microcode_ops * __init init_intel_microcode(void)
 {
-       struct cpuinfo_x86 *c = &cpu_data(0);
+       struct cpuinfo_x86 *c = &boot_cpu_data;
 
        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
            cpu_has(c, X86_FEATURE_IA64)) {
diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
deleted file mode 100644 (file)
index 37ea89c..0000000
+++ /dev/null
@@ -1,808 +0,0 @@
-/*
- *     Intel CPU microcode early update for Linux
- *
- *     Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
- *                        H Peter Anvin <hpa@zytor.com>
- *
- *     This allows to early upgrade microcode on Intel processors
- *     belonging to IA-32 family - PentiumPro, Pentium II,
- *     Pentium III, Xeon, Pentium 4, etc.
- *
- *     Reference: Section 9.11 of Volume 3, IA-32 Intel Architecture
- *     Software Developer's Manual.
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- */
-
-/*
- * This needs to be before all headers so that pr_debug in printk.h doesn't turn
- * printk calls into no_printk().
- *
- *#define DEBUG
- */
-
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/earlycpio.h>
-#include <linux/initrd.h>
-#include <linux/cpu.h>
-#include <asm/msr.h>
-#include <asm/microcode_intel.h>
-#include <asm/processor.h>
-#include <asm/tlbflush.h>
-#include <asm/setup.h>
-
-#undef pr_fmt
-#define pr_fmt(fmt)    "microcode: " fmt
-
-static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
-static struct mc_saved_data {
-       unsigned int mc_saved_count;
-       struct microcode_intel **mc_saved;
-} mc_saved_data;
-
-static enum ucode_state
-load_microcode_early(struct microcode_intel **saved,
-                    unsigned int num_saved, struct ucode_cpu_info *uci)
-{
-       struct microcode_intel *ucode_ptr, *new_mc = NULL;
-       struct microcode_header_intel *mc_hdr;
-       int new_rev, ret, i;
-
-       new_rev = uci->cpu_sig.rev;
-
-       for (i = 0; i < num_saved; i++) {
-               ucode_ptr = saved[i];
-               mc_hdr    = (struct microcode_header_intel *)ucode_ptr;
-
-               ret = has_newer_microcode(ucode_ptr,
-                                         uci->cpu_sig.sig,
-                                         uci->cpu_sig.pf,
-                                         new_rev);
-               if (!ret)
-                       continue;
-
-               new_rev = mc_hdr->rev;
-               new_mc  = ucode_ptr;
-       }
-
-       if (!new_mc)
-               return UCODE_NFOUND;
-
-       uci->mc = (struct microcode_intel *)new_mc;
-       return UCODE_OK;
-}
-
-static inline void
-copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd,
-                 unsigned long off, int num_saved)
-{
-       int i;
-
-       for (i = 0; i < num_saved; i++)
-               mc_saved[i] = (struct microcode_intel *)(initrd[i] + off);
-}
-
-#ifdef CONFIG_X86_32
-static void
-microcode_phys(struct microcode_intel **mc_saved_tmp,
-              struct mc_saved_data *mc_saved_data)
-{
-       int i;
-       struct microcode_intel ***mc_saved;
-
-       mc_saved = (struct microcode_intel ***)
-                  __pa_nodebug(&mc_saved_data->mc_saved);
-       for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
-               struct microcode_intel *p;
-
-               p = *(struct microcode_intel **)
-                       __pa_nodebug(mc_saved_data->mc_saved + i);
-               mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
-       }
-}
-#endif
-
-static enum ucode_state
-load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
-              unsigned long initrd_start, struct ucode_cpu_info *uci)
-{
-       struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
-       unsigned int count = mc_saved_data->mc_saved_count;
-
-       if (!mc_saved_data->mc_saved) {
-               copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count);
-
-               return load_microcode_early(mc_saved_tmp, count, uci);
-       } else {
-#ifdef CONFIG_X86_32
-               microcode_phys(mc_saved_tmp, mc_saved_data);
-               return load_microcode_early(mc_saved_tmp, count, uci);
-#else
-               return load_microcode_early(mc_saved_data->mc_saved,
-                                                   count, uci);
-#endif
-       }
-}
-
-/*
- * Given CPU signature and a microcode patch, this function finds if the
- * microcode patch has matching family and model with the CPU.
- */
-static enum ucode_state
-matching_model_microcode(struct microcode_header_intel *mc_header,
-                       unsigned long sig)
-{
-       unsigned int fam, model;
-       unsigned int fam_ucode, model_ucode;
-       struct extended_sigtable *ext_header;
-       unsigned long total_size = get_totalsize(mc_header);
-       unsigned long data_size = get_datasize(mc_header);
-       int ext_sigcount, i;
-       struct extended_signature *ext_sig;
-
-       fam   = __x86_family(sig);
-       model = x86_model(sig);
-
-       fam_ucode   = __x86_family(mc_header->sig);
-       model_ucode = x86_model(mc_header->sig);
-
-       if (fam == fam_ucode && model == model_ucode)
-               return UCODE_OK;
-
-       /* Look for ext. headers: */
-       if (total_size <= data_size + MC_HEADER_SIZE)
-               return UCODE_NFOUND;
-
-       ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
-       ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
-       ext_sigcount = ext_header->count;
-
-       for (i = 0; i < ext_sigcount; i++) {
-               fam_ucode   = __x86_family(ext_sig->sig);
-               model_ucode = x86_model(ext_sig->sig);
-
-               if (fam == fam_ucode && model == model_ucode)
-                       return UCODE_OK;
-
-               ext_sig++;
-       }
-       return UCODE_NFOUND;
-}
-
-static int
-save_microcode(struct mc_saved_data *mc_saved_data,
-              struct microcode_intel **mc_saved_src,
-              unsigned int mc_saved_count)
-{
-       int i, j;
-       struct microcode_intel **saved_ptr;
-       int ret;
-
-       if (!mc_saved_count)
-               return -EINVAL;
-
-       /*
-        * Copy new microcode data.
-        */
-       saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL);
-       if (!saved_ptr)
-               return -ENOMEM;
-
-       for (i = 0; i < mc_saved_count; i++) {
-               struct microcode_header_intel *mc_hdr;
-               struct microcode_intel *mc;
-               unsigned long size;
-
-               if (!mc_saved_src[i]) {
-                       ret = -EINVAL;
-                       goto err;
-               }
-
-               mc     = mc_saved_src[i];
-               mc_hdr = &mc->hdr;
-               size   = get_totalsize(mc_hdr);
-
-               saved_ptr[i] = kmalloc(size, GFP_KERNEL);
-               if (!saved_ptr[i]) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
-
-               memcpy(saved_ptr[i], mc, size);
-       }
-
-       /*
-        * Point to newly saved microcode.
-        */
-       mc_saved_data->mc_saved = saved_ptr;
-       mc_saved_data->mc_saved_count = mc_saved_count;
-
-       return 0;
-
-err:
-       for (j = 0; j <= i; j++)
-               kfree(saved_ptr[j]);
-       kfree(saved_ptr);
-
-       return ret;
-}
-
-/*
- * A microcode patch in ucode_ptr is saved into mc_saved
- * - if it has matching signature and newer revision compared to an existing
- *   patch mc_saved.
- * - or if it is a newly discovered microcode patch.
- *
- * The microcode patch should have matching model with CPU.
- *
- * Returns: The updated number @num_saved of saved microcode patches.
- */
-static unsigned int _save_mc(struct microcode_intel **mc_saved,
-                            u8 *ucode_ptr, unsigned int num_saved)
-{
-       struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
-       unsigned int sig, pf;
-       int found = 0, i;
-
-       mc_hdr = (struct microcode_header_intel *)ucode_ptr;
-
-       for (i = 0; i < num_saved; i++) {
-               mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
-               sig          = mc_saved_hdr->sig;
-               pf           = mc_saved_hdr->pf;
-
-               if (!find_matching_signature(ucode_ptr, sig, pf))
-                       continue;
-
-               found = 1;
-
-               if (mc_hdr->rev <= mc_saved_hdr->rev)
-                       continue;
-
-               /*
-                * Found an older ucode saved earlier. Replace it with
-                * this newer one.
-                */
-               mc_saved[i] = (struct microcode_intel *)ucode_ptr;
-               break;
-       }
-
-       /* Newly detected microcode, save it to memory. */
-       if (i >= num_saved && !found)
-               mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;
-
-       return num_saved;
-}
-
-/*
- * Get microcode matching with BSP's model. Only CPUs with the same model as
- * BSP can stay in the platform.
- */
-static enum ucode_state __init
-get_matching_model_microcode(int cpu, unsigned long start,
-                            void *data, size_t size,
-                            struct mc_saved_data *mc_saved_data,
-                            unsigned long *mc_saved_in_initrd,
-                            struct ucode_cpu_info *uci)
-{
-       u8 *ucode_ptr = data;
-       unsigned int leftover = size;
-       enum ucode_state state = UCODE_OK;
-       unsigned int mc_size;
-       struct microcode_header_intel *mc_header;
-       struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
-       unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
-       int i;
-
-       while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
-
-               if (leftover < sizeof(mc_header))
-                       break;
-
-               mc_header = (struct microcode_header_intel *)ucode_ptr;
-
-               mc_size = get_totalsize(mc_header);
-               if (!mc_size || mc_size > leftover ||
-                       microcode_sanity_check(ucode_ptr, 0) < 0)
-                       break;
-
-               leftover -= mc_size;
-
-               /*
-                * Since APs with same family and model as the BSP may boot in
-                * the platform, we need to find and save microcode patches
-                * with the same family and model as the BSP.
-                */
-               if (matching_model_microcode(mc_header, uci->cpu_sig.sig) !=
-                        UCODE_OK) {
-                       ucode_ptr += mc_size;
-                       continue;
-               }
-
-               mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count);
-
-               ucode_ptr += mc_size;
-       }
-
-       if (leftover) {
-               state = UCODE_ERROR;
-               goto out;
-       }
-
-       if (mc_saved_count == 0) {
-               state = UCODE_NFOUND;
-               goto out;
-       }
-
-       for (i = 0; i < mc_saved_count; i++)
-               mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start;
-
-       mc_saved_data->mc_saved_count = mc_saved_count;
-out:
-       return state;
-}
-
-static int collect_cpu_info_early(struct ucode_cpu_info *uci)
-{
-       unsigned int val[2];
-       unsigned int family, model;
-       struct cpu_signature csig;
-       unsigned int eax, ebx, ecx, edx;
-
-       csig.sig = 0;
-       csig.pf = 0;
-       csig.rev = 0;
-
-       memset(uci, 0, sizeof(*uci));
-
-       eax = 0x00000001;
-       ecx = 0;
-       native_cpuid(&eax, &ebx, &ecx, &edx);
-       csig.sig = eax;
-
-       family = __x86_family(csig.sig);
-       model  = x86_model(csig.sig);
-
-       if ((model >= 5) || (family > 6)) {
-               /* get processor flags from MSR 0x17 */
-               native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
-               csig.pf = 1 << ((val[1] >> 18) & 7);
-       }
-       native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);
-
-       /* As documented in the SDM: Do a CPUID 1 here */
-       sync_core();
-
-       /* get the current revision from MSR 0x8B */
-       native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-
-       csig.rev = val[1];
-
-       uci->cpu_sig = csig;
-       uci->valid = 1;
-
-       return 0;
-}
-
-#ifdef DEBUG
-static void show_saved_mc(void)
-{
-       int i, j;
-       unsigned int sig, pf, rev, total_size, data_size, date;
-       struct ucode_cpu_info uci;
-
-       if (mc_saved_data.mc_saved_count == 0) {
-               pr_debug("no microcode data saved.\n");
-               return;
-       }
-       pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count);
-
-       collect_cpu_info_early(&uci);
-
-       sig = uci.cpu_sig.sig;
-       pf = uci.cpu_sig.pf;
-       rev = uci.cpu_sig.rev;
-       pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
-
-       for (i = 0; i < mc_saved_data.mc_saved_count; i++) {
-               struct microcode_header_intel *mc_saved_header;
-               struct extended_sigtable *ext_header;
-               int ext_sigcount;
-               struct extended_signature *ext_sig;
-
-               mc_saved_header = (struct microcode_header_intel *)
-                                 mc_saved_data.mc_saved[i];
-               sig = mc_saved_header->sig;
-               pf = mc_saved_header->pf;
-               rev = mc_saved_header->rev;
-               total_size = get_totalsize(mc_saved_header);
-               data_size = get_datasize(mc_saved_header);
-               date = mc_saved_header->date;
-
-               pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
-                        i, sig, pf, rev, total_size,
-                        date & 0xffff,
-                        date >> 24,
-                        (date >> 16) & 0xff);
-
-               /* Look for ext. headers: */
-               if (total_size <= data_size + MC_HEADER_SIZE)
-                       continue;
-
-               ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
-               ext_sigcount = ext_header->count;
-               ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
-
-               for (j = 0; j < ext_sigcount; j++) {
-                       sig = ext_sig->sig;
-                       pf = ext_sig->pf;
-
-                       pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
-                                j, sig, pf);
-
-                       ext_sig++;
-               }
-
-       }
-}
-#else
-static inline void show_saved_mc(void)
-{
-}
-#endif
-
-#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
-static DEFINE_MUTEX(x86_cpu_microcode_mutex);
-/*
- * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
- * hot added or resumes.
- *
- * Please make sure this mc should be a valid microcode patch before calling
- * this function.
- */
-int save_mc_for_early(u8 *mc)
-{
-       struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
-       unsigned int mc_saved_count_init;
-       unsigned int mc_saved_count;
-       struct microcode_intel **mc_saved;
-       int ret = 0;
-       int i;
-
-       /*
-        * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
-        * hotplug.
-        */
-       mutex_lock(&x86_cpu_microcode_mutex);
-
-       mc_saved_count_init = mc_saved_data.mc_saved_count;
-       mc_saved_count = mc_saved_data.mc_saved_count;
-       mc_saved = mc_saved_data.mc_saved;
-
-       if (mc_saved && mc_saved_count)
-               memcpy(mc_saved_tmp, mc_saved,
-                      mc_saved_count * sizeof(struct microcode_intel *));
-       /*
-        * Save the microcode patch mc in mc_save_tmp structure if it's a newer
-        * version.
-        */
-       mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count);
-
-       /*
-        * Save the mc_save_tmp in global mc_saved_data.
-        */
-       ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count);
-       if (ret) {
-               pr_err("Cannot save microcode patch.\n");
-               goto out;
-       }
-
-       show_saved_mc();
-
-       /*
-        * Free old saved microcode data.
-        */
-       if (mc_saved) {
-               for (i = 0; i < mc_saved_count_init; i++)
-                       kfree(mc_saved[i]);
-               kfree(mc_saved);
-       }
-
-out:
-       mutex_unlock(&x86_cpu_microcode_mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(save_mc_for_early);
-#endif
-
-static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
-{
-#ifdef CONFIG_X86_64
-       unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
-       unsigned int family, model, stepping;
-       char name[30];
-
-       native_cpuid(&eax, &ebx, &ecx, &edx);
-
-       family   = __x86_family(eax);
-       model    = x86_model(eax);
-       stepping = eax & 0xf;
-
-       sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping);
-
-       return get_builtin_firmware(cp, name);
-#else
-       return false;
-#endif
-}
-
-static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
-static __init enum ucode_state
-scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
-              unsigned long start, unsigned long size,
-              struct ucode_cpu_info *uci)
-{
-       struct cpio_data cd;
-       long offset = 0;
-#ifdef CONFIG_X86_32
-       char *p = (char *)__pa_nodebug(ucode_name);
-#else
-       char *p = ucode_name;
-#endif
-
-       cd.data = NULL;
-       cd.size = 0;
-
-       cd = find_cpio_data(p, (void *)start, size, &offset);
-       if (!cd.data) {
-               if (!load_builtin_intel_microcode(&cd))
-                       return UCODE_ERROR;
-       }
-
-       return get_matching_model_microcode(0, start, cd.data, cd.size,
-                                           mc_saved_data, initrd, uci);
-}
-
-/*
- * Print ucode update info.
- */
-static void
-print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
-{
-       int cpu = smp_processor_id();
-
-       pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
-               cpu,
-               uci->cpu_sig.rev,
-               date & 0xffff,
-               date >> 24,
-               (date >> 16) & 0xff);
-}
-
-#ifdef CONFIG_X86_32
-
-static int delay_ucode_info;
-static int current_mc_date;
-
-/*
- * Print early updated ucode info after printk works. This is delayed info dump.
- */
-void show_ucode_info_early(void)
-{
-       struct ucode_cpu_info uci;
-
-       if (delay_ucode_info) {
-               collect_cpu_info_early(&uci);
-               print_ucode_info(&uci, current_mc_date);
-               delay_ucode_info = 0;
-       }
-}
-
-/*
- * At this point, we can not call printk() yet. Keep microcode patch number in
- * mc_saved_data.mc_saved and delay printing microcode info in
- * show_ucode_info_early() until printk() works.
- */
-static void print_ucode(struct ucode_cpu_info *uci)
-{
-       struct microcode_intel *mc_intel;
-       int *delay_ucode_info_p;
-       int *current_mc_date_p;
-
-       mc_intel = uci->mc;
-       if (mc_intel == NULL)
-               return;
-
-       delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
-       current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
-
-       *delay_ucode_info_p = 1;
-       *current_mc_date_p = mc_intel->hdr.date;
-}
-#else
-
-/*
- * Flush global tlb. We only do this in x86_64 where paging has been enabled
- * already and PGE should be enabled as well.
- */
-static inline void flush_tlb_early(void)
-{
-       __native_flush_tlb_global_irq_disabled();
-}
-
-static inline void print_ucode(struct ucode_cpu_info *uci)
-{
-       struct microcode_intel *mc_intel;
-
-       mc_intel = uci->mc;
-       if (mc_intel == NULL)
-               return;
-
-       print_ucode_info(uci, mc_intel->hdr.date);
-}
-#endif
-
-static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
-{
-       struct microcode_intel *mc_intel;
-       unsigned int val[2];
-
-       mc_intel = uci->mc;
-       if (mc_intel == NULL)
-               return 0;
-
-       /* write microcode via MSR 0x79 */
-       native_wrmsr(MSR_IA32_UCODE_WRITE,
-             (unsigned long) mc_intel->bits,
-             (unsigned long) mc_intel->bits >> 16 >> 16);
-       native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);
-
-       /* As documented in the SDM: Do a CPUID 1 here */
-       sync_core();
-
-       /* get the current revision from MSR 0x8B */
-       native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-       if (val[1] != mc_intel->hdr.rev)
-               return -1;
-
-#ifdef CONFIG_X86_64
-       /* Flush global tlb. This is precaution. */
-       flush_tlb_early();
-#endif
-       uci->cpu_sig.rev = val[1];
-
-       if (early)
-               print_ucode(uci);
-       else
-               print_ucode_info(uci, mc_intel->hdr.date);
-
-       return 0;
-}
-
-/*
- * This function converts microcode patch offsets previously stored in
- * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data.
- */
-int __init save_microcode_in_initrd_intel(void)
-{
-       unsigned int count = mc_saved_data.mc_saved_count;
-       struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
-       int ret = 0;
-
-       if (count == 0)
-               return ret;
-
-       copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
-       ret = save_microcode(&mc_saved_data, mc_saved, count);
-       if (ret)
-               pr_err("Cannot save microcode patches from initrd.\n");
-
-       show_saved_mc();
-
-       return ret;
-}
-
-static void __init
-_load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
-                     unsigned long *initrd,
-                     unsigned long start, unsigned long size)
-{
-       struct ucode_cpu_info uci;
-       enum ucode_state ret;
-
-       collect_cpu_info_early(&uci);
-
-       ret = scan_microcode(mc_saved_data, initrd, start, size, &uci);
-       if (ret != UCODE_OK)
-               return;
-
-       ret = load_microcode(mc_saved_data, initrd, start, &uci);
-       if (ret != UCODE_OK)
-               return;
-
-       apply_microcode_early(&uci, true);
-}
-
-void __init load_ucode_intel_bsp(void)
-{
-       u64 start, size;
-#ifdef CONFIG_X86_32
-       struct boot_params *p;
-
-       p       = (struct boot_params *)__pa_nodebug(&boot_params);
-       start   = p->hdr.ramdisk_image;
-       size    = p->hdr.ramdisk_size;
-
-       _load_ucode_intel_bsp(
-                       (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
-                       (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
-                       start, size);
-#else
-       start   = boot_params.hdr.ramdisk_image + PAGE_OFFSET;
-       size    = boot_params.hdr.ramdisk_size;
-
-       _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
-#endif
-}
-
-void load_ucode_intel_ap(void)
-{
-       struct mc_saved_data *mc_saved_data_p;
-       struct ucode_cpu_info uci;
-       unsigned long *mc_saved_in_initrd_p;
-       unsigned long initrd_start_addr;
-       enum ucode_state ret;
-#ifdef CONFIG_X86_32
-       unsigned long *initrd_start_p;
-
-       mc_saved_in_initrd_p =
-               (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
-       mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
-       initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
-       initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
-#else
-       mc_saved_data_p = &mc_saved_data;
-       mc_saved_in_initrd_p = mc_saved_in_initrd;
-       initrd_start_addr = initrd_start;
-#endif
-
-       /*
-        * If there is no valid ucode previously saved in memory, no need to
-        * update ucode on this AP.
-        */
-       if (mc_saved_data_p->mc_saved_count == 0)
-               return;
-
-       collect_cpu_info_early(&uci);
-       ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
-                            initrd_start_addr, &uci);
-
-       if (ret != UCODE_OK)
-               return;
-
-       apply_microcode_early(&uci, true);
-}
-
-void reload_ucode_intel(void)
-{
-       struct ucode_cpu_info uci;
-       enum ucode_state ret;
-
-       if (!mc_saved_data.mc_saved_count)
-               return;
-
-       collect_cpu_info_early(&uci);
-
-       ret = load_microcode_early(mc_saved_data.mc_saved,
-                                  mc_saved_data.mc_saved_count, &uci);
-       if (ret != UCODE_OK)
-               return;
-
-       apply_microcode_early(&uci, false);
-}
index 1883d252ff7d60ce7707a8108283144de64ce14d..b96896bcbdaf1ac88d4dcfa2a7ce3a98d291ccad 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/firmware.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 
 #include <asm/microcode_intel.h>
 #include <asm/processor.h>
index 381c8b9b3a33570fcf88b82fa69d1fc43298d89f..20e242ea1bc46b5f5828c7b95071d920853b7609 100644 (file)
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
 
-static void (*hv_kexec_handler)(void);
-static void (*hv_crash_handler)(struct pt_regs *regs);
-
 #if IS_ENABLED(CONFIG_HYPERV)
 static void (*vmbus_handler)(void);
+static void (*hv_kexec_handler)(void);
+static void (*hv_crash_handler)(struct pt_regs *regs);
 
 void hyperv_vector_handler(struct pt_regs *regs)
 {
@@ -96,8 +95,8 @@ void hv_remove_crash_handler(void)
        hv_crash_handler = NULL;
 }
 EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
-#endif
 
+#ifdef CONFIG_KEXEC_CORE
 static void hv_machine_shutdown(void)
 {
        if (kexec_in_progress && hv_kexec_handler)
@@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs)
                hv_crash_handler(regs);
        native_machine_crash_shutdown(regs);
 }
-
+#endif /* CONFIG_KEXEC_CORE */
+#endif /* CONFIG_HYPERV */
 
 static uint32_t  __init ms_hyperv_platform(void)
 {
@@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void)
        no_timer_check = 1;
 #endif
 
+#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
        machine_ops.shutdown = hv_machine_shutdown;
        machine_ops.crash_shutdown = hv_machine_crash_shutdown;
+#endif
        mark_tsc_unstable("running on Hyper-V");
 }
 
index 66dd3fe99b82d0e54e1df27255d27e4accc69547..4562cf070c279d5edeb0e18ae94c8bff94166dd8 100644 (file)
@@ -1175,7 +1175,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)
         * skip the schedulability test here, it will be performed
         * at commit time (->commit_txn) as a whole.
         */
-       if (cpuc->group_flag & PERF_EVENT_TXN)
+       if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
                goto done_collect;
 
        ret = x86_pmu.schedule_events(cpuc, n, assign);
@@ -1326,7 +1326,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
         * XXX assumes any ->del() called during a TXN will only be on
         * an event added during that same TXN.
         */
-       if (cpuc->group_flag & PERF_EVENT_TXN)
+       if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
                return;
 
        /*
@@ -1748,11 +1748,22 @@ static inline void x86_pmu_read(struct perf_event *event)
  * Start group events scheduling transaction
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
+ *
+ * We only support PERF_PMU_TXN_ADD transactions. Save the
+ * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
+ * transactions.
  */
-static void x86_pmu_start_txn(struct pmu *pmu)
+static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
 {
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+       WARN_ON_ONCE(cpuc->txn_flags);          /* txn already in flight */
+
+       cpuc->txn_flags = txn_flags;
+       if (txn_flags & ~PERF_PMU_TXN_ADD)
+               return;
+
        perf_pmu_disable(pmu);
-       __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
        __this_cpu_write(cpu_hw_events.n_txn, 0);
 }
 
@@ -1763,7 +1774,16 @@ static void x86_pmu_start_txn(struct pmu *pmu)
  */
 static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
-       __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
+       unsigned int txn_flags;
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+       WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
+
+       txn_flags = cpuc->txn_flags;
+       cpuc->txn_flags = 0;
+       if (txn_flags & ~PERF_PMU_TXN_ADD)
+               return;
+
        /*
         * Truncate collected array by the number of events added in this
         * transaction. See x86_pmu_add() and x86_pmu_*_txn().
@@ -1786,6 +1806,13 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
        int assign[X86_PMC_IDX_MAX];
        int n, ret;
 
+       WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
+
+       if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
+               cpuc->txn_flags = 0;
+               return 0;
+       }
+
        n = cpuc->n_events;
 
        if (!x86_pmu_initialized())
@@ -1801,7 +1828,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
-       cpuc->group_flag &= ~PERF_EVENT_TXN;
+       cpuc->txn_flags = 0;
        perf_pmu_enable(pmu);
        return 0;
 }
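
The reworked x86_pmu_start_txn()/x86_pmu_commit_txn()/x86_pmu_cancel_txn()
callbacks above are driven by the perf core when it schedules an event group.
A minimal sketch of that calling convention follows; the caller's name and the
group iterator are illustrative only and are not part of this patch.

        static int sched_in_group_sketch(struct pmu *pmu, struct perf_event *group)
        {
                struct perf_event *event;

                /* Defer the schedulability test until commit time. */
                pmu->start_txn(pmu, PERF_PMU_TXN_ADD);

                for_each_group_event(event, group) {    /* hypothetical iterator */
                        if (pmu->add(event, PERF_EF_START))
                                goto fail;
                }

                /* Schedule all collected events in one go. */
                if (!pmu->commit_txn(pmu))
                        return 0;
        fail:
                /* Unwinds the n_txn events collected during this transaction. */
                pmu->cancel_txn(pmu);
                return -EAGAIN;
        }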
index 5edf6d868fc16c1e24633d1ea1b727b69fb68584..499f533dd3ccbd22bb84423649e031f2759080ca 100644 (file)
@@ -47,6 +47,7 @@ enum extra_reg_type {
        EXTRA_REG_RSP_1 = 1,    /* offcore_response_1 */
        EXTRA_REG_LBR   = 2,    /* lbr_select */
        EXTRA_REG_LDLAT = 3,    /* ld_lat_threshold */
+       EXTRA_REG_FE    = 4,    /* fe_* */
 
        EXTRA_REG_MAX           /* number of entries needed */
 };
@@ -195,7 +196,7 @@ struct cpu_hw_events {
 
        int                     n_excl; /* the number of exclusive events */
 
-       unsigned int            group_flag;
+       unsigned int            txn_flags;
        int                     is_fake;
 
        /*
index 3fefebfbdf4bb4f68e5a5bf1647550ad0cd71a75..f63360be22387d4fb4cb30728f1834ee4cbd6228 100644 (file)
@@ -205,6 +205,11 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+       /*
+        * Note that the low 8 bits of the eventsel code do not form a contiguous
+        * field; they contain some #GPing bits. These are masked out.
+        */
+       INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
        EVENT_EXTRA_END
 };
 
@@ -250,7 +255,7 @@ struct event_constraint intel_bdw_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
-       INTEL_EVENT_CONSTRAINT(0xa3, 0x4),      /* CYCLE_ACTIVITY.* */
+       INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4),    /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
        EVENT_CONSTRAINT_END
 };
 
@@ -2891,6 +2896,8 @@ PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
 
+PMU_FORMAT_ATTR(frontend, "config1:0-23");
+
 static struct attribute *intel_arch3_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
@@ -2907,6 +2914,11 @@ static struct attribute *intel_arch3_formats_attr[] = {
        NULL,
 };
 
+static struct attribute *skl_format_attr[] = {
+       &format_attr_frontend.attr,
+       NULL,
+};
+
 static __initconst const struct x86_pmu core_pmu = {
        .name                   = "core",
        .handle_irq             = x86_pmu_handle_irq,
@@ -3516,7 +3528,8 @@ __init int intel_pmu_init(void)
 
                x86_pmu.hw_config = hsw_hw_config;
                x86_pmu.get_event_constraints = hsw_get_event_constraints;
-               x86_pmu.cpu_events = hsw_events_attrs;
+               x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
+                                                 skl_format_attr);
                WARN_ON(!x86_pmu.format_attrs);
                x86_pmu.cpu_events = hsw_events_attrs;
                pr_cont("Skylake events, ");
index d1c0f254afbeefe61fcfaeeb7625664d7d352918..2cad71d1b14cfb36df83674dac992f51c37db63a 100644 (file)
@@ -495,6 +495,19 @@ static int bts_event_init(struct perf_event *event)
        if (x86_add_exclusive(x86_lbr_exclusive_bts))
                return -EBUSY;
 
+       /*
+        * BTS leaks kernel addresses even when CPL0 tracing is
+        * disabled, so disallow intel_bts driver for unprivileged
+        * users on paranoid systems since it provides trace data
+        * to the user in a zero-copy fashion.
+        *
+        * Note that the default paranoia setting permits unprivileged
+        * users to profile the kernel.
+        */
+       if (event->attr.exclude_kernel && perf_paranoid_kernel() &&
+           !capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
        ret = x86_reserve_hardware();
        if (ret) {
                x86_del_exclusive(x86_lbr_exclusive_bts);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cstate.c b/arch/x86/kernel/cpu/perf_event_intel_cstate.c
new file mode 100644 (file)
index 0000000..75a38b5
--- /dev/null
@@ -0,0 +1,694 @@
+/*
+ * perf_event_intel_cstate.c: support cstate residency counters
+ *
+ * Copyright (C) 2015, Intel Corp.
+ * Author: Kan Liang (kan.liang@intel.com)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ */
+
+/*
+ * This file exports cstate-related free-running (read-only) counters
+ * for perf. These counters may be used simultaneously by other tools,
+ * such as turbostat. However, it still makes sense to implement them
+ * in perf, because we can conveniently collect them together with
+ * other events and use them from tools without special MSR access
+ * code.
+ *
+ * The events only support system-wide mode counting. There is no
+ * sampling support because it is not supported by the hardware.
+ *
+ * According to counters' scope and category, two PMUs are registered
+ * with the perf_event core subsystem.
+ *  - 'cstate_core': The counter is available for each physical core.
+ *    The counters include CORE_C*_RESIDENCY.
+ *  - 'cstate_pkg': The counter is available for each physical package.
+ *    The counters include PKG_C*_RESIDENCY.
+ *
+ * All of these counters are specified in the Intel® 64 and IA-32
+ * Architectures Software Developer's Manual, Vol. 3B.
+ *
+ * Model specific counters:
+ *     MSR_CORE_C1_RES: CORE C1 Residency Counter
+ *                      perf code: 0x00
+ *                      Available model: SLM,AMT
+ *                      Scope: Core (each processor core has a MSR)
+ *     MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
+ *                            perf code: 0x01
+ *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *                            Scope: Core
+ *     MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
+ *                            perf code: 0x02
+ *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *                            Scope: Core
+ *     MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
+ *                            perf code: 0x03
+ *                            Available model: SNB,IVB,HSW,BDW,SKL
+ *                            Scope: Core
+ *     MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
+ *                            perf code: 0x00
+ *                            Available model: SNB,IVB,HSW,BDW,SKL
+ *                            Scope: Package (physical package)
+ *     MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
+ *                            perf code: 0x01
+ *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *                            Scope: Package (physical package)
+ *     MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
+ *                            perf code: 0x02
+ *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *                            Scope: Package (physical package)
+ *     MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
+ *                            perf code: 0x03
+ *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *                            Scope: Package (physical package)
+ *     MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
+ *                            perf code: 0x04
+ *                            Available model: HSW ULT only
+ *                            Scope: Package (physical package)
+ *     MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
+ *                            perf code: 0x05
+ *                            Available model: HSW ULT only
+ *                            Scope: Package (physical package)
+ *     MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
+ *                            perf code: 0x06
+ *                            Available model: HSW ULT only
+ *                            Scope: Package (physical package)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/perf_event.h>
+#include <asm/cpu_device_id.h>
+#include "perf_event.h"
+
+#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)                \
+static ssize_t __cstate_##_var##_show(struct kobject *kobj,    \
+                               struct kobj_attribute *attr,    \
+                               char *page)                     \
+{                                                              \
+       BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);             \
+       return sprintf(page, _format "\n");                     \
+}                                                              \
+static struct kobj_attribute format_attr_##_var =              \
+       __ATTR(_name, 0444, __cstate_##_var##_show, NULL)
+
+static ssize_t cstate_get_attr_cpumask(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf);
+
+struct perf_cstate_msr {
+       u64     msr;
+       struct  perf_pmu_events_attr *attr;
+       bool    (*test)(int idx);
+};
+
+
+/* cstate_core PMU */
+
+static struct pmu cstate_core_pmu;
+static bool has_cstate_core;
+
+enum perf_cstate_core_id {
+       /*
+        * cstate_core events
+        */
+       PERF_CSTATE_CORE_C1_RES = 0,
+       PERF_CSTATE_CORE_C3_RES,
+       PERF_CSTATE_CORE_C6_RES,
+       PERF_CSTATE_CORE_C7_RES,
+
+       PERF_CSTATE_CORE_EVENT_MAX,
+};
+
+bool test_core(int idx)
+{
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+           boot_cpu_data.x86 != 6)
+               return false;
+
+       switch (boot_cpu_data.x86_model) {
+       case 30: /* 45nm Nehalem    */
+       case 26: /* 45nm Nehalem-EP */
+       case 46: /* 45nm Nehalem-EX */
+
+       case 37: /* 32nm Westmere    */
+       case 44: /* 32nm Westmere-EP */
+       case 47: /* 32nm Westmere-EX */
+               if (idx == PERF_CSTATE_CORE_C3_RES ||
+                   idx == PERF_CSTATE_CORE_C6_RES)
+                       return true;
+               break;
+       case 42: /* 32nm SandyBridge         */
+       case 45: /* 32nm SandyBridge-E/EN/EP */
+
+       case 58: /* 22nm IvyBridge       */
+       case 62: /* 22nm IvyBridge-EP/EX */
+
+       case 60: /* 22nm Haswell Core */
+       case 63: /* 22nm Haswell Server */
+       case 69: /* 22nm Haswell ULT */
+       case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+
+       case 61: /* 14nm Broadwell Core-M */
+       case 86: /* 14nm Broadwell Xeon D */
+       case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
+       case 79: /* 14nm Broadwell Server */
+
+       case 78: /* 14nm Skylake Mobile */
+       case 94: /* 14nm Skylake Desktop */
+               if (idx == PERF_CSTATE_CORE_C3_RES ||
+                   idx == PERF_CSTATE_CORE_C6_RES ||
+                   idx == PERF_CSTATE_CORE_C7_RES)
+                       return true;
+               break;
+       case 55: /* 22nm Atom "Silvermont"                */
+       case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
+       case 76: /* 14nm Atom "Airmont"                   */
+               if (idx == PERF_CSTATE_CORE_C1_RES ||
+                   idx == PERF_CSTATE_CORE_C6_RES)
+                       return true;
+               break;
+       }
+
+       return false;
+}
+
+PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
+PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
+PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
+PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");
+
+static struct perf_cstate_msr core_msr[] = {
+       [PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,          &evattr_cstate_core_c1, test_core, },
+       [PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,    &evattr_cstate_core_c3, test_core, },
+       [PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,    &evattr_cstate_core_c6, test_core, },
+       [PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,    &evattr_cstate_core_c7, test_core, },
+};
+
+static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
+       NULL,
+};
+
+static struct attribute_group core_events_attr_group = {
+       .name = "events",
+       .attrs = core_events_attrs,
+};
+
+DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
+static struct attribute *core_format_attrs[] = {
+       &format_attr_core_event.attr,
+       NULL,
+};
+
+static struct attribute_group core_format_attr_group = {
+       .name = "format",
+       .attrs = core_format_attrs,
+};
+
+static cpumask_t cstate_core_cpu_mask;
+static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);
+
+static struct attribute *cstate_cpumask_attrs[] = {
+       &dev_attr_cpumask.attr,
+       NULL,
+};
+
+static struct attribute_group cpumask_attr_group = {
+       .attrs = cstate_cpumask_attrs,
+};
+
+static const struct attribute_group *core_attr_groups[] = {
+       &core_events_attr_group,
+       &core_format_attr_group,
+       &cpumask_attr_group,
+       NULL,
+};
+
+/* cstate_core PMU end */
+
+
+/* cstate_pkg PMU */
+
+static struct pmu cstate_pkg_pmu;
+static bool has_cstate_pkg;
+
+enum perf_cstate_pkg_id {
+       /*
+        * cstate_pkg events
+        */
+       PERF_CSTATE_PKG_C2_RES = 0,
+       PERF_CSTATE_PKG_C3_RES,
+       PERF_CSTATE_PKG_C6_RES,
+       PERF_CSTATE_PKG_C7_RES,
+       PERF_CSTATE_PKG_C8_RES,
+       PERF_CSTATE_PKG_C9_RES,
+       PERF_CSTATE_PKG_C10_RES,
+
+       PERF_CSTATE_PKG_EVENT_MAX,
+};
+
+bool test_pkg(int idx)
+{
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+           boot_cpu_data.x86 != 6)
+               return false;
+
+       switch (boot_cpu_data.x86_model) {
+       case 30: /* 45nm Nehalem    */
+       case 26: /* 45nm Nehalem-EP */
+       case 46: /* 45nm Nehalem-EX */
+
+       case 37: /* 32nm Westmere    */
+       case 44: /* 32nm Westmere-EP */
+       case 47: /* 32nm Westmere-EX */
+               if (idx == PERF_CSTATE_CORE_C3_RES ||
+                   idx == PERF_CSTATE_CORE_C6_RES ||
+                   idx == PERF_CSTATE_CORE_C7_RES)
+                       return true;
+               break;
+       case 42: /* 32nm SandyBridge         */
+       case 45: /* 32nm SandyBridge-E/EN/EP */
+
+       case 58: /* 22nm IvyBridge       */
+       case 62: /* 22nm IvyBridge-EP/EX */
+
+       case 60: /* 22nm Haswell Core */
+       case 63: /* 22nm Haswell Server */
+       case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+
+       case 61: /* 14nm Broadwell Core-M */
+       case 86: /* 14nm Broadwell Xeon D */
+       case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
+       case 79: /* 14nm Broadwell Server */
+
+       case 78: /* 14nm Skylake Mobile */
+       case 94: /* 14nm Skylake Desktop */
+               if (idx == PERF_CSTATE_PKG_C2_RES ||
+                   idx == PERF_CSTATE_PKG_C3_RES ||
+                   idx == PERF_CSTATE_PKG_C6_RES ||
+                   idx == PERF_CSTATE_PKG_C7_RES)
+                       return true;
+               break;
+       case 55: /* 22nm Atom "Silvermont"                */
+       case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
+       case 76: /* 14nm Atom "Airmont"                   */
+               if (idx == PERF_CSTATE_CORE_C6_RES)
+                       return true;
+               break;
+       case 69: /* 22nm Haswell ULT */
+               if (idx == PERF_CSTATE_PKG_C2_RES ||
+                   idx == PERF_CSTATE_PKG_C3_RES ||
+                   idx == PERF_CSTATE_PKG_C6_RES ||
+                   idx == PERF_CSTATE_PKG_C7_RES ||
+                   idx == PERF_CSTATE_PKG_C8_RES ||
+                   idx == PERF_CSTATE_PKG_C9_RES ||
+                   idx == PERF_CSTATE_PKG_C10_RES)
+                       return true;
+               break;
+       }
+
+       return false;
+}
+
+PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
+PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
+PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
+PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
+PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
+PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
+PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");
+
+static struct perf_cstate_msr pkg_msr[] = {
+       [PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,      &evattr_cstate_pkg_c2,  test_pkg, },
+       [PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,      &evattr_cstate_pkg_c3,  test_pkg, },
+       [PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,      &evattr_cstate_pkg_c6,  test_pkg, },
+       [PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,      &evattr_cstate_pkg_c7,  test_pkg, },
+       [PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,      &evattr_cstate_pkg_c8,  test_pkg, },
+       [PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,      &evattr_cstate_pkg_c9,  test_pkg, },
+       [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,    &evattr_cstate_pkg_c10, test_pkg, },
+};
+
+static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
+       NULL,
+};
+
+static struct attribute_group pkg_events_attr_group = {
+       .name = "events",
+       .attrs = pkg_events_attrs,
+};
+
+DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
+static struct attribute *pkg_format_attrs[] = {
+       &format_attr_pkg_event.attr,
+       NULL,
+};
+static struct attribute_group pkg_format_attr_group = {
+       .name = "format",
+       .attrs = pkg_format_attrs,
+};
+
+static cpumask_t cstate_pkg_cpu_mask;
+
+static const struct attribute_group *pkg_attr_groups[] = {
+       &pkg_events_attr_group,
+       &pkg_format_attr_group,
+       &cpumask_attr_group,
+       NULL,
+};
+
+/* cstate_pkg PMU end */
+
+static ssize_t cstate_get_attr_cpumask(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
+{
+       struct pmu *pmu = dev_get_drvdata(dev);
+
+       if (pmu == &cstate_core_pmu)
+               return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
+       else if (pmu == &cstate_pkg_pmu)
+               return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
+       else
+               return 0;
+}
+
+static int cstate_pmu_event_init(struct perf_event *event)
+{
+       u64 cfg = event->attr.config;
+       int ret = 0;
+
+       if (event->attr.type != event->pmu->type)
+               return -ENOENT;
+
+       /* unsupported modes and filters */
+       if (event->attr.exclude_user   ||
+           event->attr.exclude_kernel ||
+           event->attr.exclude_hv     ||
+           event->attr.exclude_idle   ||
+           event->attr.exclude_host   ||
+           event->attr.exclude_guest  ||
+           event->attr.sample_period) /* no sampling */
+               return -EINVAL;
+
+       if (event->pmu == &cstate_core_pmu) {
+               if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
+                       return -EINVAL;
+               if (!core_msr[cfg].attr)
+                       return -EINVAL;
+               event->hw.event_base = core_msr[cfg].msr;
+       } else if (event->pmu == &cstate_pkg_pmu) {
+               if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
+                       return -EINVAL;
+               if (!pkg_msr[cfg].attr)
+                       return -EINVAL;
+               event->hw.event_base = pkg_msr[cfg].msr;
+       } else
+               return -ENOENT;
+
+       /* must be done before validate_group */
+       event->hw.config = cfg;
+       event->hw.idx = -1;
+
+       return ret;
+}
+
+static inline u64 cstate_pmu_read_counter(struct perf_event *event)
+{
+       u64 val;
+
+       rdmsrl(event->hw.event_base, val);
+       return val;
+}
+
+static void cstate_pmu_event_update(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       u64 prev_raw_count, new_raw_count;
+
+again:
+       prev_raw_count = local64_read(&hwc->prev_count);
+       new_raw_count = cstate_pmu_read_counter(event);
+
+       if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+                           new_raw_count) != prev_raw_count)
+               goto again;
+
+       local64_add(new_raw_count - prev_raw_count, &event->count);
+}
+
+static void cstate_pmu_event_start(struct perf_event *event, int mode)
+{
+       local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
+}
+
+static void cstate_pmu_event_stop(struct perf_event *event, int mode)
+{
+       cstate_pmu_event_update(event);
+}
+
+static void cstate_pmu_event_del(struct perf_event *event, int mode)
+{
+       cstate_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static int cstate_pmu_event_add(struct perf_event *event, int mode)
+{
+       if (mode & PERF_EF_START)
+               cstate_pmu_event_start(event, mode);
+
+       return 0;
+}
+
+static void cstate_cpu_exit(int cpu)
+{
+       int i, id, target;
+
+       /* cpu exit for cstate core */
+       if (has_cstate_core) {
+               id = topology_core_id(cpu);
+               target = -1;
+
+               for_each_online_cpu(i) {
+                       if (i == cpu)
+                               continue;
+                       if (id == topology_core_id(i)) {
+                               target = i;
+                               break;
+                       }
+               }
+               if (cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask) && target >= 0)
+                       cpumask_set_cpu(target, &cstate_core_cpu_mask);
+               WARN_ON(cpumask_empty(&cstate_core_cpu_mask));
+               if (target >= 0)
+                       perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
+       }
+
+       /* cpu exit for cstate pkg */
+       if (has_cstate_pkg) {
+               id = topology_physical_package_id(cpu);
+               target = -1;
+
+               for_each_online_cpu(i) {
+                       if (i == cpu)
+                               continue;
+                       if (id == topology_physical_package_id(i)) {
+                               target = i;
+                               break;
+                       }
+               }
+               if (cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask) && target >= 0)
+                       cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
+               WARN_ON(cpumask_empty(&cstate_pkg_cpu_mask));
+               if (target >= 0)
+                       perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
+       }
+}
+
+static void cstate_cpu_init(int cpu)
+{
+       int i, id;
+
+       /* cpu init for cstate core */
+       if (has_cstate_core) {
+               id = topology_core_id(cpu);
+               for_each_cpu(i, &cstate_core_cpu_mask) {
+                       if (id == topology_core_id(i))
+                               break;
+               }
+               if (i >= nr_cpu_ids)
+                       cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
+       }
+
+       /* cpu init for cstate pkg */
+       if (has_cstate_pkg) {
+               id = topology_physical_package_id(cpu);
+               for_each_cpu(i, &cstate_pkg_cpu_mask) {
+                       if (id == topology_physical_package_id(i))
+                               break;
+               }
+               if (i >= nr_cpu_ids)
+                       cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
+       }
+}
+
+static int cstate_cpu_notifier(struct notifier_block *self,
+                                 unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (long)hcpu;
+
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_UP_PREPARE:
+               break;
+       case CPU_STARTING:
+               cstate_cpu_init(cpu);
+               break;
+       case CPU_UP_CANCELED:
+       case CPU_DYING:
+               break;
+       case CPU_ONLINE:
+       case CPU_DEAD:
+               break;
+       case CPU_DOWN_PREPARE:
+               cstate_cpu_exit(cpu);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+/*
+ * Probe the cstate events and insert the available ones into the sysfs attrs.
+ * Return false if there are no available events.
+ */
+static bool cstate_probe_msr(struct perf_cstate_msr *msr,
+                            struct attribute   **events_attrs,
+                            int max_event_nr)
+{
+       int i, j = 0;
+       u64 val;
+
+       /* Probe the cstate events. */
+       for (i = 0; i < max_event_nr; i++) {
+               if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
+                       msr[i].attr = NULL;
+       }
+
+       /* List remaining events in the sysfs attrs. */
+       for (i = 0; i < max_event_nr; i++) {
+               if (msr[i].attr)
+                       events_attrs[j++] = &msr[i].attr->attr.attr;
+       }
+       events_attrs[j] = NULL;
+
+       return (j > 0) ? true : false;
+}
+
+static int __init cstate_init(void)
+{
+       /* SLM has different MSR for PKG C6 */
+       switch (boot_cpu_data.x86_model) {
+       case 55:
+       case 76:
+       case 77:
+               pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
+       }
+
+       if (cstate_probe_msr(core_msr, core_events_attrs, PERF_CSTATE_CORE_EVENT_MAX))
+               has_cstate_core = true;
+
+       if (cstate_probe_msr(pkg_msr, pkg_events_attrs, PERF_CSTATE_PKG_EVENT_MAX))
+               has_cstate_pkg = true;
+
+       return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
+}
+
+static void __init cstate_cpumask_init(void)
+{
+       int cpu;
+
+       cpu_notifier_register_begin();
+
+       for_each_online_cpu(cpu)
+               cstate_cpu_init(cpu);
+
+       __perf_cpu_notifier(cstate_cpu_notifier);
+
+       cpu_notifier_register_done();
+}
+
+static struct pmu cstate_core_pmu = {
+       .attr_groups    = core_attr_groups,
+       .name           = "cstate_core",
+       .task_ctx_nr    = perf_invalid_context,
+       .event_init     = cstate_pmu_event_init,
+       .add            = cstate_pmu_event_add, /* must have */
+       .del            = cstate_pmu_event_del, /* must have */
+       .start          = cstate_pmu_event_start,
+       .stop           = cstate_pmu_event_stop,
+       .read           = cstate_pmu_event_update,
+       .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
+};
+
+static struct pmu cstate_pkg_pmu = {
+       .attr_groups    = pkg_attr_groups,
+       .name           = "cstate_pkg",
+       .task_ctx_nr    = perf_invalid_context,
+       .event_init     = cstate_pmu_event_init,
+       .add            = cstate_pmu_event_add, /* must have */
+       .del            = cstate_pmu_event_del, /* must have */
+       .start          = cstate_pmu_event_start,
+       .stop           = cstate_pmu_event_stop,
+       .read           = cstate_pmu_event_update,
+       .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
+};
+
+static void __init cstate_pmus_register(void)
+{
+       int err;
+
+       if (has_cstate_core) {
+               err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
+               if (WARN_ON(err))
+                       pr_info("Failed to register PMU %s error %d\n",
+                               cstate_core_pmu.name, err);
+       }
+
+       if (has_cstate_pkg) {
+               err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
+               if (WARN_ON(err))
+                       pr_info("Failed to register PMU %s error %d\n",
+                               cstate_pkg_pmu.name, err);
+       }
+}
+
+static int __init cstate_pmu_init(void)
+{
+       int err;
+
+       if (cpu_has_hypervisor)
+               return -ENODEV;
+
+       err = cstate_init();
+       if (err)
+               return err;
+
+       cstate_cpumask_init();
+
+       cstate_pmus_register();
+
+       return 0;
+}
+
+device_initcall(cstate_pmu_init);
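
The two PMUs registered above support system-wide counting only, with the
event selected through 'config' (the perf codes listed in the header comment).
A minimal user-space sketch of reading core C6 residency on one CPU follows;
the sysfs lookup and the perf_event_open() call are the standard event_source
conventions and are not part of this file.

        #include <linux/perf_event.h>
        #include <sys/syscall.h>
        #include <sys/types.h>
        #include <unistd.h>
        #include <stdio.h>
        #include <string.h>

        static long open_cstate_core_c6(int cpu)
        {
                struct perf_event_attr attr;
                unsigned int type;
                FILE *f = fopen("/sys/bus/event_source/devices/cstate_core/type", "r");

                if (!f)
                        return -1;
                if (fscanf(f, "%u", &type) != 1) {
                        fclose(f);
                        return -1;
                }
                fclose(f);

                memset(&attr, 0, sizeof(attr));
                attr.type   = type;
                attr.size   = sizeof(attr);
                attr.config = 0x02;     /* c6-residency; counting only, no sampling */

                /* pid = -1, cpu = N: the PMU is system-wide only. */
                return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
        }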
index 84f236ab96b03459907662de8842958a3645d011..5db1c7755548e2ad472b2fbe5541125a45d90376 100644 (file)
@@ -510,10 +510,11 @@ int intel_pmu_drain_bts_buffer(void)
                u64     flags;
        };
        struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
-       struct bts_record *at, *top;
+       struct bts_record *at, *base, *top;
        struct perf_output_handle handle;
        struct perf_event_header header;
        struct perf_sample_data data;
+       unsigned long skip = 0;
        struct pt_regs regs;
 
        if (!event)
@@ -522,10 +523,10 @@ int intel_pmu_drain_bts_buffer(void)
        if (!x86_pmu.bts_active)
                return 0;
 
-       at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
-       top = (struct bts_record *)(unsigned long)ds->bts_index;
+       base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
+       top  = (struct bts_record *)(unsigned long)ds->bts_index;
 
-       if (top <= at)
+       if (top <= base)
                return 0;
 
        memset(&regs, 0, sizeof(regs));
@@ -534,6 +535,27 @@ int intel_pmu_drain_bts_buffer(void)
 
        perf_sample_data_init(&data, 0, event->hw.last_period);
 
+       /*
+        * BTS leaks kernel addresses in branches across the cpl boundary,
+        * such as traps or system calls, so unless the user is asking for
+        * kernel tracing (and right now it's not possible), we'd need to
+        * filter them out. But first we need to count how many of those we
+        * have in the current batch. This is an extra O(n) pass; however,
+        * it's much faster than the other one, especially considering that
+        * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the
+        * alloc_bts_buffer()).
+        */
+       for (at = base; at < top; at++) {
+               /*
+                * Note that right now *this* BTS code only works if
+                * attr::exclude_kernel is set, but let's keep this extra
+                * check here in case that changes.
+                */
+               if (event->attr.exclude_kernel &&
+                   (kernel_ip(at->from) || kernel_ip(at->to)))
+                       skip++;
+       }
+
        /*
         * Prepare a generic sample, i.e. fill in the invariant fields.
         * We will overwrite the from and to address before we output
@@ -541,10 +563,16 @@ int intel_pmu_drain_bts_buffer(void)
         */
        perf_prepare_sample(&header, &data, event, &regs);
 
-       if (perf_output_begin(&handle, event, header.size * (top - at)))
+       if (perf_output_begin(&handle, event, header.size *
+                             (top - base - skip)))
                return 1;
 
-       for (; at < top; at++) {
+       for (at = base; at < top; at++) {
+               /* Filter out any records that contain kernel addresses. */
+               if (event->attr.exclude_kernel &&
+                   (kernel_ip(at->from) || kernel_ip(at->to)))
+                       continue;
+
                data.ip         = at->from;
                data.addr       = at->to;
 
index b2c9475b7ff24af15c08a68f1847eca336bbec97..bfd0b717e944ce012b7a0abe19c89f5294d284e1 100644 (file)
@@ -151,10 +151,10 @@ static void __intel_pmu_lbr_enable(bool pmi)
         * No need to reprogram LBR_SELECT in a PMI, as it
         * did not change.
         */
-       if (cpuc->lbr_sel && !pmi) {
+       if (cpuc->lbr_sel)
                lbr_select = cpuc->lbr_sel->config;
+       if (!pmi)
                wrmsrl(MSR_LBR_SELECT, lbr_select);
-       }
 
        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        orig_debugctl = debugctl;
@@ -555,6 +555,8 @@ static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
        if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
                mask |= X86_BR_IND_JMP;
 
+       if (br_type & PERF_SAMPLE_BRANCH_CALL)
+               mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
        /*
         * stash actual user request into reg, it may
         * be used by fixup code for some CPU
@@ -890,6 +892,7 @@ static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = LBR_IND_CALL,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
+       [PERF_SAMPLE_BRANCH_CALL_SHIFT]         = LBR_REL_CALL,
 };
 
 static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
@@ -905,6 +908,7 @@ static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
        [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]   = LBR_REL_CALL | LBR_IND_CALL
                                                | LBR_RETURN | LBR_CALL_STACK,
        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
+       [PERF_SAMPLE_BRANCH_CALL_SHIFT]         = LBR_REL_CALL,
 };
 
 /* core */
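
The new PERF_SAMPLE_BRANCH_CALL filter maps to LBR_REL_CALL above, i.e. it
restricts the branch stack to direct calls. A hedged sketch of requesting it
from user space follows; the event choice, sampling period and privilege
filter are ordinary branch-stack boilerplate, not something introduced by
this patch.

        #include <linux/perf_event.h>

        static struct perf_event_attr branch_call_attr = {
                .type               = PERF_TYPE_HARDWARE,
                .size               = sizeof(struct perf_event_attr),
                .config             = PERF_COUNT_HW_CPU_CYCLES,
                .sample_period      = 100000,
                .sample_type        = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK,
                /* direct calls only, per the LBR_REL_CALL mapping */
                .branch_sample_type = PERF_SAMPLE_BRANCH_CALL | PERF_SAMPLE_BRANCH_USER,
        };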
index 42169283448b9c5af7a75175ad18d02ec0079612..868e1194337f6cf93cd1a8048480a60aa7acf3e3 100644 (file)
@@ -139,9 +139,6 @@ static int __init pt_pmu_hw_init(void)
        long i;
 
        attrs = NULL;
-       ret = -ENODEV;
-       if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_INTEL_PT))
-               goto fail;
 
        for (i = 0; i < PT_CPUID_LEAVES; i++) {
                cpuid_count(20, i,
@@ -1130,6 +1127,10 @@ static __init int pt_init(void)
        int ret, cpu, prior_warn = 0;
 
        BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);
+
+       if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_INTEL_PT))
+               return -ENODEV;
+
        get_online_cpus();
        for_each_online_cpu(cpu) {
                u64 ctl;
index 560e5255b15e870403728c3411c1b07246781cb7..61215a69b03d928d83dc6e86950bd5ad1b2db68c 100644 (file)
@@ -7,7 +7,8 @@ struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
 static bool pcidrv_registered;
 struct pci_driver *uncore_pci_driver;
 /* pci bus to socket mapping */
-int uncore_pcibus_to_physid[256] = { [0 ... 255] = -1, };
+DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
+struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
 struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
 
 static DEFINE_RAW_SPINLOCK(uncore_box_lock);
@@ -20,6 +21,59 @@ static struct event_constraint uncore_constraint_fixed =
 struct event_constraint uncore_constraint_empty =
        EVENT_CONSTRAINT(0, 0, 0);
 
+int uncore_pcibus_to_physid(struct pci_bus *bus)
+{
+       struct pci2phy_map *map;
+       int phys_id = -1;
+
+       raw_spin_lock(&pci2phy_map_lock);
+       list_for_each_entry(map, &pci2phy_map_head, list) {
+               if (map->segment == pci_domain_nr(bus)) {
+                       phys_id = map->pbus_to_physid[bus->number];
+                       break;
+               }
+       }
+       raw_spin_unlock(&pci2phy_map_lock);
+
+       return phys_id;
+}
+
+struct pci2phy_map *__find_pci2phy_map(int segment)
+{
+       struct pci2phy_map *map, *alloc = NULL;
+       int i;
+
+       lockdep_assert_held(&pci2phy_map_lock);
+
+lookup:
+       list_for_each_entry(map, &pci2phy_map_head, list) {
+               if (map->segment == segment)
+                       goto end;
+       }
+
+       if (!alloc) {
+               raw_spin_unlock(&pci2phy_map_lock);
+               alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
+               raw_spin_lock(&pci2phy_map_lock);
+
+               if (!alloc)
+                       return NULL;
+
+               goto lookup;
+       }
+
+       map = alloc;
+       alloc = NULL;
+       map->segment = segment;
+       for (i = 0; i < 256; i++)
+               map->pbus_to_physid[i] = -1;
+       list_add_tail(&map->list, &pci2phy_map_head);
+
+end:
+       kfree(alloc);
+       return map;
+}
+
 ssize_t uncore_event_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
 {
@@ -809,7 +863,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
        int phys_id;
        bool first_box = false;
 
-       phys_id = uncore_pcibus_to_physid[pdev->bus->number];
+       phys_id = uncore_pcibus_to_physid(pdev->bus);
        if (phys_id < 0)
                return -ENODEV;
 
@@ -856,9 +910,10 @@ static void uncore_pci_remove(struct pci_dev *pdev)
 {
        struct intel_uncore_box *box = pci_get_drvdata(pdev);
        struct intel_uncore_pmu *pmu;
-       int i, cpu, phys_id = uncore_pcibus_to_physid[pdev->bus->number];
+       int i, cpu, phys_id;
        bool last_box = false;
 
+       phys_id = uncore_pcibus_to_physid(pdev->bus);
        box = pci_get_drvdata(pdev);
        if (!box) {
                for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
index 72c54c2e5b1afdd2102b39b1d53e4ee4de57518d..2f0a4a98e16bee4bcadde4591daab3be6e7c08cf 100644 (file)
@@ -117,6 +117,15 @@ struct uncore_event_desc {
        const char *config;
 };
 
+struct pci2phy_map {
+       struct list_head list;
+       int segment;
+       int pbus_to_physid[256];
+};
+
+int uncore_pcibus_to_physid(struct pci_bus *bus);
+struct pci2phy_map *__find_pci2phy_map(int segment);
+
 ssize_t uncore_event_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf);
 
@@ -317,7 +326,8 @@ u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
 extern struct intel_uncore_type **uncore_msr_uncores;
 extern struct intel_uncore_type **uncore_pci_uncores;
 extern struct pci_driver *uncore_pci_driver;
-extern int uncore_pcibus_to_physid[256];
+extern raw_spinlock_t pci2phy_map_lock;
+extern struct list_head pci2phy_map_head;
 extern struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
 extern struct event_constraint uncore_constraint_empty;
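
The hunks above replace the flat uncore_pcibus_to_physid[256] array with a list of per-PCI-segment maps guarded by pci2phy_map_lock, so systems with more than one PCI segment no longer alias bus numbers. A standalone model of the data structure (plain user-space C, not kernel code; locking, error handling and the real struct names are simplified, segment/bus numbers are made up):

    #include <stdio.h>
    #include <stdlib.h>

    struct seg_map {
            struct seg_map *next;
            int segment;
            int pbus_to_physid[256];        /* mirrors struct pci2phy_map above */
    };

    static struct seg_map *head;

    static struct seg_map *find_map(int segment)      /* roughly __find_pci2phy_map() */
    {
            struct seg_map *m;
            int i;

            for (m = head; m; m = m->next)
                    if (m->segment == segment)
                            return m;

            m = malloc(sizeof(*m));                    /* error handling omitted */
            m->segment = segment;
            for (i = 0; i < 256; i++)
                    m->pbus_to_physid[i] = -1;         /* -1 == bus not mapped   */
            m->next = head;
            head = m;
            return m;
    }

    static int bus_to_physid(int segment, int bus)     /* roughly uncore_pcibus_to_physid() */
    {
            struct seg_map *m;

            for (m = head; m; m = m->next)
                    if (m->segment == segment)
                            return m->pbus_to_physid[bus];
            return -1;
    }

    int main(void)
    {
            find_map(0)->pbus_to_physid[0x3f] = 0;     /* segment 0, bus 0x3f -> package 0 */
            find_map(1)->pbus_to_physid[0x3f] = 2;     /* same bus number, other segment   */

            printf("%d %d %d\n", bus_to_physid(0, 0x3f),
                   bus_to_physid(1, 0x3f), bus_to_physid(0, 0x40));
            /* prints "0 2 -1": lookups are now segment-aware */
            return 0;
    }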
 
index f78574b3cb55bafeb4d0a712c688b652fe770f72..845256158a10d79fdcc9ebd4946ca20c1b468277 100644 (file)
@@ -420,15 +420,25 @@ static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
 static int snb_pci2phy_map_init(int devid)
 {
        struct pci_dev *dev = NULL;
-       int bus;
+       struct pci2phy_map *map;
+       int bus, segment;
 
        dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
        if (!dev)
                return -ENOTTY;
 
        bus = dev->bus->number;
-
-       uncore_pcibus_to_physid[bus] = 0;
+       segment = pci_domain_nr(dev->bus);
+
+       raw_spin_lock(&pci2phy_map_lock);
+       map = __find_pci2phy_map(segment);
+       if (!map) {
+               raw_spin_unlock(&pci2phy_map_lock);
+               pci_dev_put(dev);
+               return -ENOMEM;
+       }
+       map->pbus_to_physid[bus] = 0;
+       raw_spin_unlock(&pci2phy_map_lock);
 
        pci_dev_put(dev);
 
index 694510a887dc34d82ea608eaa07599f4a26dd726..f0f4fcba252e1545fc21d8d215329a631c471aac 100644 (file)
@@ -1087,7 +1087,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
 static int snbep_pci2phy_map_init(int devid)
 {
        struct pci_dev *ubox_dev = NULL;
-       int i, bus, nodeid;
+       int i, bus, nodeid, segment;
+       struct pci2phy_map *map;
        int err = 0;
        u32 config = 0;
 
@@ -1106,16 +1107,27 @@ static int snbep_pci2phy_map_init(int devid)
                err = pci_read_config_dword(ubox_dev, 0x54, &config);
                if (err)
                        break;
+
+               segment = pci_domain_nr(ubox_dev->bus);
+               raw_spin_lock(&pci2phy_map_lock);
+               map = __find_pci2phy_map(segment);
+               if (!map) {
+                       raw_spin_unlock(&pci2phy_map_lock);
+                       err = -ENOMEM;
+                       break;
+               }
+
                /*
                 * every three bits in the Node ID mapping register maps
                 * to a particular node.
                 */
                for (i = 0; i < 8; i++) {
                        if (nodeid == ((config >> (3 * i)) & 0x7)) {
-                               uncore_pcibus_to_physid[bus] = i;
+                               map->pbus_to_physid[bus] = i;
                                break;
                        }
                }
+               raw_spin_unlock(&pci2phy_map_lock);
        }
 
        if (!err) {
@@ -1123,13 +1135,17 @@ static int snbep_pci2phy_map_init(int devid)
                 * For PCI bus with no UBOX device, find the next bus
                 * that has UBOX device and use its mapping.
                 */
-               i = -1;
-               for (bus = 255; bus >= 0; bus--) {
-                       if (uncore_pcibus_to_physid[bus] >= 0)
-                               i = uncore_pcibus_to_physid[bus];
-                       else
-                               uncore_pcibus_to_physid[bus] = i;
+               raw_spin_lock(&pci2phy_map_lock);
+               list_for_each_entry(map, &pci2phy_map_head, list) {
+                       i = -1;
+                       for (bus = 255; bus >= 0; bus--) {
+                               if (map->pbus_to_physid[bus] >= 0)
+                                       i = map->pbus_to_physid[bus];
+                               else
+                                       map->pbus_to_physid[bus] = i;
+                       }
                }
+               raw_spin_unlock(&pci2phy_map_lock);
        }
 
        pci_dev_put(ubox_dev);
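
The fallback loop in the hunk above walks bus numbers from 255 down to 0 and lets every bus without a UBOX inherit the node id of the next UBOX bus above it. A standalone model with made-up bus numbers shows the effect:

    #include <stdio.h>

    int main(void)
    {
            int pbus_to_physid[256], bus, i;

            for (bus = 0; bus < 256; bus++)
                    pbus_to_physid[bus] = -1;
            pbus_to_physid[0x3f] = 0;        /* pretend a UBOX on bus 0x3f, node 0 */
            pbus_to_physid[0x7f] = 1;        /* and one on bus 0x7f, node 1        */

            /* same backward fill as in the hunk above */
            i = -1;
            for (bus = 255; bus >= 0; bus--) {
                    if (pbus_to_physid[bus] >= 0)
                            i = pbus_to_physid[bus];
                    else
                            pbus_to_physid[bus] = i;
            }

            printf("%d %d %d\n", pbus_to_physid[0x00],
                   pbus_to_physid[0x40], pbus_to_physid[0x80]);
            /* prints "0 1 -1": buses below a UBOX bus inherit its node id,
             * buses above the last UBOX stay unmapped */
            return 0;
    }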
@@ -2444,7 +2460,7 @@ static struct intel_uncore_type *bdx_pci_uncores[] = {
        NULL,
 };
 
-static DEFINE_PCI_DEVICE_TABLE(bdx_uncore_pci_ids) = {
+static const struct pci_device_id bdx_uncore_pci_ids[] = {
        { /* Home Agent 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
index 086b12eae79493329c8792538ea8c6f179aee4fb..f32ac13934f2310c1b61f54884246089c7e06e01 100644 (file)
@@ -10,12 +10,12 @@ enum perf_msr_id {
        PERF_MSR_EVENT_MAX,
 };
 
-bool test_aperfmperf(int idx)
+static bool test_aperfmperf(int idx)
 {
        return boot_cpu_has(X86_FEATURE_APERFMPERF);
 }
 
-bool test_intel(int idx)
+static bool test_intel(int idx)
 {
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
            boot_cpu_data.x86 != 6)
index 3d423a101fae05ccd722a4e564b83ba5e0112b6e..608fb26c72544c5ee0fd7793c0703f642c7ed60f 100644 (file)
@@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
                { X86_FEATURE_PLN,              CR_EAX, 4, 0x00000006, 0 },
                { X86_FEATURE_PTS,              CR_EAX, 6, 0x00000006, 0 },
                { X86_FEATURE_HWP,              CR_EAX, 7, 0x00000006, 0 },
-               { X86_FEATURE_HWP_NOITFY,       CR_EAX, 8, 0x00000006, 0 },
+               { X86_FEATURE_HWP_NOTIFY,       CR_EAX, 8, 0x00000006, 0 },
                { X86_FEATURE_HWP_ACT_WINDOW,   CR_EAX, 9, 0x00000006, 0 },
                { X86_FEATURE_HWP_EPP,          CR_EAX,10, 0x00000006, 0 },
                { X86_FEATURE_HWP_PKG_REQ,      CR_EAX,11, 0x00000006, 0 },
index e068d6683dba6bab6bd4c9f9804a621385e3baee..2c1910f6717ed5278fc6d85d25c92360664feb68 100644 (file)
@@ -75,8 +75,6 @@ struct crash_memmap_data {
        unsigned int type;
 };
 
-int in_crash_kexec;
-
 /*
  * This is used to VMCLEAR all VMCSs loaded on the
  * processor. And when loading kvm_intel module, the
@@ -132,7 +130,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 
 static void kdump_nmi_shootdown_cpus(void)
 {
-       in_crash_kexec = 1;
        nmi_shootdown_cpus(kdump_nmi_callback);
 
        disable_local_APIC();
@@ -185,10 +182,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_KEXEC_FILE
-static int get_nr_ram_ranges_callback(unsigned long start_pfn,
-                               unsigned long nr_pfn, void *arg)
+static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
 {
-       int *nr_ranges = arg;
+       unsigned int *nr_ranges = arg;
 
        (*nr_ranges)++;
        return 0;
@@ -214,7 +210,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
 
        ced->image = image;
 
-       walk_system_ram_range(0, -1, &nr_ranges,
+       walk_system_ram_res(0, -1, &nr_ranges,
                                get_nr_ram_ranges_callback);
 
        ced->max_nr_ranges = nr_ranges;
index 9f9cc682e561e2427152314b544dd387b0c27922..db9a675e751b0bf1401af295a40b1e7a314bab6a 100644 (file)
@@ -584,7 +584,7 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
 static void __init force_disable_hpet(int num, int slot, int func)
 {
 #ifdef CONFIG_HPET_TIMER
-       boot_hpet_disable = 1;
+       boot_hpet_disable = true;
        pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
 #endif
 }
index 0e2d96ffd158d0e5f4c1d355040cd9b285ef84d6..6bc9ae24b6d2a74930701c0c6ea60fe090a3ebbc 100644 (file)
@@ -152,7 +152,7 @@ ENTRY(startup_32)
        movl %eax, pa(olpc_ofw_pgd)
 #endif
 
-#ifdef CONFIG_MICROCODE_EARLY
+#ifdef CONFIG_MICROCODE
        /* Early load ucode on BSP. */
        call load_ucode_bsp
 #endif
@@ -311,12 +311,11 @@ ENTRY(startup_32_smp)
        movl %eax,%ss
        leal -__PAGE_OFFSET(%ecx),%esp
 
-#ifdef CONFIG_MICROCODE_EARLY
+#ifdef CONFIG_MICROCODE
        /* Early load ucode on AP. */
        call load_ucode_ap
 #endif
 
-
 default_entry:
 #define CR0_STATE      (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
                         X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
index 88b4da37308115b2189fd811f12a67f1a2c10ff8..b8e6ff5cd5d055892715af36f1e9b72340f7b18c 100644 (file)
  */
 unsigned long                          hpet_address;
 u8                                     hpet_blockid; /* OS timer block num */
-u8                                     hpet_msi_disable;
+bool                                   hpet_msi_disable;
 
 #ifdef CONFIG_PCI_MSI
-static unsigned long                   hpet_num_timers;
+static unsigned int                    hpet_num_timers;
 #endif
 static void __iomem                    *hpet_virt_address;
 
@@ -86,9 +86,9 @@ static inline void hpet_clear_mapping(void)
 /*
  * HPET command line enable / disable
  */
-int boot_hpet_disable;
-int hpet_force_user;
-static int hpet_verbose;
+bool boot_hpet_disable;
+bool hpet_force_user;
+static bool hpet_verbose;
 
 static int __init hpet_setup(char *str)
 {
@@ -98,11 +98,11 @@ static int __init hpet_setup(char *str)
                if (next)
                        *next++ = 0;
                if (!strncmp("disable", str, 7))
-                       boot_hpet_disable = 1;
+                       boot_hpet_disable = true;
                if (!strncmp("force", str, 5))
-                       hpet_force_user = 1;
+                       hpet_force_user = true;
                if (!strncmp("verbose", str, 7))
-                       hpet_verbose = 1;
+                       hpet_verbose = true;
                str = next;
        }
        return 1;
@@ -111,7 +111,7 @@ __setup("hpet=", hpet_setup);
 
 static int __init disable_hpet(char *str)
 {
-       boot_hpet_disable = 1;
+       boot_hpet_disable = true;
        return 1;
 }
 __setup("nohpet", disable_hpet);
@@ -124,7 +124,7 @@ static inline int is_hpet_capable(void)
 /*
  * HPET timer interrupt enable / disable
  */
-static int hpet_legacy_int_enabled;
+static bool hpet_legacy_int_enabled;
 
 /**
  * is_hpet_enabled - check whether the hpet timer interrupt is enabled
@@ -230,7 +230,7 @@ static struct clock_event_device hpet_clockevent;
 
 static void hpet_stop_counter(void)
 {
-       unsigned long cfg = hpet_readl(HPET_CFG);
+       u32 cfg = hpet_readl(HPET_CFG);
        cfg &= ~HPET_CFG_ENABLE;
        hpet_writel(cfg, HPET_CFG);
 }
@@ -272,7 +272,7 @@ static void hpet_enable_legacy_int(void)
 
        cfg |= HPET_CFG_LEGACY;
        hpet_writel(cfg, HPET_CFG);
-       hpet_legacy_int_enabled = 1;
+       hpet_legacy_int_enabled = true;
 }
 
 static void hpet_legacy_clockevent_register(void)
@@ -983,7 +983,7 @@ void hpet_disable(void)
                        cfg = *hpet_boot_cfg;
                else if (hpet_legacy_int_enabled) {
                        cfg &= ~HPET_CFG_LEGACY;
-                       hpet_legacy_int_enabled = 0;
+                       hpet_legacy_int_enabled = false;
                }
                cfg &= ~HPET_CFG_ENABLE;
                hpet_writel(cfg, HPET_CFG);
@@ -1121,8 +1121,7 @@ EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
 
 static void hpet_disable_rtc_channel(void)
 {
-       unsigned long cfg;
-       cfg = hpet_readl(HPET_T1_CFG);
+       u32 cfg = hpet_readl(HPET_T1_CFG);
        cfg &= ~HPET_TN_ENABLE;
        hpet_writel(cfg, HPET_T1_CFG);
 }
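
For reference, the hpet_setup() and disable_hpet() parsers above take comma-separated tokens on the kernel command line; after this patch the flags they set are plain bools. Illustrative invocations (token names taken from the hunks, effects as shown above):

    hpet=disable            boot_hpet_disable = true
    hpet=force,verbose      hpet_force_user = true, hpet_verbose = true
    nohpet                  boot_hpet_disable = true (via disable_hpet())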
index c767cf2bc80a0b8f44be521f6171c3d8d4fa7b7d..206d0b90a3ab1e800e372ada94b2bd9f8dac4e5b 100644 (file)
@@ -72,7 +72,7 @@ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
        stack_overflow_check(regs);
 
-       if (unlikely(IS_ERR_OR_NULL(desc)))
+       if (IS_ERR_OR_NULL(desc))
                return false;
 
        generic_handle_irq_desc(desc);
index 1b55de1267cfc4f3032b37c72c14239533fbdbcf..cd99433b8ba17597cbc9e91aba9c40eee7e05e4b 100644 (file)
@@ -131,11 +131,12 @@ void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
 
 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
 {
+       if (!*dev)
+               *dev = &x86_dma_fallback_dev;
+
        *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
        *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
 
-       if (!*dev)
-               *dev = &x86_dma_fallback_dev;
        if (!is_device_dma_capable(*dev))
                return false;
        return true;
index 6d0e62ae8516760d6ae4af7deaa31418e82238fa..9f7c21c22477e59462d72e930d79a4c2a238a051 100644 (file)
@@ -84,6 +84,9 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
        memcpy(dst, src, arch_task_struct_size);
+#ifdef CONFIG_VM86
+       dst->thread.vm86 = NULL;
+#endif
 
        return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 }
@@ -506,3 +509,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 }
 
+/*
+ * Called from fs/proc with a reference on @p to find the function
+ * which called into schedule(). This needs to be done carefully
+ * because the task might wake up and we might look at a stack
+ * changing under us.
+ */
+unsigned long get_wchan(struct task_struct *p)
+{
+       unsigned long start, bottom, top, sp, fp, ip;
+       int count = 0;
+
+       if (!p || p == current || p->state == TASK_RUNNING)
+               return 0;
+
+       start = (unsigned long)task_stack_page(p);
+       if (!start)
+               return 0;
+
+       /*
+        * Layout of the stack page:
+        *
+        * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
+        * PADDING
+        * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+        * stack
+        * ----------- bottom = start + sizeof(thread_info)
+        * thread_info
+        * ----------- start
+        *
+        * The tasks stack pointer points at the location where the
+        * framepointer is stored. The data on the stack is:
+        * ... IP FP ... IP FP
+        *
+        * We need to read FP and IP, so we need to adjust the upper
+        * bound by another unsigned long.
+        */
+       top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
+       top -= 2 * sizeof(unsigned long);
+       bottom = start + sizeof(struct thread_info);
+
+       sp = READ_ONCE(p->thread.sp);
+       if (sp < bottom || sp > top)
+               return 0;
+
+       fp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
+       do {
+               if (fp < bottom || fp > top)
+                       return 0;
+               ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
+               if (!in_sched_functions(ip))
+                       return ip;
+               fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
+       } while (count++ < 16 && p->state != TASK_RUNNING);
+       return 0;
+}
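
The stack-layout comment above can be made concrete with a small user-space model, not kernel code: each frame stores the saved frame pointer with the return address one word above it, and the walk follows saved frame pointers until a return address outside the scheduler is found. All numbers below are fabricated:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* fake stack; array indices stand in for addresses */
            uint64_t stack[8] = { 0 };
            uint64_t fp, ip;
            int count = 0;

            /* frame 0: saved FP -> slot 3, return IP 100 ("inside" schedule)   */
            stack[0] = 3;  stack[1] = 100;
            /* frame 1: saved FP -> slot 6, return IP 200 (the caller we want)  */
            stack[3] = 6;  stack[4] = 200;

            fp = 0;                                 /* where p->thread.sp points */
            do {
                    ip = stack[fp + 1];             /* "... IP FP" layout        */
                    if (ip != 100) {                /* !in_sched_functions(ip)   */
                            printf("wchan = %llu\n", (unsigned long long)ip);
                            return 0;
                    }
                    fp = stack[fp];                 /* follow the saved FP       */
            } while (count++ < 16);
            return 0;
    }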
index c13df2c735f82765015a4924b08aed9904c27419..9f950917528b332b139afb6fa854c87c8d92c4d0 100644 (file)
@@ -279,14 +279,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
                set_iopl_mask(next->iopl);
 
-       /*
-        * If it were not for PREEMPT_ACTIVE we could guarantee that the
-        * preempt_count of all tasks was equal here and this would not be
-        * needed.
-        */
-       task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
-       this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
-
        /*
         * Now maybe handle debug registers and/or IO bitmaps
         */
@@ -324,31 +316,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        return prev_p;
 }
-
-#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
-#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
-
-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long bp, sp, ip;
-       unsigned long stack_page;
-       int count = 0;
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-       stack_page = (unsigned long)task_stack_page(p);
-       sp = p->thread.sp;
-       if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
-               return 0;
-       /* include/asm-i386/system.h:switch_to() pushes bp last. */
-       bp = *(unsigned long *) sp;
-       do {
-               if (bp < stack_page || bp > top_ebp+stack_page)
-                       return 0;
-               ip = *(unsigned long *) (bp+4);
-               if (!in_sched_functions(ip))
-                       return ip;
-               bp = *(unsigned long *) bp;
-       } while (count++ < 16);
-       return 0;
-}
-
index 3c1bbcf129245aa7909708af46489d73f2e9c297..e835d263a33b43ccf7601698cfeadc447ede491e 100644 (file)
@@ -332,7 +332,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        /*
         * Switch FS and GS.
         *
-        * These are even more complicated than FS and GS: they have
+        * These are even more complicated than DS and ES: they have
         * 64-bit bases are that controlled by arch_prctl.  Those bases
         * only differ from the values in the GDT or LDT if the selector
         * is 0.
@@ -401,14 +401,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         */
        this_cpu_write(current_task, next_p);
 
-       /*
-        * If it were not for PREEMPT_ACTIVE we could guarantee that the
-        * preempt_count of all tasks was equal here and this would not be
-        * needed.
-        */
-       task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
-       this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
-
        /* Reload esp0 and ss1.  This changes current_thread_info(). */
        load_sp0(tss, next);
 
@@ -499,30 +491,6 @@ void set_personality_ia32(bool x32)
 }
 EXPORT_SYMBOL_GPL(set_personality_ia32);
 
-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long stack;
-       u64 fp, ip;
-       int count = 0;
-
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-       stack = (unsigned long)task_stack_page(p);
-       if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
-               return 0;
-       fp = *(u64 *)(p->thread.sp);
-       do {
-               if (fp < (unsigned long)stack ||
-                   fp >= (unsigned long)stack+THREAD_SIZE)
-                       return 0;
-               ip = *(u64 *)(fp+8);
-               if (!in_sched_functions(ip))
-                       return ip;
-               fp = *(u64 *)fp;
-       } while (count++ < 16);
-       return 0;
-}
-
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 {
        int ret = 0;
index 176a0f99d4daa6f2f94d2e505b1452d3a87d02db..cc457ff818ad65c0ea92bb29257d0629141716fa 100644 (file)
@@ -524,7 +524,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
  */
 static void force_disable_hpet_msi(struct pci_dev *unused)
 {
-       hpet_msi_disable = 1;
+       hpet_msi_disable = true;
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
index fdb7f2a2d3286013a7ea41d392e48596c90fc672..a1e4da98c8f0db8c838766e9466c2608c124fe98 100644 (file)
 #include <asm/mce.h>
 #include <asm/alternative.h>
 #include <asm/prom.h>
+#include <asm/microcode.h>
 
 /*
  * max_low_pfn_mapped: highest direct mapped pfn under 4GB
@@ -480,34 +481,34 @@ static void __init memblock_x86_reserve_range_setup_data(void)
 
 #ifdef CONFIG_KEXEC_CORE
 
+/* 16M alignment for crash kernel regions */
+#define CRASH_ALIGN            (16 << 20)
+
 /*
  * Keep the crash kernel below this limit.  On 32 bits earlier kernels
  * would limit the kernel to the low 512 MiB due to mapping restrictions.
  * On 64bit, old kexec-tools need to under 896MiB.
  */
 #ifdef CONFIG_X86_32
-# define CRASH_KERNEL_ADDR_LOW_MAX     (512 << 20)
-# define CRASH_KERNEL_ADDR_HIGH_MAX    (512 << 20)
+# define CRASH_ADDR_LOW_MAX    (512 << 20)
+# define CRASH_ADDR_HIGH_MAX   (512 << 20)
 #else
-# define CRASH_KERNEL_ADDR_LOW_MAX     (896UL<<20)
-# define CRASH_KERNEL_ADDR_HIGH_MAX    MAXMEM
+# define CRASH_ADDR_LOW_MAX    (896UL << 20)
+# define CRASH_ADDR_HIGH_MAX   MAXMEM
 #endif
 
-static void __init reserve_crashkernel_low(void)
+static int __init reserve_crashkernel_low(void)
 {
 #ifdef CONFIG_X86_64
-       const unsigned long long alignment = 16<<20;    /* 16M */
-       unsigned long long low_base = 0, low_size = 0;
+       unsigned long long base, low_base = 0, low_size = 0;
        unsigned long total_low_mem;
-       unsigned long long base;
-       bool auto_set = false;
        int ret;
 
-       total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT));
+       total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));
+
        /* crashkernel=Y,low */
-       ret = parse_crashkernel_low(boot_command_line, total_low_mem,
-                                               &low_size, &base);
-       if (ret != 0) {
+       ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
+       if (ret) {
                /*
                 * two parts from lib/swiotlb.c:
                 * -swiotlb size: user-specified with swiotlb= or default.
@@ -517,52 +518,52 @@ static void __init reserve_crashkernel_low(void)
                 * make sure we allocate enough extra low memory so that we
                 * don't run out of DMA buffers for 32-bit devices.
                 */
-               low_size = max(swiotlb_size_or_default() + (8UL<<20), 256UL<<20);
-               auto_set = true;
+               low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
        } else {
                /* passed with crashkernel=0,low ? */
                if (!low_size)
-                       return;
+                       return 0;
        }
 
-       low_base = memblock_find_in_range(low_size, (1ULL<<32),
-                                       low_size, alignment);
-
+       low_base = memblock_find_in_range(low_size, 1ULL << 32, low_size, CRASH_ALIGN);
        if (!low_base) {
-               if (!auto_set)
-                       pr_info("crashkernel low reservation failed - No suitable area found.\n");
+               pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
+                      (unsigned long)(low_size >> 20));
+               return -ENOMEM;
+       }
 
-               return;
+       ret = memblock_reserve(low_base, low_size);
+       if (ret) {
+               pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
+               return ret;
        }
 
-       memblock_reserve(low_base, low_size);
        pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
-                       (unsigned long)(low_size >> 20),
-                       (unsigned long)(low_base >> 20),
-                       (unsigned long)(total_low_mem >> 20));
+               (unsigned long)(low_size >> 20),
+               (unsigned long)(low_base >> 20),
+               (unsigned long)(total_low_mem >> 20));
+
        crashk_low_res.start = low_base;
        crashk_low_res.end   = low_base + low_size - 1;
        insert_resource(&iomem_resource, &crashk_low_res);
 #endif
+       return 0;
 }
 
 static void __init reserve_crashkernel(void)
 {
-       const unsigned long long alignment = 16<<20;    /* 16M */
-       unsigned long long total_mem;
-       unsigned long long crash_size, crash_base;
+       unsigned long long crash_size, crash_base, total_mem;
        bool high = false;
        int ret;
 
        total_mem = memblock_phys_mem_size();
 
        /* crashkernel=XM */
-       ret = parse_crashkernel(boot_command_line, total_mem,
-                       &crash_size, &crash_base);
+       ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
        if (ret != 0 || crash_size <= 0) {
                /* crashkernel=X,high */
                ret = parse_crashkernel_high(boot_command_line, total_mem,
-                               &crash_size, &crash_base);
+                                            &crash_size, &crash_base);
                if (ret != 0 || crash_size <= 0)
                        return;
                high = true;
@@ -573,11 +574,10 @@ static void __init reserve_crashkernel(void)
                /*
                 *  kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
                 */
-               crash_base = memblock_find_in_range(alignment,
-                                       high ? CRASH_KERNEL_ADDR_HIGH_MAX :
-                                              CRASH_KERNEL_ADDR_LOW_MAX,
-                                       crash_size, alignment);
-
+               crash_base = memblock_find_in_range(CRASH_ALIGN,
+                                                   high ? CRASH_ADDR_HIGH_MAX
+                                                        : CRASH_ADDR_LOW_MAX,
+                                                   crash_size, CRASH_ALIGN);
                if (!crash_base) {
                        pr_info("crashkernel reservation failed - No suitable area found.\n");
                        return;
@@ -587,26 +587,32 @@ static void __init reserve_crashkernel(void)
                unsigned long long start;
 
                start = memblock_find_in_range(crash_base,
-                                crash_base + crash_size, crash_size, 1<<20);
+                                              crash_base + crash_size,
+                                              crash_size, 1 << 20);
                if (start != crash_base) {
                        pr_info("crashkernel reservation failed - memory is in use.\n");
                        return;
                }
        }
-       memblock_reserve(crash_base, crash_size);
+       ret = memblock_reserve(crash_base, crash_size);
+       if (ret) {
+               pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
+               return;
+       }
 
-       printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
-                       "for crashkernel (System RAM: %ldMB)\n",
-                       (unsigned long)(crash_size >> 20),
-                       (unsigned long)(crash_base >> 20),
-                       (unsigned long)(total_mem >> 20));
+       if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
+               memblock_free(crash_base, crash_size);
+               return;
+       }
+
+       pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
+               (unsigned long)(crash_size >> 20),
+               (unsigned long)(crash_base >> 20),
+               (unsigned long)(total_mem >> 20));
 
        crashk_res.start = crash_base;
        crashk_res.end   = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
-
-       if (crash_base >= (1ULL<<32))
-               reserve_crashkernel_low();
 }
 #else
 static void __init reserve_crashkernel(void)
@@ -1079,8 +1085,10 @@ void __init setup_arch(char **cmdline_p)
        memblock_set_current_limit(ISA_END_ADDRESS);
        memblock_x86_fill();
 
-       if (efi_enabled(EFI_BOOT))
+       if (efi_enabled(EFI_BOOT)) {
+               efi_fake_memmap();
                efi_find_mirror();
+       }
 
        /*
         * The EFI specification says that boot service code won't be called
@@ -1173,6 +1181,14 @@ void __init setup_arch(char **cmdline_p)
        clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
                        KERNEL_PGD_PTRS);
+
+       /*
+        * sync back low identity map too.  It is used for example
+        * in the 32-bit EFI stub.
+        */
+       clone_pgd_range(initial_page_table,
+                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+                       KERNEL_PGD_PTRS);
 #endif
 
        tboot_probe();
@@ -1234,6 +1250,8 @@ void __init setup_arch(char **cmdline_p)
        if (efi_enabled(EFI_BOOT))
                efi_apply_memmap_quirks();
 #endif
+
+       microcode_init();
 }
 
 #ifdef CONFIG_X86_32
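
The crashkernel hunks in this file keep the existing command-line grammar; only the reservation flow changes (reserve_crashkernel_low() now returns an error code and runs before the main region is committed, so a failed low reservation frees the high one again). Illustrative parameters, following the parse_crashkernel*() calls above:

    crashkernel=256M        reserve 256 MiB below CRASH_ADDR_LOW_MAX
    crashkernel=512M,high   allow the reservation above 4G; if it ends up there,
                            a ",low" region for 32-bit DMA is reserved as well
    crashkernel=0,low       skip that automatic low reservation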
index 2c3336b6b264f1dbff0c9ff9f79b054d80a36c77..32165d6499795fd5cff24c80561f984df8af52fd 100644 (file)
@@ -299,7 +299,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
 
        if (current->mm->context.vdso)
                restorer = current->mm->context.vdso +
-                       selected_vdso32->sym___kernel_sigreturn;
+                       vdso_image_32.sym___kernel_sigreturn;
        else
                restorer = &frame->retcode;
        if (ksig->ka.sa.sa_flags & SA_RESTORER)
@@ -363,7 +363,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 
                /* Set up to return from userspace.  */
                restorer = current->mm->context.vdso +
-                       selected_vdso32->sym___kernel_rt_sigreturn;
+                       vdso_image_32.sym___kernel_rt_sigreturn;
                if (ksig->ka.sa.sa_flags & SA_RESTORER)
                        restorer = ksig->ka.sa.sa_restorer;
                put_user_ex(restorer, &frame->pretcode);
index e0c198e5f920202bf25fa36e34922d4b0d29d82c..892ee2e5ecbce417df506715f7b28d28c403ef91 100644 (file)
@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
  */
 #define UDELAY_10MS_DEFAULT 10000
 
-static unsigned int init_udelay = UDELAY_10MS_DEFAULT;
+static unsigned int init_udelay = INT_MAX;
 
 static int __init cpu_init_udelay(char *str)
 {
@@ -522,13 +522,16 @@ early_param("cpu_init_udelay", cpu_init_udelay);
 static void __init smp_quirk_init_udelay(void)
 {
        /* if cmdline changed it from default, leave it alone */
-       if (init_udelay != UDELAY_10MS_DEFAULT)
+       if (init_udelay != INT_MAX)
                return;
 
        /* if modern processor, use no delay */
        if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
            ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
                init_udelay = 0;
+
+       /* else, use legacy delay */
+       init_udelay = UDELAY_10MS_DEFAULT;
 }
 
 /*
@@ -657,7 +660,9 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
                /*
                 * Give the other CPU some time to accept the IPI.
                 */
-               if (init_udelay)
+               if (init_udelay == 0)
+                       udelay(10);
+               else
                        udelay(300);
 
                pr_debug("Startup point 1\n");
@@ -668,7 +673,9 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
                /*
                 * Give the other CPU some time to accept the IPI.
                 */
-               if (init_udelay)
+               if (init_udelay == 0)
+                       udelay(10);
+               else
                        udelay(200);
 
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP.  */
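
smp_quirk_init_udelay() now only applies its heuristic while init_udelay still holds the INT_MAX sentinel, so an explicit cpu_init_udelay= on the command line always wins. Illustrative settings (behaviour as shown in the hunks above):

    cpu_init_udelay=0       take the no-delay path (the hunks above then only
                            udelay(10) after each STARTUP IPI)
    cpu_init_udelay=10000   keep the legacy UDELAY_10MS_DEFAULT behaviour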
index c3f7602cd0386b2fb0a1a7437263f9c71f27d03c..c7c4d9c51e99fe582b71ab1f2e3ca9ffaae2226c 100644 (file)
@@ -168,21 +168,20 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
  *              ns = cycles * cyc2ns_scale / SC
  *
  *      And since SC is a constant power of two, we can convert the div
- *  into a shift.
+ *  into a shift. The larger SC is, the more accurate the conversion, but
+ *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
+ *  (64-bit result) can be used.
  *
- *  We can use khz divisor instead of mhz to keep a better precision, since
- *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ *  We can use khz divisor instead of mhz to keep a better precision.
  *  (mathieu.desnoyers@polymtl.ca)
  *
  *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
  */
 
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-
 static void cyc2ns_data_init(struct cyc2ns_data *data)
 {
        data->cyc2ns_mul = 0;
-       data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
+       data->cyc2ns_shift = 0;
        data->cyc2ns_offset = 0;
        data->__count = 0;
 }
@@ -216,14 +215,14 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 
        if (likely(data == tail)) {
                ns = data->cyc2ns_offset;
-               ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
+               ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
        } else {
                data->__count++;
 
                barrier();
 
                ns = data->cyc2ns_offset;
-               ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
+               ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
 
                barrier();
 
@@ -257,12 +256,22 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
         * time function is continuous; see the comment near struct
         * cyc2ns_data.
         */
-       data->cyc2ns_mul =
-               DIV_ROUND_CLOSEST(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR,
-                                 cpu_khz);
-       data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
+       clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, cpu_khz,
+                              NSEC_PER_MSEC, 0);
+
+       /*
+        * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
+        * not expected to be greater than 31 due to the original published
+        * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
+        * value) - refer perf_event_mmap_page documentation in perf_event.h.
+        */
+       if (data->cyc2ns_shift == 32) {
+               data->cyc2ns_shift = 31;
+               data->cyc2ns_mul >>= 1;
+       }
+
        data->cyc2ns_offset = ns_now -
-               mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
+               mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);
 
        cyc2ns_write_end(cpu, data);
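
The fixed CYC2NS_SCALE_FACTOR is replaced by a (mul, shift) pair computed by clocks_calc_mult_shift(), with the shift capped below 32 for the perf userpage ABI. As a worked example with assumed numbers: a 2 GHz clock means 0.5 ns per cycle, which shift = 31 and mul = 1 << 30 reproduce exactly, since mul / 2^shift = 1/2. A self-contained sketch, with mul_u64_u32_shr() re-implemented here only for illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* user-space stand-in for the kernel helper; the __int128 multiply models
     * the kernel's wider 64x32 -> 96 bit product (GCC/Clang extension) */
    static uint64_t mul_u64_u32_shr(uint64_t cyc, uint32_t mul, unsigned int shift)
    {
            return (uint64_t)(((unsigned __int128)cyc * mul) >> shift);
    }

    int main(void)
    {
            uint32_t     cyc2ns_mul   = 1u << 30;      /* assumed value for 2 GHz     */
            unsigned int cyc2ns_shift = 31;            /* capped below 32, as above   */
            uint64_t     cycles       = 3000000000ull; /* 1.5 s worth of 2 GHz cycles */

            printf("%llu ns\n", (unsigned long long)
                   mul_u64_u32_shr(cycles, cyc2ns_mul, cyc2ns_shift));
            /* prints "1500000000 ns" */
            return 0;
    }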
 
index b372a7557c16c7d8391fffafdf0b1c74b49c4822..9da95b9daf8deb83af606ae0fffb73f7fab74ff2 100644 (file)
@@ -2418,7 +2418,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        u64 val, cr0, cr4;
        u32 base3;
        u16 selector;
-       int i;
+       int i, r;
 
        for (i = 0; i < 16; i++)
                *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
@@ -2460,13 +2460,17 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
        ctxt->ops->set_gdt(ctxt, &dt);
 
+       r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+       if (r != X86EMUL_CONTINUE)
+               return r;
+
        for (i = 0; i < 6; i++) {
-               int r = rsm_load_seg_64(ctxt, smbase, i);
+               r = rsm_load_seg_64(ctxt, smbase, i);
                if (r != X86EMUL_CONTINUE)
                        return r;
        }
 
-       return rsm_enter_protected_mode(ctxt, cr0, cr4);
+       return X86EMUL_CONTINUE;
 }
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
index 94b7d15db3fc91f3a70d665892f6f41832145c22..2f9ed1ff063260ed33bf845e1e523df0ad2bd58e 100644 (file)
@@ -514,7 +514,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        if (svm->vmcb->control.next_rip != 0) {
-               WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
+               WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
                svm->next_rip = svm->vmcb->control.next_rip;
        }
 
@@ -866,64 +866,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
 }
 
-#define MTRR_TYPE_UC_MINUS     7
-#define MTRR2PROTVAL_INVALID 0xff
-
-static u8 mtrr2protval[8];
-
-static u8 fallback_mtrr_type(int mtrr)
-{
-       /*
-        * WT and WP aren't always available in the host PAT.  Treat
-        * them as UC and UC- respectively.  Everything else should be
-        * there.
-        */
-       switch (mtrr)
-       {
-       case MTRR_TYPE_WRTHROUGH:
-               return MTRR_TYPE_UNCACHABLE;
-       case MTRR_TYPE_WRPROT:
-               return MTRR_TYPE_UC_MINUS;
-       default:
-               BUG();
-       }
-}
-
-static void build_mtrr2protval(void)
-{
-       int i;
-       u64 pat;
-
-       for (i = 0; i < 8; i++)
-               mtrr2protval[i] = MTRR2PROTVAL_INVALID;
-
-       /* Ignore the invalid MTRR types.  */
-       mtrr2protval[2] = 0;
-       mtrr2protval[3] = 0;
-
-       /*
-        * Use host PAT value to figure out the mapping from guest MTRR
-        * values to nested page table PAT/PCD/PWT values.  We do not
-        * want to change the host PAT value every time we enter the
-        * guest.
-        */
-       rdmsrl(MSR_IA32_CR_PAT, pat);
-       for (i = 0; i < 8; i++) {
-               u8 mtrr = pat >> (8 * i);
-
-               if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
-                       mtrr2protval[mtrr] = __cm_idx2pte(i);
-       }
-
-       for (i = 0; i < 8; i++) {
-               if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
-                       u8 fallback = fallback_mtrr_type(i);
-                       mtrr2protval[i] = mtrr2protval[fallback];
-                       BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
-               }
-       }
-}
-
 static __init int svm_hardware_setup(void)
 {
        int cpu;
@@ -990,7 +932,6 @@ static __init int svm_hardware_setup(void)
        } else
                kvm_disable_tdp();
 
-       build_mtrr2protval();
        return 0;
 
 err:
@@ -1145,43 +1086,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
        return target_tsc - tsc;
 }
 
-static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
-{
-       struct kvm_vcpu *vcpu = &svm->vcpu;
-
-       /* Unlike Intel, AMD takes the guest's CR0.CD into account.
-        *
-        * AMD does not have IPAT.  To emulate it for the case of guests
-        * with no assigned devices, just set everything to WB.  If guests
-        * have assigned devices, however, we cannot force WB for RAM
-        * pages only, so use the guest PAT directly.
-        */
-       if (!kvm_arch_has_assigned_device(vcpu->kvm))
-               *g_pat = 0x0606060606060606;
-       else
-               *g_pat = vcpu->arch.pat;
-}
-
-static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
-{
-       u8 mtrr;
-
-       /*
-        * 1. MMIO: trust guest MTRR, so same as item 3.
-        * 2. No passthrough: always map as WB, and force guest PAT to WB as well
-        * 3. Passthrough: can't guarantee the result, try to trust guest.
-        */
-       if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
-               return 0;
-
-       if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) &&
-           kvm_read_cr0(vcpu) & X86_CR0_CD)
-               return _PAGE_NOCACHE;
-
-       mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
-       return mtrr2protval[mtrr];
-}
-
 static void init_vmcb(struct vcpu_svm *svm, bool init_event)
 {
        struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1278,7 +1182,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
                clr_cr_intercept(svm, INTERCEPT_CR3_READ);
                clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
                save->g_pat = svm->vcpu.arch.pat;
-               svm_set_guest_pat(svm, &save->g_pat);
                save->cr3 = 0;
                save->cr4 = 0;
        }
@@ -1673,10 +1576,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
        if (!vcpu->fpu_active)
                cr0 |= X86_CR0_TS;
-
-       /* These are emulated via page tables.  */
-       cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
-
+       /*
+        * re-enable caching here because the QEMU bios
+        * does not do it - this results in some delay at
+        * reboot
+        */
+       if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+               cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
        mark_dirty(svm->vmcb, VMCB_CR);
        update_cr0_intercept(svm);
@@ -3351,16 +3257,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        case MSR_VM_IGNNE:
                vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
                break;
-       case MSR_IA32_CR_PAT:
-               if (npt_enabled) {
-                       if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
-                               return 1;
-                       vcpu->arch.pat = data;
-                       svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
-                       mark_dirty(svm->vmcb, VMCB_NPT);
-                       break;
-               }
-               /* fall through */
        default:
                return kvm_set_msr_common(vcpu, msr);
        }
@@ -4195,6 +4091,11 @@ static bool svm_has_high_real_mode_segbase(void)
        return true;
 }
 
+static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+       return 0;
+}
+
 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 {
 }
index 64076740251e7f3b8197fe87eeff262891ac995a..6a8bc64566abde57f8914f103a6b5d9d49ed8ae8 100644 (file)
@@ -4105,17 +4105,13 @@ static void seg_setup(int seg)
 static int alloc_apic_access_page(struct kvm *kvm)
 {
        struct page *page;
-       struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
        mutex_lock(&kvm->slots_lock);
        if (kvm->arch.apic_access_page_done)
                goto out;
-       kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-       kvm_userspace_mem.flags = 0;
-       kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
-       kvm_userspace_mem.memory_size = PAGE_SIZE;
-       r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+       r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+                                   APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
        if (r)
                goto out;
 
@@ -4140,17 +4136,12 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 {
        /* Called with kvm->slots_lock held. */
 
-       struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
        BUG_ON(kvm->arch.ept_identity_pagetable_done);
 
-       kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-       kvm_userspace_mem.flags = 0;
-       kvm_userspace_mem.guest_phys_addr =
-               kvm->arch.ept_identity_map_addr;
-       kvm_userspace_mem.memory_size = PAGE_SIZE;
-       r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+       r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+                                   kvm->arch.ept_identity_map_addr, PAGE_SIZE);
 
        return r;
 }
@@ -4949,14 +4940,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
        int ret;
-       struct kvm_userspace_memory_region tss_mem = {
-               .slot = TSS_PRIVATE_MEMSLOT,
-               .guest_phys_addr = addr,
-               .memory_size = PAGE_SIZE * 3,
-               .flags = 0,
-       };
 
-       ret = x86_set_memory_region(kvm, &tss_mem);
+       ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+                                   PAGE_SIZE * 3);
        if (ret)
                return ret;
        kvm->arch.tss_addr = addr;
@@ -8617,17 +8603,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
        u64 ipat = 0;
 
        /* For VT-d and EPT combination
-        * 1. MMIO: guest may want to apply WC, trust it.
+        * 1. MMIO: always map as UC
         * 2. EPT with VT-d:
         *   a. VT-d without snooping control feature: can't guarantee the
-        *      result, try to trust guest.  So the same as item 1.
+        *      result, try to trust guest.
         *   b. VT-d with snooping control feature: snooping control feature of
         *      VT-d engine can guarantee the cache correctness. Just set it
         *      to WB to keep consistent with host. So the same as item 3.
         * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
         *    consistent with host MTRR
         */
-       if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+       if (is_mmio) {
+               cache = MTRR_TYPE_UNCACHABLE;
+               goto exit;
+       }
+
+       if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
                ipat = VMX_EPT_IPAT_BIT;
                cache = MTRR_TYPE_WRBACK;
                goto exit;
index 991466bf8dee4ab202f422e64c0d211bb5f439fe..9a9a198303219b6430159af03d4d1e1d898ec6f7 100644 (file)
@@ -1708,8 +1708,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                vcpu->pvclock_set_guest_stopped_request = false;
        }
 
-       pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO;
-
        /* If the host uses TSC clocksource, then it is stable */
        if (use_master_clock)
                pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
@@ -2007,8 +2005,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                        &vcpu->requests);
 
                        ka->boot_vcpu_runs_old_kvmclock = tmp;
-
-                       ka->kvmclock_offset = -get_kernel_ns();
                }
 
                vcpu->arch.time = data;
@@ -6457,6 +6453,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+       return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+               !vcpu->arch.apf.halted);
+}
+
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
        int r;
@@ -6465,8 +6467,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
        for (;;) {
-               if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-                   !vcpu->arch.apf.halted)
+               if (kvm_vcpu_running(vcpu))
                        r = vcpu_enter_guest(vcpu);
                else
                        r = vcpu_block(kvm, vcpu);
@@ -7478,34 +7479,66 @@ void kvm_arch_sync_events(struct kvm *kvm)
        kvm_free_pit(kvm);
 }
 
-int __x86_set_memory_region(struct kvm *kvm,
-                           const struct kvm_userspace_memory_region *mem)
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
        int i, r;
+       unsigned long hva;
+       struct kvm_memslots *slots = kvm_memslots(kvm);
+       struct kvm_memory_slot *slot, old;
 
        /* Called with kvm->slots_lock held.  */
-       BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+       if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
+               return -EINVAL;
 
+       slot = id_to_memslot(slots, id);
+       if (size) {
+               if (WARN_ON(slot->npages))
+                       return -EEXIST;
+
+               /*
+                * MAP_SHARED to prevent internal slot pages from being moved
+                * by fork()/COW.
+                */
+               hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+                             MAP_SHARED | MAP_ANONYMOUS, 0);
+               if (IS_ERR((void *)hva))
+                       return PTR_ERR((void *)hva);
+       } else {
+               if (!slot->npages)
+                       return 0;
+
+               hva = 0;
+       }
+
+       old = *slot;
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-               struct kvm_userspace_memory_region m = *mem;
+               struct kvm_userspace_memory_region m;
 
-               m.slot |= i << 16;
+               m.slot = id | (i << 16);
+               m.flags = 0;
+               m.guest_phys_addr = gpa;
+               m.userspace_addr = hva;
+               m.memory_size = size;
                r = __kvm_set_memory_region(kvm, &m);
                if (r < 0)
                        return r;
        }
 
+       if (!size) {
+               r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+               WARN_ON(r < 0);
+       }
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
-int x86_set_memory_region(struct kvm *kvm,
-                         const struct kvm_userspace_memory_region *mem)
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
        int r;
 
        mutex_lock(&kvm->slots_lock);
-       r = __x86_set_memory_region(kvm, mem);
+       r = __x86_set_memory_region(kvm, id, gpa, size);
        mutex_unlock(&kvm->slots_lock);
 
        return r;
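
With this rework the private-slot helpers take a slot id, guest physical address and size directly; the host mapping is created internally with vm_mmap(), and a size of 0 tears the slot down again via vm_munmap(). A sketch of the two call shapes (tss_addr is a placeholder here; the real callers appear in the vmx.c hunks above and in kvm_arch_destroy_vm() below):

    /* create: the helper vm_mmap()s anonymous shared backing for the slot */
    r = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, tss_addr, 3 * PAGE_SIZE);

    /* destroy: size 0 unmaps the old backing and drops the slot again */
    r = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);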
@@ -7520,16 +7553,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                 * unless the the memory map has changed due to process exit
                 * or fd copying.
                 */
-               struct kvm_userspace_memory_region mem;
-               memset(&mem, 0, sizeof(mem));
-               mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-               x86_set_memory_region(kvm, &mem);
-
-               mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-               x86_set_memory_region(kvm, &mem);
-
-               mem.slot = TSS_PRIVATE_MEMSLOT;
-               x86_set_memory_region(kvm, &mem);
+               x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
+               x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
+               x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
        }
        kvm_iommu_unmap_guest(kvm);
        kfree(kvm->arch.vpic);
@@ -7632,27 +7658,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                enum kvm_mr_change change)
 {
-       /*
-        * Only private memory slots need to be mapped here since
-        * KVM_SET_MEMORY_REGION ioctl is no longer supported.
-        */
-       if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
-               unsigned long userspace_addr;
-
-               /*
-                * MAP_SHARED to prevent internal slot pages from being moved
-                * by fork()/COW.
-                */
-               userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
-                                        PROT_READ | PROT_WRITE,
-                                        MAP_SHARED | MAP_ANONYMOUS, 0);
-
-               if (IS_ERR((void *)userspace_addr))
-                       return PTR_ERR((void *)userspace_addr);
-
-               memslot->userspace_addr = userspace_addr;
-       }
-
        return 0;
 }
 
@@ -7714,17 +7719,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 {
        int nr_mmu_pages = 0;
 
-       if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
-               int ret;
-
-               ret = vm_munmap(old->userspace_addr,
-                               old->npages * PAGE_SIZE);
-               if (ret < 0)
-                       printk(KERN_WARNING
-                              "kvm_vm_ioctl_set_memory_region: "
-                              "failed to munmap memory\n");
-       }
-
        if (!kvm->arch.n_requested_mmu_pages)
                nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
@@ -7773,19 +7767,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
        kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+       if (!list_empty_careful(&vcpu->async_pf.done))
+               return true;
+
+       if (kvm_apic_has_events(vcpu))
+               return true;
+
+       if (vcpu->arch.pv.pv_unhalted)
+               return true;
+
+       if (atomic_read(&vcpu->arch.nmi_queued))
+               return true;
+
+       if (test_bit(KVM_REQ_SMI, &vcpu->requests))
+               return true;
+
+       if (kvm_arch_interrupt_allowed(vcpu) &&
+           kvm_cpu_has_interrupt(vcpu))
+               return true;
+
+       return false;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
        if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
                kvm_x86_ops->check_nested_events(vcpu, false);
 
-       return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-               !vcpu->arch.apf.halted)
-               || !list_empty_careful(&vcpu->async_pf.done)
-               || kvm_apic_has_events(vcpu)
-               || vcpu->arch.pv.pv_unhalted
-               || atomic_read(&vcpu->arch.nmi_queued) ||
-               (kvm_arch_interrupt_allowed(vcpu) &&
-                kvm_cpu_has_interrupt(vcpu));
+       return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
index 816488c0b97e3540a59af5c5f4c001fed1f5ba41..d388de72eacaa353ba14e3340a30ef64b63bcd97 100644 (file)
@@ -353,8 +353,12 @@ AVXcode: 1
 17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
 18: Grp16 (1A)
 19:
-1a: BNDCL Ev,Gv | BNDCU Ev,Gv | BNDMOV Gv,Ev | BNDLDX Gv,Ev,Gv
-1b: BNDCN Ev,Gv | BNDMOV Ev,Gv | BNDMK Gv,Ev | BNDSTX Ev,GV,Gv
+# Intel SDM opcode map does not list MPX instructions. For now using Gv for
+# bnd registers and Ev for everything else is OK because the instruction
+# decoder does not use the information except as an indication that there is
+# a ModR/M byte.
+1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
+1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
 1c:
 1d:
 1e:
@@ -732,6 +736,12 @@ bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
 be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
 bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
 # 0x0f 0x38 0xc0-0xff
+c8: sha1nexte Vdq,Wdq
+c9: sha1msg1 Vdq,Wdq
+ca: sha1msg2 Vdq,Wdq
+cb: sha256rnds2 Vdq,Wdq
+cc: sha256msg1 Vdq,Wdq
+cd: sha256msg2 Vdq,Wdq
 db: VAESIMC Vdq,Wdq (66),(v1)
 dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
 dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
@@ -790,6 +800,7 @@ AVXcode: 3
 61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
 62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
 63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
+cc: sha1rnds4 Vdq,Wdq,Ib
 df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
 f0: RORX Gy,Ey,Ib (F2),(v)
 EndTable
@@ -874,7 +885,7 @@ GrpTable: Grp7
 2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
 3: LIDT Ms
 4: SMSW Mw/Rv
-5:
+5: rdpkru (110),(11B) | wrpkru (111),(11B)
 6: LMSW Ew
 7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
 EndTable
@@ -888,6 +899,9 @@ EndTable
 
 GrpTable: Grp9
 1: CMPXCHG8B/16B Mq/Mdq
+3: xrstors
+4: xsavec
+5: xsaves
 6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
 7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
 EndTable
@@ -932,8 +946,8 @@ GrpTable: Grp15
 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
 4: XSAVE
 5: XRSTOR | lfence (11B)
-6: XSAVEOPT | mfence (11B)
-7: clflush | sfence (11B)
+6: XSAVEOPT | clwb (66) | mfence (11B)
+7: clflush | clflushopt (66) | sfence (11B) | pcommit (66),(11B)
 EndTable
 
 GrpTable: Grp16
index 1d8a83df153af10a150baf44cd7fd85adf0bbcf4..1f37cb2b56a9938784ba64abb4db0e51de0c70b9 100644 (file)
@@ -693,14 +693,12 @@ void free_initmem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-#ifdef CONFIG_MICROCODE_EARLY
        /*
         * Remember, initrd memory may contain microcode or other useful things.
         * Before we lose initrd mem, we need to find a place to hold them
         * now that normal virtual memory is enabled.
         */
        save_microcode_in_initrd();
-#endif
 
        /*
         * end could be not aligned, and We can not align that,
index 30564e2752d361870e91a4e25a5afbb3d029b7d6..df48430c279b8688996b9f0074c08b1ce139af06 100644 (file)
@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void)
         * has been zapped already via cleanup_highmem().
         */
        all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
-       set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
+       set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
 
        rodata_test();
 
index 2c44c07923012a58e9f430bf4754910984771556..050a092b8d9a2628c03227d06c4d8a8314d5433a 100644 (file)
@@ -647,9 +647,12 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));
 
-       if (pfn_range_is_mapped(PFN_DOWN(__pa(address)),
-                               PFN_DOWN(__pa(address)) + 1))
-               split_page_count(level);
+       if (virt_addr_valid(address)) {
+               unsigned long pfn = PFN_DOWN(__pa(address));
+
+               if (pfn_range_is_mapped(pfn, pfn + 1))
+                       split_page_count(level);
+       }
 
        /*
         * Install the new, split up pagetable.
index d7f997f7c26d2502a1a188583d6817c3672417d1..ea48449b2e63d1428de814d654ba58087dee436a 100644 (file)
@@ -50,11 +50,16 @@ void __init efi_bgrt_init(void)
                       bgrt_tab->version);
                return;
        }
-       if (bgrt_tab->status != 1) {
-               pr_err("Ignoring BGRT: invalid status %u (expected 1)\n",
+       if (bgrt_tab->status & 0xfe) {
+               pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n",
                       bgrt_tab->status);
                return;
        }
+       if (bgrt_tab->status != 1) {
+               pr_debug("Ignoring BGRT: invalid status %u (expected 1)\n",
+                        bgrt_tab->status);
+               return;
+       }
        if (bgrt_tab->image_type != 0) {
                pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
                       bgrt_tab->image_type);
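
The reworked check above validates the BGRT status byte in two steps: any set bit in the reserved mask 0xfe is a hard error, while a displayed bit that is not 1 is only skipped quietly. A stand-alone sketch of that two-step test, using illustrative macro names rather than the real ACPI structures:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative masks: bit 0 is "image displayed", bits 1-7 are reserved. */
    #define BGRT_STATUS_DISPLAYED  0x01u
    #define BGRT_STATUS_RESERVED   0xfeu

    static int bgrt_status_usable(uint8_t status)
    {
            if (status & BGRT_STATUS_RESERVED) {
                    fprintf(stderr, "reserved status bits set: %#x\n", status);
                    return 0;               /* hard error, like the pr_err() path */
            }
            if (status != BGRT_STATUS_DISPLAYED)
                    return 0;               /* quietly skipped, like pr_debug() */
            return 1;
    }

    int main(void)
    {
            uint8_t samples[] = { 0x00, 0x01, 0x02, 0x03 };

            for (unsigned int i = 0; i < sizeof(samples); i++)
                    printf("status %#04x -> %s\n", samples[i],
                           bgrt_status_usable(samples[i]) ? "use" : "ignore");
            return 0;
    }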
index 1db84c0758b732b3465fcc896ef98862dabe0f16..ad285404ea7f58ac74e998d5658a8c72a9be019d 100644 (file)
@@ -194,7 +194,7 @@ static void __init do_add_efi_memmap(void)
 int __init efi_memblock_x86_reserve_range(void)
 {
        struct efi_info *e = &boot_params.efi_info;
-       unsigned long pmap;
+       phys_addr_t pmap;
 
        if (efi_enabled(EFI_PARAVIRT))
                return 0;
@@ -209,7 +209,7 @@ int __init efi_memblock_x86_reserve_range(void)
 #else
        pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
 #endif
-       memmap.phys_map         = (void *)pmap;
+       memmap.phys_map         = pmap;
        memmap.nr_map           = e->efi_memmap_size /
                                  e->efi_memdesc_size;
        memmap.desc_size        = e->efi_memdesc_size;
@@ -222,7 +222,7 @@ int __init efi_memblock_x86_reserve_range(void)
        return 0;
 }
 
-static void __init print_efi_memmap(void)
+void __init efi_print_memmap(void)
 {
 #ifdef EFI_DEBUG
        efi_memory_desc_t *md;
@@ -524,7 +524,7 @@ void __init efi_init(void)
                return;
 
        if (efi_enabled(EFI_DBG))
-               print_efi_memmap();
+               efi_print_memmap();
 
        efi_esrt_init();
 }
@@ -704,6 +704,70 @@ out:
        return ret;
 }
 
+/*
+ * Iterate the EFI memory map in reverse order because the regions
+ * will be mapped top-down. The end result is the same as if we had
+ * mapped things forward, but doesn't require us to change the
+ * existing implementation of efi_map_region().
+ */
+static inline void *efi_map_next_entry_reverse(void *entry)
+{
+       /* Initial call */
+       if (!entry)
+               return memmap.map_end - memmap.desc_size;
+
+       entry -= memmap.desc_size;
+       if (entry < memmap.map)
+               return NULL;
+
+       return entry;
+}
+
+/*
+ * efi_map_next_entry - Return the next EFI memory map descriptor
+ * @entry: Previous EFI memory map descriptor
+ *
+ * This is a helper function to iterate over the EFI memory map, which
+ * we do in different orders depending on the current configuration.
+ *
+ * To begin traversing the memory map @entry must be %NULL.
+ *
+ * Returns %NULL when we reach the end of the memory map.
+ */
+static void *efi_map_next_entry(void *entry)
+{
+       if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
+               /*
+                * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
+                * config table feature requires us to map all entries
+                * in the same order as they appear in the EFI memory
+                * map. That is to say, entry N must have a lower
+                * virtual address than entry N+1. This is because the
+                * firmware toolchain leaves relative references in
+                * the code/data sections, which are split and become
+                * separate EFI memory regions. Mapping things
+                * out-of-order leads to the firmware accessing
+                * unmapped addresses.
+                *
+                * Since we need to map things this way whether or not
+                * the kernel actually makes use of
+                * EFI_PROPERTIES_TABLE, let's just switch to this
+                * scheme by default for 64-bit.
+                */
+               return efi_map_next_entry_reverse(entry);
+       }
+
+       /* Initial call */
+       if (!entry)
+               return memmap.map;
+
+       entry += memmap.desc_size;
+       if (entry >= memmap.map_end)
+               return NULL;
+
+       return entry;
+}
+
 /*
  * Map the efi memory ranges of the runtime services and update new_mmap with
  * virtual addresses.
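
efi_map_next_entry() above walks the byte-granular memory map with a cursor that starts at NULL and advances by desc_size, either forwards or, for 64-bit without old_map, from the last descriptor backwards so regions are mapped top-down in firmware order. A minimal user-space sketch of the same cursor logic over a dummy descriptor array (the stride and count are invented):

    #include <stdio.h>

    #define DESC_SIZE 16                    /* pretend descriptor stride */
    #define NUM_DESC  4

    static char map[NUM_DESC * DESC_SIZE];
    static char *map_end = map + sizeof(map);

    /* Forward walk: NULL starts at the first descriptor. */
    static void *next_entry(void *entry)
    {
            if (!entry)
                    return map;
            entry = (char *)entry + DESC_SIZE;
            return (char *)entry < map_end ? entry : NULL;
    }

    /* Reverse walk: NULL starts at the last descriptor. */
    static void *next_entry_reverse(void *entry)
    {
            if (!entry)
                    return map_end - DESC_SIZE;
            entry = (char *)entry - DESC_SIZE;
            return (char *)entry >= map ? entry : NULL;
    }

    int main(void)
    {
            void *p = NULL;

            while ((p = next_entry(p)))
                    printf("forward  idx %ld\n", (long)(((char *)p - map) / DESC_SIZE));
            p = NULL;
            while ((p = next_entry_reverse(p)))
                    printf("reverse  idx %ld\n", (long)(((char *)p - map) / DESC_SIZE));
            return 0;
    }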
@@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
        unsigned long left = 0;
        efi_memory_desc_t *md;
 
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+       p = NULL;
+       while ((p = efi_map_next_entry(p))) {
                md = p;
                if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
 #ifdef CONFIG_X86_64
@@ -952,24 +1017,6 @@ u32 efi_mem_type(unsigned long phys_addr)
        return 0;
 }
 
-u64 efi_mem_attributes(unsigned long phys_addr)
-{
-       efi_memory_desc_t *md;
-       void *p;
-
-       if (!efi_enabled(EFI_MEMMAP))
-               return 0;
-
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-               md = p;
-               if ((md->phys_addr <= phys_addr) &&
-                   (phys_addr < (md->phys_addr +
-                                 (md->num_pages << EFI_PAGE_SHIFT))))
-                       return md->attribute;
-       }
-       return 0;
-}
-
 static int __init arch_parse_efi_cmdline(char *str)
 {
        if (!str) {
@@ -979,8 +1026,6 @@ static int __init arch_parse_efi_cmdline(char *str)
 
        if (parse_option_str(str, "old_map"))
                set_bit(EFI_OLD_MEMMAP, &efi.flags);
-       if (parse_option_str(str, "debug"))
-               set_bit(EFI_DBG, &efi.flags);
 
        return 0;
 }
index 10fea5fc821e16f450121a550cc00ea214cc4469..df280da348255de9178c0c98cdb1f66b31f13f49 100644 (file)
@@ -1,11 +1,9 @@
 config AMD_MCE_INJ
        tristate "Simple MCE injection interface for AMD processors"
-       depends on RAS && EDAC_DECODE_MCE && DEBUG_FS
+       depends on RAS && EDAC_DECODE_MCE && DEBUG_FS && AMD_NB
        default n
        help
          This is a simple debugfs interface to inject MCEs and test different
          aspects of the MCE handling code.
 
          WARNING: Do not even assume this interface is staying stable!
-
-
index 17e35b5bf7792a95a89472c3dd48c39bd81f0192..55d38cfa46c2626c6d2f85587da27c05ca3e6bf7 100644 (file)
 #include <linux/cpu.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
+#include <linux/pci.h>
+
 #include <asm/mce.h>
+#include <asm/amd_nb.h>
+#include <asm/irq_vectors.h>
 
 #include "../kernel/cpu/mcheck/mce-internal.h"
 
@@ -30,16 +34,21 @@ static struct dentry *dfs_inj;
 static u8 n_banks;
 
 #define MAX_FLAG_OPT_SIZE      3
+#define NBCFG                  0x44
 
 enum injection_type {
        SW_INJ = 0,     /* SW injection, simply decode the error */
        HW_INJ,         /* Trigger a #MC */
+       DFR_INT_INJ,    /* Trigger Deferred error interrupt */
+       THR_INT_INJ,    /* Trigger threshold interrupt */
        N_INJ_TYPES,
 };
 
 static const char * const flags_options[] = {
        [SW_INJ] = "sw",
        [HW_INJ] = "hw",
+       [DFR_INT_INJ] = "df",
+       [THR_INT_INJ] = "th",
        NULL
 };
 
@@ -129,12 +138,9 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf,
 {
        char buf[MAX_FLAG_OPT_SIZE], *__buf;
        int err;
-       size_t ret;
 
        if (cnt > MAX_FLAG_OPT_SIZE)
-               cnt = MAX_FLAG_OPT_SIZE;
-
-       ret = cnt;
+               return -EINVAL;
 
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
@@ -150,9 +156,9 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf,
                return err;
        }
 
-       *ppos += ret;
+       *ppos += cnt;
 
-       return ret;
+       return cnt;
 }
 
 static const struct file_operations flags_fops = {
@@ -185,6 +191,55 @@ static void trigger_mce(void *info)
        asm volatile("int $18");
 }
 
+static void trigger_dfr_int(void *info)
+{
+       asm volatile("int %0" :: "i" (DEFERRED_ERROR_VECTOR));
+}
+
+static void trigger_thr_int(void *info)
+{
+       asm volatile("int %0" :: "i" (THRESHOLD_APIC_VECTOR));
+}
+
+static u32 get_nbc_for_node(int node_id)
+{
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+       u32 cores_per_node;
+
+       cores_per_node = c->x86_max_cores / amd_get_nodes_per_socket();
+
+       return cores_per_node * node_id;
+}
+
+static void toggle_nb_mca_mst_cpu(u16 nid)
+{
+       struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
+       u32 val;
+       int err;
+
+       if (!F3)
+               return;
+
+       err = pci_read_config_dword(F3, NBCFG, &val);
+       if (err) {
+               pr_err("%s: Error reading F%dx%03x.\n",
+                      __func__, PCI_FUNC(F3->devfn), NBCFG);
+               return;
+       }
+
+       if (val & BIT(27))
+               return;
+
+       pr_err("%s: Set D18F3x44[NbMcaToMstCpuEn] which BIOS hasn't done.\n",
+              __func__);
+
+       val |= BIT(27);
+       err = pci_write_config_dword(F3, NBCFG, val);
+       if (err)
+               pr_err("%s: Error writing F%dx%03x.\n",
+                      __func__, PCI_FUNC(F3->devfn), NBCFG);
+}
+
 static void do_inject(void)
 {
        u64 mcg_status = 0;
@@ -205,6 +260,26 @@ static void do_inject(void)
        if (!(i_mce.status & MCI_STATUS_PCC))
                mcg_status |= MCG_STATUS_RIPV;
 
+       /*
+        * Ensure necessary status bits for deferred errors:
+        * - MCx_STATUS[Deferred]: make sure it is a deferred error
+        * - MCx_STATUS[UC] cleared: deferred errors are _not_ UC
+        */
+       if (inj_type == DFR_INT_INJ) {
+               i_mce.status |= MCI_STATUS_DEFERRED;
+               i_mce.status |= (i_mce.status & ~MCI_STATUS_UC);
+       }
+
+       /*
+        * For multi node CPUs, logging and reporting of bank 4 errors happens
+        * only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for
+        * Fam10h and later BKDGs.
+        */
+       if (static_cpu_has(X86_FEATURE_AMD_DCM) && b == 4) {
+               toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
+               cpu = get_nbc_for_node(amd_get_nb_id(cpu));
+       }
+
        get_online_cpus();
        if (!cpu_online(cpu))
                goto err;
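
For bank 4 errors on multi-node parts, do_inject() above redirects the injection to the node base core returned by get_nbc_for_node(), which is simply cores-per-node times the node id. A quick worked example with invented topology numbers (not taken from any real part):

    #include <stdio.h>

    /* Illustrative topology: 16 cores per socket, 2 NUMA nodes per socket. */
    #define X86_MAX_CORES           16
    #define NODES_PER_SOCKET        2

    static unsigned int nbc_for_node(unsigned int node_id)
    {
            unsigned int cores_per_node = X86_MAX_CORES / NODES_PER_SOCKET;

            return cores_per_node * node_id;        /* first core of that node */
    }

    int main(void)
    {
            for (unsigned int node = 0; node < NODES_PER_SOCKET; node++)
                    printf("node %u -> base core %u\n", node, nbc_for_node(node));
            return 0;
    }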
@@ -225,7 +300,16 @@ static void do_inject(void)
 
        toggle_hw_mce_inject(cpu, false);
 
-       smp_call_function_single(cpu, trigger_mce, NULL, 0);
+       switch (inj_type) {
+       case DFR_INT_INJ:
+               smp_call_function_single(cpu, trigger_dfr_int, NULL, 0);
+               break;
+       case THR_INT_INJ:
+               smp_call_function_single(cpu, trigger_thr_int, NULL, 0);
+               break;
+       default:
+               smp_call_function_single(cpu, trigger_mce, NULL, 0);
+       }
 
 err:
        put_online_cpus();
@@ -290,6 +374,11 @@ static const char readme_msg[] =
 "\t    handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n"
 "\t    is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n"
 "\t    before injecting.\n"
+"\t  - \"df\": Trigger APIC interrupt for Deferred error. Causes deferred \n"
+"\t    error APIC interrupt handler to handle the error if the feature is \n"
+"\t    is present in hardware. \n"
+"\t  - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n"
+"\t    APIC interrupt handler to handle the error. \n"
 "\n";
 
 static ssize_t
index 9fe77b7b5a0ed5b77f05174e8ea87f39cb80e2ed..81d6562ce01d5ce52294691e03555c1b4cf3e10f 100644 (file)
@@ -3,6 +3,10 @@
 
 #include <uapi/linux/audit.h>
 
+typedef asmlinkage long (*sys_call_ptr_t)(unsigned long, unsigned long,
+                                         unsigned long, unsigned long,
+                                         unsigned long, unsigned long);
+
 static inline int syscall_get_arch(void)
 {
 #ifdef CONFIG_X86_32
index 9701a4fd7bf2ea83d640ddeefebd5e4d5de27598..836a1eb5df436bdad88fe3b4ae59b512f9573b19 100644 (file)
 #include <skas.h>
 #include <sysdep/tls.h>
 
-extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
+static inline int modify_ldt (int func, void *ptr, unsigned long bytecount)
+{
+       return syscall(__NR_modify_ldt, func, ptr, bytecount);
+}
 
 static long write_ldt_entry(struct mm_id *mm_idp, int func,
                     struct user_desc *desc, void **addr, int done)
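
The change above turns modify_ldt() from an extern declaration into an inline wrapper that issues the raw system call via syscall(2). The same pattern works for any call lacking a libc wrapper; a small sketch using gettid as the example syscall number:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* Thin wrapper around a raw system call, mirroring the modify_ldt() helper. */
    static inline long raw_gettid(void)
    {
            return syscall(SYS_gettid);
    }

    int main(void)
    {
            printf("tid=%ld pid=%ld\n", raw_gettid(), (long)getpid());
            return 0;
    }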
index bd16d6c370ec9aaeb3328779e277de8129ef61f3..439c0994b69689ade4fa49c6a9ead46e25597880 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/sys.h>
 #include <linux/cache.h>
 #include <generated/user_constants.h>
+#include <asm/syscall.h>
 
 #define __NO_STUBS
 
 
 #define old_mmap sys_old_mmap
 
-#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void sym(void) ;
+#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_32.h>
 
 #undef __SYSCALL_I386
 #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym,
 
-typedef asmlinkage void (*sys_call_ptr_t)(void);
-
-extern asmlinkage void sys_ni_syscall(void);
+extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
 const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
        /*
index a75d8700472a4f52db0211217cf7bbcd4f2dc1f5..b74ea6c2c0e7b2fdb43d2f60231edc8c994333c6 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/sys.h>
 #include <linux/cache.h>
 #include <generated/user_constants.h>
+#include <asm/syscall.h>
 
 #define __NO_STUBS
 
 #define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
 #define __SYSCALL_X32(nr, sym, compat) /* Not supported */
 
-#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
+#define __SYSCALL_64(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_64.h>
 
 #undef __SYSCALL_64
 #define __SYSCALL_64(nr, sym, compat) [ nr ] = sym,
 
-typedef void (*sys_call_ptr_t)(void);
-
-extern void sys_ni_syscall(void);
+extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
 const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
        /*
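
Both UM syscall tables now use sys_call_ptr_t, a pointer to a function taking six unsigned long arguments, so every entry can be invoked through one uniform signature no matter how many parameters the syscall really consumes. A self-contained sketch of that dispatch style with made-up handlers:

    #include <stdio.h>

    typedef long (*sys_call_ptr_t)(unsigned long, unsigned long, unsigned long,
                                   unsigned long, unsigned long, unsigned long);

    /* Hypothetical handlers: each ignores the arguments it does not need. */
    static long sys_getanswer(unsigned long a, unsigned long b, unsigned long c,
                              unsigned long d, unsigned long e, unsigned long f)
    {
            (void)a; (void)b; (void)c; (void)d; (void)e; (void)f;
            return 42;
    }

    static long sys_add2(unsigned long a, unsigned long b, unsigned long c,
                         unsigned long d, unsigned long e, unsigned long f)
    {
            (void)c; (void)d; (void)e; (void)f;
            return (long)(a + b);
    }

    static const sys_call_ptr_t table[] = { sys_getanswer, sys_add2 };

    static long dispatch(unsigned int nr, unsigned long a, unsigned long b)
    {
            /* Unused argument slots are simply passed as zero. */
            return table[nr](a, b, 0, 0, 0, 0);
    }

    int main(void)
    {
            printf("%ld %ld\n", dispatch(0, 0, 0), dispatch(1, 2, 3));
            return 0;
    }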
index 30d12afe52ed173b2a81720cd5c89c24e667de2a..993b7a71386d53f79befa7a302ede2fdcbed6bd4 100644 (file)
 #include <linux/memblock.h>
 #include <linux/edd.h>
 
+#ifdef CONFIG_KEXEC_CORE
+#include <linux/kexec.h>
+#endif
+
 #include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/xen.h>
@@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
                /* Fast syscall setup is all done in hypercalls, so
                   these are all ignored.  Stub them out here to stop
                   Xen console noise. */
+               break;
 
        default:
                if (!pmu_msr_write(msr, low, high, &ret))
@@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
        .notifier_call  = xen_hvm_cpu_notify,
 };
 
+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+       native_machine_shutdown();
+       if (kexec_in_progress)
+               xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+       native_machine_crash_shutdown(regs);
+       xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
 static void __init xen_hvm_guest_init(void)
 {
        if (xen_pv_domain())
@@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void)
        x86_init.irqs.intr_init = xen_init_IRQ;
        xen_hvm_init_time_ops();
        xen_hvm_init_mmu_ops();
+#ifdef CONFIG_KEXEC_CORE
+       machine_ops.shutdown = xen_hvm_shutdown;
+       machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
 }
 #endif
 
index bfc08b13044b181c5948e5a2f22c205e900e0b47..660b3cfef23485f149e1a9b0b88f0b12666dbefb 100644 (file)
@@ -112,6 +112,15 @@ static unsigned long *p2m_identity;
 static pte_t *p2m_missing_pte;
 static pte_t *p2m_identity_pte;
 
+/*
+ * Hint at last populated PFN.
+ *
+ * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
+ * can avoid scanning the whole P2M (which may be sized to account for
+ * hotplugged memory).
+ */
+static unsigned long xen_p2m_last_pfn;
+
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
        BUG_ON(pfn >= MAX_P2M_PFN);
@@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void)
        else
                HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                        virt_to_mfn(p2m_top_mfn);
-       HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
+       HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
        HYPERVISOR_shared_info->arch.p2m_generation = 0;
        HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
        HYPERVISOR_shared_info->arch.p2m_cr3 =
@@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void)
        static struct vm_struct vm;
        unsigned long p2m_limit;
 
+       xen_p2m_last_pfn = xen_max_p2m_pfn;
+
        p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
        vm.flags = VM_ALLOC;
        vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
@@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn)
                        free_p2m_page(p2m);
        }
 
+       /* Expanded the p2m? */
+       if (pfn > xen_p2m_last_pfn) {
+               xen_p2m_last_pfn = pfn;
+               HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
+       }
+
        return true;
 }
 
index f5ef6746d47a0ee36f6b0a11edd0c49cbcf3590a..63320b6d35bc45f4292d3d5371e529e28a7e837c 100644 (file)
@@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void)
 {
        unsigned long max_pages, limit;
        domid_t domid = DOMID_SELF;
-       int ret;
+       long ret;
 
        limit = xen_get_pages_limit();
        max_pages = limit;
@@ -798,7 +798,7 @@ char * __init xen_memory_setup(void)
                xen_ignore_unusable();
 
        /* Make sure the Xen-supplied memory map is well-ordered. */
-       sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
+       sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
                          &xen_e820_map_entries);
 
        max_pages = xen_get_max_pages();
@@ -965,17 +965,8 @@ char * __init xen_auto_xlated_memory_setup(void)
 static void __init fiddle_vdso(void)
 {
 #ifdef CONFIG_X86_32
-       /*
-        * This could be called before selected_vdso32 is initialized, so
-        * just fiddle with both possible images.  vdso_image_32_syscall
-        * can't be selected, since it only exists on 64-bit systems.
-        */
-       u32 *mask;
-       mask = vdso_image_32_int80.data +
-               vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
-       *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
-       mask = vdso_image_32_sysenter.data +
-               vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
+       u32 *mask = vdso_image_32.data +
+               vdso_image_32.sym_VDSO32_NOTE_MASK;
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
 #endif
 }
index 63c223dff5f1eebed92297d2cd641535a3aeceb9..b56855a1382a374f8c52632b9aa243a1112a1a06 100644 (file)
@@ -28,4 +28,5 @@ generic-y += statfs.h
 generic-y += termios.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 93795d04730387c5207a54807ecb6fe0fc571c50..fd8017ce298afcc54aedae0763e36d71377a02b5 100644 (file)
@@ -47,7 +47,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)         ACCESS_ONCE((v)->counter)
+#define atomic_read(v)         READ_ONCE((v)->counter)
 
 /**
  * atomic_set - set atomic variable
@@ -56,7 +56,7 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v,i)                ((v)->counter = (i))
+#define atomic_set(v,i)                WRITE_ONCE((v)->counter, (i))
 
 #if XCHAL_HAVE_S32C1I
 #define ATOMIC_OP(op)                                                  \
index 2eb722d48773cb8a8de49d58b934eed830755da7..18e92a6645e24741b786bc35f14b9d06f1355569 100644 (file)
@@ -576,7 +576,7 @@ void blk_cleanup_queue(struct request_queue *q)
                q->queue_lock = &q->__queue_lock;
        spin_unlock_irq(lock);
 
-       bdi_destroy(&q->backing_dev_info);
+       bdi_unregister(&q->backing_dev_info);
 
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
index bd40292e5009675ef17d6914b67010834cf1beb6..9ebf65379556a0f5730b3934acd2fa78e232e816 100644 (file)
@@ -26,13 +26,6 @@ static void bio_batch_end_io(struct bio *bio)
        bio_put(bio);
 }
 
-/*
- * Ensure that max discard sectors doesn't overflow bi_size and hopefully
- * it is of the proper granularity as long as the granularity is a power
- * of two.
- */
-#define MAX_BIO_SECTORS ((1U << 31) >> 9)
-
 /**
  * blkdev_issue_discard - queue a discard
  * @bdev:      blockdev to issue discard for
@@ -50,6 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        int type = REQ_WRITE | REQ_DISCARD;
+       unsigned int granularity;
+       int alignment;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;
@@ -61,6 +56,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
 
+       /* Zero-sector (unknown) and one-sector granularities are the same.  */
+       granularity = max(q->limits.discard_granularity >> 9, 1U);
+       alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
+
        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secdiscard(q))
                        return -EOPNOTSUPP;
@@ -74,7 +73,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        blk_start_plug(&plug);
        while (nr_sects) {
                unsigned int req_sects;
-               sector_t end_sect;
+               sector_t end_sect, tmp;
 
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
@@ -82,8 +81,22 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                        break;
                }
 
-               req_sects = min_t(sector_t, nr_sects, MAX_BIO_SECTORS);
+               /* Make sure bi_size doesn't overflow */
+               req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
+
+               /*
+                * If splitting a request, and the next starting sector would be
+                * misaligned, stop the discard at the previous aligned sector.
+                */
                end_sect = sector + req_sects;
+               tmp = end_sect;
+               if (req_sects < nr_sects &&
+                   sector_div(tmp, granularity) != alignment) {
+                       end_sect = end_sect - alignment;
+                       sector_div(end_sect, granularity);
+                       end_sect = end_sect * granularity + alignment;
+                       req_sects = end_sect - sector;
+               }
 
                bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
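
The splitting logic above trims each discard chunk so that the next chunk starts on a granularity-aligned sector (offset by the device's discard alignment). A worked example of the same arithmetic with made-up sector numbers:

    #include <stdio.h>

    /* Worked example of the discard-split trimming; all numbers are invented. */
    int main(void)
    {
            unsigned long long sector = 10, nr_sects = 100;
            unsigned int granularity = 8;           /* in 512-byte sectors */
            unsigned int alignment = 0;             /* offset within a granule */
            unsigned int max_req = 21;              /* pretend per-bio limit */

            while (nr_sects) {
                    unsigned long long req_sects =
                            nr_sects < max_req ? nr_sects : max_req;
                    unsigned long long end_sect = sector + req_sects;

                    /* Trim so the next chunk starts on an aligned sector. */
                    if (req_sects < nr_sects &&
                        end_sect % granularity != alignment) {
                            end_sect -= alignment;
                            end_sect /= granularity;
                            end_sect = end_sect * granularity + alignment;
                            req_sects = end_sect - sector;
                    }

                    printf("discard [%llu, %llu)\n", sector, sector + req_sects);
                    nr_sects -= req_sects;
                    sector += req_sects;
            }
            return 0;
    }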
index 1e28ddb656b891b92d7c135fa65914939b1451aa..8764c241e5bb44858e753b75f6c102c06a927171 100644 (file)
@@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu)
        return cpu;
 }
 
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
+int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+                           const struct cpumask *online_mask)
 {
        unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
        cpumask_var_t cpus;
@@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
 
        cpumask_clear(cpus);
        nr_cpus = nr_uniq_cpus = 0;
-       for_each_online_cpu(i) {
+       for_each_cpu(i, online_mask) {
                nr_cpus++;
                first_sibling = get_first_sibling(i);
                if (!cpumask_test_cpu(first_sibling, cpus))
@@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
 
        queue = 0;
        for_each_possible_cpu(i) {
-               if (!cpu_online(i)) {
+               if (!cpumask_test_cpu(i, online_mask)) {
                        map[i] = 0;
                        continue;
                }
@@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
        if (!map)
                return NULL;
 
-       if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
+       if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
                return map;
 
        kfree(map);
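
blk_mq_update_queue_map() now receives the online mask explicitly instead of reading cpu_online_mask, so the hotplug path can pass the mask that is about to become effective. A simplified sketch of spreading CPUs over hardware queues given such a mask (the sibling-collapsing step is left out):

    #include <stdio.h>

    #define NR_CPUS 8

    /* Spread online CPUs round-robin over nr_queues; offline CPUs map to queue 0. */
    static void update_queue_map(unsigned int *map, unsigned int nr_queues,
                                 const int *online_mask)
    {
            unsigned int queue = 0;

            for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
                    if (!online_mask[cpu]) {
                            map[cpu] = 0;
                            continue;
                    }
                    map[cpu] = queue;
                    queue = (queue + 1) % nr_queues;
            }
    }

    int main(void)
    {
            unsigned int map[NR_CPUS];
            int online[NR_CPUS] = { 1, 1, 0, 1, 1, 1, 0, 1 };

            update_queue_map(map, 3, online);
            for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
                    printf("cpu%u -> hw queue %u\n", cpu, map[cpu]);
            return 0;
    }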
index 279c5d674edf3cb38627feb360eb745194eecd4e..788fffd9b4098e35a953ed8cc182a9633f9cc421 100644 (file)
@@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
        unsigned int i, first = 1;
        ssize_t ret = 0;
 
-       blk_mq_disable_hotplug();
-
        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
@@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
                first = 0;
        }
 
-       blk_mq_enable_hotplug();
-
        ret += sprintf(ret + page, "\n");
        return ret;
 }
@@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
        struct blk_mq_ctx *ctx;
        int i;
 
-       if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+       if (!hctx->nr_ctx)
                return;
 
        hctx_for_each_ctx(hctx, ctx, i)
@@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
        struct blk_mq_ctx *ctx;
        int i, ret;
 
-       if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+       if (!hctx->nr_ctx)
                return 0;
 
        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
@@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk)
        struct blk_mq_ctx *ctx;
        int i, j;
 
+       blk_mq_disable_hotplug();
+
        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_unregister_hctx(hctx);
 
@@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk)
        kobject_put(&q->mq_kobj);
 
        kobject_put(&disk_to_dev(disk)->kobj);
+
+       q->mq_sysfs_init_done = false;
+       blk_mq_enable_hotplug();
 }
 
 static void blk_mq_sysfs_init(struct request_queue *q)
@@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk)
        struct blk_mq_hw_ctx *hctx;
        int ret, i;
 
+       blk_mq_disable_hotplug();
+
        blk_mq_sysfs_init(q);
 
        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
-               return ret;
+               goto out;
 
        kobject_uevent(&q->mq_kobj, KOBJ_ADD);
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               hctx->flags |= BLK_MQ_F_SYSFS_UP;
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }
 
-       if (ret) {
+       if (ret)
                blk_mq_unregister_disk(disk);
-               return ret;
-       }
+       else
+               q->mq_sysfs_init_done = true;
+out:
+       blk_mq_enable_hotplug();
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(blk_mq_register_disk);
 
@@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        int i;
 
+       if (!q->mq_sysfs_init_done)
+               return;
+
        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
 }
@@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;
 
+       if (!q->mq_sysfs_init_done)
+               return ret;
+
        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
index 9115c6d59948addbc445a26ad0f9ccaf4237b137..ec2d11915142a8f9b7a49e839e41a2f54a55aa09 100644 (file)
@@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 }
 EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
 {
-       struct blk_mq_tags *tags = hctx->tags;
+       struct blk_mq_hw_ctx *hctx;
+       int i;
+
+
+       queue_for_each_hw_ctx(q, hctx, i) {
+               struct blk_mq_tags *tags = hctx->tags;
+
+               /*
+                * If no software queues are currently mapped to this
+                * hardware queue, there's nothing to check
+                */
+               if (!blk_mq_hw_queue_mapped(hctx))
+                       continue;
+
+               if (tags->nr_reserved_tags)
+                       bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
+               bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
+                     false);
+       }
 
-       if (tags->nr_reserved_tags)
-               bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
-       bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
-                       false);
 }
-EXPORT_SYMBOL(blk_mq_tag_busy_iter);
 
 static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
 {
@@ -628,6 +641,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
 {
        bt_free(&tags->bitmap_tags);
        bt_free(&tags->breserved_tags);
+       free_cpumask_var(tags->cpumask);
        kfree(tags);
 }
 
index 9eb2cf4f01cb874706d64af87a01e94e0121f7e4..d468a79f2c4a2c11a00387816bcc03b64aea09d1 100644 (file)
@@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+               void *priv);
 
 enum {
        BLK_MQ_TAG_CACHE_MIN    = 1,
index f2d67b4047a04d7015c3c2af16871972c3b5a720..85f014327342efc775c31833a52f531b69a66329 100644 (file)
@@ -393,14 +393,16 @@ void __blk_mq_complete_request(struct request *rq)
  *     Ends all I/O on a request. It does not handle partial completions.
  *     The actual completion happens out-of-order, through a IPI handler.
  **/
-void blk_mq_complete_request(struct request *rq)
+void blk_mq_complete_request(struct request *rq, int error)
 {
        struct request_queue *q = rq->q;
 
        if (unlikely(blk_should_fake_timeout(q)))
                return;
-       if (!blk_mark_rq_complete(rq))
+       if (!blk_mark_rq_complete(rq)) {
+               rq->errors = error;
                __blk_mq_complete_request(rq);
+       }
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
@@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                 * If a request wasn't started before the queue was
                 * marked dying, kill it here or it'll go unnoticed.
                 */
-               if (unlikely(blk_queue_dying(rq->q))) {
-                       rq->errors = -EIO;
-                       blk_mq_complete_request(rq);
-               }
+               if (unlikely(blk_queue_dying(rq->q)))
+                       blk_mq_complete_request(rq, -EIO);
                return;
        }
        if (rq->cmd_flags & REQ_NO_TIMEOUT)
@@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv)
                .next           = 0,
                .next_set       = 0,
        };
-       struct blk_mq_hw_ctx *hctx;
        int i;
 
-       queue_for_each_hw_ctx(q, hctx, i) {
-               /*
-                * If not software queues are currently mapped to this
-                * hardware queue, there's nothing to check
-                */
-               if (!blk_mq_hw_queue_mapped(hctx))
-                       continue;
-
-               blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
-       }
+       blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
 
        if (data.next_set) {
                data.next = blk_rq_timeout(round_jiffies_up(data.next));
                mod_timer(&q->timeout, data.next);
        } else {
+               struct blk_mq_hw_ctx *hctx;
+
                queue_for_each_hw_ctx(q, hctx, i) {
                        /* the hctx may be unmapped, so check it here */
                        if (blk_mq_hw_queue_mapped(hctx))
@@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
        }
 }
 
-static void blk_mq_map_swqueue(struct request_queue *q)
+static void blk_mq_map_swqueue(struct request_queue *q,
+                              const struct cpumask *online_mask)
 {
        unsigned int i;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        struct blk_mq_tag_set *set = q->tag_set;
 
+       /*
+        * Avoid others reading incomplete hctx->cpumask through sysfs
+        */
+       mutex_lock(&q->sysfs_lock);
+
        queue_for_each_hw_ctx(q, hctx, i) {
                cpumask_clear(hctx->cpumask);
                hctx->nr_ctx = 0;
@@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q)
         */
        queue_for_each_ctx(q, ctx, i) {
                /* If the cpu isn't online, the cpu is mapped to first hctx */
-               if (!cpu_online(i))
+               if (!cpumask_test_cpu(i, online_mask))
                        continue;
 
                hctx = q->mq_ops->map_queue(q, i);
                cpumask_set_cpu(i, hctx->cpumask);
-               cpumask_set_cpu(i, hctx->tags->cpumask);
                ctx->index_hw = hctx->nr_ctx;
                hctx->ctxs[hctx->nr_ctx++] = ctx;
        }
 
+       mutex_unlock(&q->sysfs_lock);
+
        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_ctxmap *map = &hctx->ctx_map;
 
@@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                hctx->next_cpu = cpumask_first(hctx->cpumask);
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
        }
+
+       queue_for_each_ctx(q, ctx, i) {
+               if (!cpumask_test_cpu(i, online_mask))
+                       continue;
+
+               hctx = q->mq_ops->map_queue(q, i);
+               cpumask_set_cpu(i, hctx->tags->cpumask);
+       }
 }
 
 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
@@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q)
                kfree(hctx);
        }
 
+       kfree(q->mq_map);
+       q->mq_map = NULL;
+
        kfree(q->queue_hw_ctx);
 
        /* ctx kobj stays in queue_ctx */
@@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        if (blk_mq_init_hw_queues(q, set))
                goto err_hctxs;
 
+       get_online_cpus();
        mutex_lock(&all_q_mutex);
-       list_add_tail(&q->all_q_node, &all_q_list);
-       mutex_unlock(&all_q_mutex);
 
+       list_add_tail(&q->all_q_node, &all_q_list);
        blk_mq_add_queue_tag_set(set, q);
+       blk_mq_map_swqueue(q, cpu_online_mask);
 
-       blk_mq_map_swqueue(q);
+       mutex_unlock(&all_q_mutex);
+       put_online_cpus();
 
        return q;
 
@@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q)
 {
        struct blk_mq_tag_set   *set = q->tag_set;
 
+       mutex_lock(&all_q_mutex);
+       list_del_init(&q->all_q_node);
+       mutex_unlock(&all_q_mutex);
+
        blk_mq_del_queue_tag_set(q);
 
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
        blk_mq_free_hw_queues(q, set);
 
        percpu_ref_exit(&q->mq_usage_counter);
-
-       kfree(q->mq_map);
-
-       q->mq_map = NULL;
-
-       mutex_lock(&all_q_mutex);
-       list_del_init(&q->all_q_node);
-       mutex_unlock(&all_q_mutex);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
+static void blk_mq_queue_reinit(struct request_queue *q,
+                               const struct cpumask *online_mask)
 {
        WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
        blk_mq_sysfs_unregister(q);
 
-       blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
+       blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
 
        /*
         * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
@@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q)
         * involves free and re-allocate memory, worthy doing?)
         */
 
-       blk_mq_map_swqueue(q);
+       blk_mq_map_swqueue(q, online_mask);
 
        blk_mq_sysfs_register(q);
 }
@@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
                                      unsigned long action, void *hcpu)
 {
        struct request_queue *q;
+       int cpu = (unsigned long)hcpu;
+       /*
+        * New online cpumask which is going to be set in this hotplug event.
+        * Declare this cpumask as global as cpu-hotplug operations are invoked
+        * one-by-one and dynamically allocating this could result in a failure.
+        */
+       static struct cpumask online_new;
 
        /*
-        * Before new mappings are established, hotadded cpu might already
-        * start handling requests. This doesn't break anything as we map
-        * offline CPUs to first hardware queue. We will re-init the queue
-        * below to get optimal settings.
+        * Before hotadded cpu starts handling requests, new mappings must
+        * be established.  Otherwise, these requests in hw queue might
+        * never be dispatched.
+        *
+        * For example, there is a single hw queue (hctx) and two CPU queues
+        * (ctx0 for CPU0, and ctx1 for CPU1).
+        *
+        * Now CPU1 is just onlined and a request is inserted into
+        * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is
+        * still zero.
+        *
+        * And then while running hw queue, flush_busy_ctxs() finds bit0 is
+        * set in pending bitmap and tries to retrieve requests in
+        * hctx->ctxs[0]->rq_list.  But htx->ctxs[0] is a pointer to ctx0,
+        * so the request in ctx1->rq_list is ignored.
         */
-       if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
-           action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_DEAD:
+       case CPU_UP_CANCELED:
+               cpumask_copy(&online_new, cpu_online_mask);
+               break;
+       case CPU_UP_PREPARE:
+               cpumask_copy(&online_new, cpu_online_mask);
+               cpumask_set_cpu(cpu, &online_new);
+               break;
+       default:
                return NOTIFY_OK;
+       }
 
        mutex_lock(&all_q_mutex);
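
The notifier above prepares online_new before the mappings are redone: for CPU_UP_PREPARE it is the current online mask plus the incoming CPU, for CPU_DEAD and CPU_UP_CANCELED it is the already-updated current mask. A small sketch of building that mask with a plain bitmap:

    #include <stdio.h>

    enum { CPU_UP_PREPARE, CPU_DEAD, CPU_UP_CANCELED, CPU_ONLINE };

    /* Build the mask the queue remap should use for a given hotplug event. */
    static unsigned long online_new_mask(unsigned long online, int cpu, int action)
    {
            switch (action) {
            case CPU_UP_PREPARE:
                    return online | (1UL << cpu);   /* include the incoming CPU */
            case CPU_DEAD:
            case CPU_UP_CANCELED:
                    return online;                  /* CPU already cleared */
            default:
                    return 0;                       /* event ignored */
            }
    }

    int main(void)
    {
            unsigned long online = 0x0bUL;          /* CPUs 0, 1 and 3 online */

            printf("up-prepare cpu2: %#lx\n",
                   online_new_mask(online, 2, CPU_UP_PREPARE));
            printf("dead cpu3:       %#lx\n",
                   online_new_mask(online & ~(1UL << 3), 3, CPU_DEAD));
            return 0;
    }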
 
@@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
        }
 
        list_for_each_entry(q, &all_q_list, all_q_node)
-               blk_mq_queue_reinit(q);
+               blk_mq_queue_reinit(q, &online_new);
 
        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_mq_unfreeze_queue(q);
@@ -2260,10 +2296,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
        int i;
 
        for (i = 0; i < set->nr_hw_queues; i++) {
-               if (set->tags[i]) {
+               if (set->tags[i])
                        blk_mq_free_rq_map(set, set->tags[i], i);
-                       free_cpumask_var(set->tags[i]->cpumask);
-               }
        }
 
        kfree(set->tags);
index 6a48c4c0d8a2a6efb881ea29b772df3bba9d5540..f4fea79649105b4e134860b53294ef2dac90a95f 100644 (file)
@@ -51,7 +51,8 @@ void blk_mq_disable_hotplug(void);
  * CPU -> queue mappings
  */
 extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
-extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
+extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+                                  const struct cpumask *online_mask);
 extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 
 /*
index 3e44a9da2a13579cacaee3d5e03bb3868f34d02a..07b42f5ad797b7b5e6367d5c70051d3b34fca55c 100644 (file)
@@ -540,6 +540,7 @@ static void blk_release_queue(struct kobject *kobj)
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
 
+       bdi_exit(&q->backing_dev_info);
        blkcg_exit_queue(q);
 
        if (q->elevator) {
index b788f169cc9880d0d1ac2ce4930145d9862b306d..b4ffc5be1a93c1ff0bceb6b9a5cc3dc1b5101d97 100644 (file)
@@ -706,7 +706,7 @@ struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
 err:
                if (err != -EAGAIN)
                        break;
-               if (signal_pending(current)) {
+               if (fatal_signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
index 8acb886032ae7a604fe0e965eb5d3ce07dd4845b..9c1dc8d6106a89a0f853271c1dfc49cd301ec983 100644 (file)
@@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
        struct crypto_alg *base = &alg->halg.base;
 
        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
-           alg->halg.statesize > PAGE_SIZE / 8)
+           alg->halg.statesize > PAGE_SIZE / 8 ||
+           alg->halg.statesize == 0)
                return -EINVAL;
 
        base->cra_type = &crypto_ahash_type;
index d130b41dbaea244000c35328d7eba7b463158bc8..59bf491fe3d8606acc4016c53182182615d32a21 100644 (file)
@@ -345,7 +345,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
                crypto_alg_tested(larval->alg.cra_driver_name, 0);
        }
 
-       err = wait_for_completion_interruptible(&larval->completion);
+       err = wait_for_completion_killable(&larval->completion);
        WARN_ON(err);
 
 out:
index afe4610afc4b952d585ede5ba9cbd732f5aa3750..bbc147cb5dec87affad82510412459c075056f86 100644 (file)
@@ -172,7 +172,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
        struct crypto_larval *larval = (void *)alg;
        long timeout;
 
-       timeout = wait_for_completion_interruptible_timeout(
+       timeout = wait_for_completion_killable_timeout(
                &larval->completion, 60 * HZ);
 
        alg = larval->adult;
@@ -445,7 +445,7 @@ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
 err:
                if (err != -EAGAIN)
                        break;
-               if (signal_pending(current)) {
+               if (fatal_signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
@@ -562,7 +562,7 @@ void *crypto_alloc_tfm(const char *alg_name,
 err:
                if (err != -EAGAIN)
                        break;
-               if (signal_pending(current)) {
+               if (fatal_signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
index 6d88dd15c98da8cada935c0dc937eb5c8db158cd..19709663241223968ee73f3d1847be9e253969f7 100644 (file)
@@ -332,10 +332,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
                srlen = cert->raw_serial_size;
                q = cert->raw_serial;
        }
-       if (srlen > 1 && *q == 0) {
-               srlen--;
-               q++;
-       }
 
        ret = -ENOMEM;
        desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
index d94d99ffe8b9b6cecf2c8348241ea17738e9b457..237f3795cfaaa1f988fadf5b07eefe3c44609091 100644 (file)
@@ -375,7 +375,7 @@ static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
                err = PTR_ERR(alg);
                if (err != -EAGAIN)
                        break;
-               if (signal_pending(current)) {
+               if (fatal_signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
index 09f37b51680871d8a34dc3a33563872652d4f0d6..4dde37c3d8fcba549ad1eb978bf23466321e9152 100644 (file)
@@ -61,6 +61,7 @@ ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX);
 
 #if (!ACPI_REDUCED_HARDWARE)
 ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
index f7731f260c318606e32455b7175a01ea157d8267..591ea95319e25ca7e5630970dd080ab1eab85e5e 100644 (file)
@@ -85,7 +85,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded);
 /*
  * tbfadt - FADT parse/convert/validate
  */
-void acpi_tb_parse_fadt(u32 table_index);
+void acpi_tb_parse_fadt(void);
 
 void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length);
 
@@ -138,8 +138,6 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id);
  */
 acpi_status acpi_tb_initialize_facs(void);
 
-u8 acpi_tb_tables_loaded(void);
-
 void
 acpi_tb_print_table_header(acpi_physical_address address,
                           struct acpi_table_header *header);
index faad911d46b5eb71c4ae93a5300cf460e63c8738..10ce48e16ebf43a334fdc2f13479572d9bca4bd1 100644 (file)
@@ -71,7 +71,7 @@ acpi_status acpi_enable(void)
 
        /* ACPI tables must be present */
 
-       if (!acpi_tb_tables_loaded()) {
+       if (acpi_gbl_fadt_index == ACPI_INVALID_TABLE_INDEX) {
                return_ACPI_STATUS(AE_NO_ACPI_TABLES);
        }
 
index 455a0700db392b1663a16c392d8da3bb544b72b6..a6454f4a6fb343b52cada9dc5394094768c6ea14 100644 (file)
@@ -298,7 +298,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64)
  *
  * FUNCTION:    acpi_tb_parse_fadt
  *
- * PARAMETERS:  table_index         - Index for the FADT
+ * PARAMETERS:  None
  *
  * RETURN:      None
  *
@@ -307,7 +307,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64)
  *
  ******************************************************************************/
 
-void acpi_tb_parse_fadt(u32 table_index)
+void acpi_tb_parse_fadt(void)
 {
        u32 length;
        struct acpi_table_header *table;
@@ -319,11 +319,11 @@ void acpi_tb_parse_fadt(u32 table_index)
         * Get a local copy of the FADT and convert it to a common format
         * Map entire FADT, assumed to be smaller than one page.
         */
-       length = acpi_gbl_root_table_list.tables[table_index].length;
+       length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length;
 
        table =
-           acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index].
-                              address, length);
+           acpi_os_map_memory(acpi_gbl_root_table_list.
+                              tables[acpi_gbl_fadt_index].address, length);
        if (!table) {
                return;
        }
index 4337990127cc39930b50983d7e7eff05966fcfb6..d8ddef38c947f750a226cee1b69fa373c9e1bf8f 100644 (file)
@@ -97,29 +97,6 @@ acpi_status acpi_tb_initialize_facs(void)
 }
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_tables_loaded
- *
- * PARAMETERS:  None
- *
- * RETURN:      TRUE if required ACPI tables are loaded
- *
- * DESCRIPTION: Determine if the minimum required ACPI tables are present
- *              (FADT, FACS, DSDT)
- *
- ******************************************************************************/
-
-u8 acpi_tb_tables_loaded(void)
-{
-
-       if (acpi_gbl_root_table_list.current_table_count >= 4) {
-               return (TRUE);
-       }
-
-       return (FALSE);
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_tb_check_dsdt_header
@@ -392,7 +369,8 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
                    ACPI_COMPARE_NAME(&acpi_gbl_root_table_list.
                                      tables[table_index].signature,
                                      ACPI_SIG_FADT)) {
-                       acpi_tb_parse_fadt(table_index);
+                       acpi_gbl_fadt_index = table_index;
+                       acpi_tb_parse_fadt();
                }
 
 next_table:
index 23981ac1c6c21a895de24f18117aa9c381cee19c..3dd9c462d22afd650b989399ac9c973afc983257 100644 (file)
@@ -157,11 +157,15 @@ static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
 
 static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
 {
-       unsigned long vaddr;
+       unsigned long vaddr, paddr;
+       pgprot_t prot;
 
        vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
-       ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
-                          pfn << PAGE_SHIFT, PAGE_KERNEL);
+
+       paddr = pfn << PAGE_SHIFT;
+       prot = arch_apei_get_mem_attribute(paddr);
+
+       ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);
 
        return (void __iomem *)vaddr;
 }
index 2614a839c60dab8aca4d2955368888ae5c0fefc3..42c66b64c12cefd8c1491e7b91af138b86ddf5af 100644 (file)
@@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
                goto err_exit;
 
        mutex_lock(&ec->mutex);
+       result = -ENODATA;
        list_for_each_entry(handler, &ec->list, node) {
                if (value == handler->query_bit) {
+                       result = 0;
                        q->handler = acpi_ec_get_query_handler(handler);
                        ec_dbg_evt("Query(0x%02x) scheduled",
                                   q->handler->query_bit);
index 38208f2d0e69264f5ad71c66ac65be9719fa70ec..fa4585a6914e912a402ec339d4bd241dd0fadac1 100644 (file)
 #include <linux/acpi.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
+#include <linux/of.h>
 
 enum acpi_irq_model_id acpi_irq_model;
 
+static struct fwnode_handle *acpi_gsi_domain_id;
+
 static unsigned int acpi_gsi_get_irq_type(int trigger, int polarity)
 {
        switch (polarity) {
@@ -45,12 +48,10 @@ static unsigned int acpi_gsi_get_irq_type(int trigger, int polarity)
  */
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 {
-       /*
-        * Only default domain is supported at present, always find
-        * the mapping corresponding to default domain by passing NULL
-        * as irq_domain parameter
-        */
-       *irq = irq_find_mapping(NULL, gsi);
+       struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
+                                                       DOMAIN_BUS_ANY);
+
+       *irq = irq_find_mapping(d, gsi);
        /*
         * *irq == 0 means no mapping, that should
         * be reported as a failure
@@ -72,23 +73,19 @@ EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
 int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
                      int polarity)
 {
-       unsigned int irq;
-       unsigned int irq_type = acpi_gsi_get_irq_type(trigger, polarity);
+       struct irq_fwspec fwspec;
 
-       /*
-        * There is no way at present to look-up the IRQ domain on ACPI,
-        * hence always create mapping referring to the default domain
-        * by passing NULL as irq_domain parameter
-        */
-       irq = irq_create_mapping(NULL, gsi);
-       if (!irq)
+       if (WARN_ON(!acpi_gsi_domain_id)) {
+               pr_warn("GSI: No registered irqchip, giving up\n");
                return -EINVAL;
+       }
 
-       /* Set irq type if specified and different than the current one */
-       if (irq_type != IRQ_TYPE_NONE &&
-               irq_type != irq_get_trigger_type(irq))
-               irq_set_irq_type(irq, irq_type);
-       return irq;
+       fwspec.fwnode = acpi_gsi_domain_id;
+       fwspec.param[0] = gsi;
+       fwspec.param[1] = acpi_gsi_get_irq_type(trigger, polarity);
+       fwspec.param_count = 2;
+
+       return irq_create_fwspec_mapping(&fwspec);
 }
 EXPORT_SYMBOL_GPL(acpi_register_gsi);
 
@@ -98,8 +95,23 @@ EXPORT_SYMBOL_GPL(acpi_register_gsi);
  */
 void acpi_unregister_gsi(u32 gsi)
 {
-       int irq = irq_find_mapping(NULL, gsi);
+       struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
+                                                       DOMAIN_BUS_ANY);
+       int irq = irq_find_mapping(d, gsi);
 
        irq_dispose_mapping(irq);
 }
 EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+
+/**
+ * acpi_set_irq_model - Setup the GSI irqdomain information
+ * @model: the value assigned to acpi_irq_model
+ * @fwnode: the irq_domain identifier for mapping and looking up
+ *          GSI interrupts
+ */
+void __init acpi_set_irq_model(enum acpi_irq_model_id model,
+                              struct fwnode_handle *fwnode)
+{
+       acpi_irq_model = model;
+       acpi_gsi_domain_id = fwnode;
+}
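
acpi_register_gsi() above packs the GSI and trigger type into an irq_fwspec against the domain identifier recorded by acpi_set_irq_model(). A toy model of that flow, with the fwnode handle reduced to an opaque token and virq allocation faked:

    #include <stdio.h>

    /* Toy model of the GSI-to-IRQ plumbing: one registered "domain",
     * identified by an opaque token, maps GSIs to virtual IRQ numbers. */
    struct fwspec { const void *fwnode; unsigned int param[2]; int param_count; };

    static const void *gsi_domain_id;       /* set once by the irqchip driver */

    static void set_irq_model(const void *fwnode)
    {
            gsi_domain_id = fwnode;
    }

    static int create_mapping(const struct fwspec *spec)
    {
            /* Pretend virq allocation: offset the GSI so 0 stays "no mapping". */
            return (int)spec->param[0] + 32;
    }

    static int register_gsi(unsigned int gsi, unsigned int type)
    {
            struct fwspec spec;

            if (!gsi_domain_id)
                    return -1;              /* no irqchip registered yet */

            spec.fwnode = gsi_domain_id;
            spec.param[0] = gsi;
            spec.param[1] = type;
            spec.param_count = 2;
            return create_mapping(&spec);
    }

    int main(void)
    {
            static const int token = 1;     /* stand-in for the fwnode handle */

            set_irq_model(&token);
            printf("GSI 9 -> irq %d\n", register_gsi(9, 4));
            return 0;
    }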
index 6da0f9beab19880ac71199b7cd99fd57b2fa6e21..c9336751e5e3708f96e9f972ab05fc5454970adb 100644 (file)
@@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev)
 
        /* Interrupt Line values above 0xF are forbidden */
        if (dev->irq > 0 && (dev->irq <= 0xF) &&
+           acpi_isa_irq_available(dev->irq) &&
            (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
                dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
                         pin_name(dev->pin), dev->irq);
index 3b4ea98e3ea069eca5f9e0094f520c31eee639b2..7c8408b946ca10160d41648f14f99a6716529903 100644 (file)
@@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void)
                            PIRQ_PENALTY_PCI_POSSIBLE;
                }
        }
-       /* Add a penalty for the SCI */
-       acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
+
        return 0;
 }
 
@@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
                                irq = link->irq.possible[i];
                }
        }
+       if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
+               printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
+                           "Try pci=noacpi or acpi=off\n",
+                           acpi_device_name(link->device),
+                           acpi_device_bid(link->device));
+               return -ENODEV;
+       }
 
        /* Attempt to enable the link device at this IRQ. */
        if (acpi_pci_link_set(link, irq)) {
@@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active)
        }
 }
 
+bool acpi_isa_irq_available(int irq)
+{
+       return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
+                           acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
+}
+
 /*
  * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
  * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
index be0eb463912882ddc06b6aeebc443b54f1f84bf7..a641cf3ccad691e76d9665e9defab4ef04e9af4f 100644 (file)
@@ -322,6 +322,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
                        goto probe_failed;
        }
 
+       pinctrl_init_done(dev);
+
        if (dev->pm_domain && dev->pm_domain->sync)
                dev->pm_domain->sync(dev);
 
index 950fff9ce45397024ac5751b452cdd96a6da9907..a12ff9863d7e116ba9f15e21fe9c6fa4070b0f22 100644 (file)
@@ -187,7 +187,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
  * global one. Requires architecture specific dev_get_cma_area() helper
  * function.
  */
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
                                       unsigned int align)
 {
        if (align > CONFIG_CMA_ALIGNMENT)
index 5fb74b43848e21369670b4503fdee13b0d4f255e..0762975927541346de55ca09466fe194f7f1d212 100644 (file)
@@ -42,9 +42,20 @@ int pinctrl_bind_pins(struct device *dev)
                goto cleanup_get;
        }
 
-       ret = pinctrl_select_state(dev->pins->p, dev->pins->default_state);
+       dev->pins->init_state = pinctrl_lookup_state(dev->pins->p,
+                                       PINCTRL_STATE_INIT);
+       if (IS_ERR(dev->pins->init_state)) {
+               /* Not supplying this state is perfectly legal */
+               dev_dbg(dev, "no init pinctrl state\n");
+
+               ret = pinctrl_select_state(dev->pins->p,
+                                          dev->pins->default_state);
+       } else {
+               ret = pinctrl_select_state(dev->pins->p, dev->pins->init_state);
+       }
+
        if (ret) {
-               dev_dbg(dev, "failed to activate default pinctrl state\n");
+               dev_dbg(dev, "failed to activate initial pinctrl state\n");
                goto cleanup_get;
        }
 
index 134483daac25e6627234a5dac90a60687d4810dd..5df4575b5ba765de181a02701c706a6802d38b13 100644 (file)
@@ -152,7 +152,7 @@ static int platform_msi_alloc_descs(struct device *dev, int nvec,
 
 /**
  * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
- * @np:                Optional device-tree node of the interrupt controller
+ * @fwnode:            Optional fwnode of the interrupt controller
  * @info:      MSI domain info
  * @parent:    Parent irq domain
  *
@@ -162,7 +162,7 @@ static int platform_msi_alloc_descs(struct device *dev, int nvec,
  * Returns:
  * A domain pointer or NULL in case of failure.
  */
-struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
+struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
                                                  struct msi_domain_info *info,
                                                  struct irq_domain *parent)
 {
@@ -173,7 +173,7 @@ struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
        if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
                platform_msi_update_chip_ops(info);
 
-       domain = msi_create_irq_domain(np, info, parent);
+       domain = msi_create_irq_domain(fwnode, info, parent);
        if (domain)
                domain->bus_token = DOMAIN_BUS_PLATFORM_MSI;
 
index 2a4154a09e4dca0dc9af3aa09f9a395a313f60e8..85e17bacc834156664c5980c4f7ea7ef68e5d6b6 100644 (file)
@@ -77,13 +77,16 @@ static bool default_stop_ok(struct device *dev)
                                      dev_update_qos_constraint);
 
        if (constraint_ns > 0) {
-               constraint_ns -= td->start_latency_ns;
+               constraint_ns -= td->save_state_latency_ns +
+                               td->stop_latency_ns +
+                               td->start_latency_ns +
+                               td->restore_state_latency_ns;
                if (constraint_ns == 0)
                        return false;
        }
        td->effective_constraint_ns = constraint_ns;
-       td->cached_stop_ok = constraint_ns > td->stop_latency_ns ||
-                               constraint_ns == 0;
+       td->cached_stop_ok = constraint_ns >= 0;
+
        /*
         * The children have been suspended already, so we don't need to take
         * their stop latencies into account here.
@@ -126,18 +129,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 
        off_on_time_ns = genpd->power_off_latency_ns +
                                genpd->power_on_latency_ns;
-       /*
-        * It doesn't make sense to remove power from the domain if saving
-        * the state of all devices in it and the power off/power on operations
-        * take too much time.
-        *
-        * All devices in this domain have been stopped already at this point.
-        */
-       list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-               if (pdd->dev->driver)
-                       off_on_time_ns +=
-                               to_gpd_data(pdd)->td.save_state_latency_ns;
-       }
 
        min_off_time_ns = -1;
        /*
@@ -193,7 +184,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
                 * constraint_ns cannot be negative here, because the device has
                 * been suspended.
                 */
-               constraint_ns -= td->restore_state_latency_ns;
                if (constraint_ns <= off_on_time_ns)
                        return false;
 
index 28cd75c535b047f2c4276fed4308fa8159491007..7ae7cd990fbf79bf39de570a6a0fca5bd47cef94 100644 (file)
@@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
        u32 microvolt[3] = {0};
        int count, ret;
 
-       count = of_property_count_u32_elems(opp->np, "opp-microvolt");
-       if (!count)
+       /* Missing property isn't a problem, but an invalid entry is */
+       if (!of_find_property(opp->np, "opp-microvolt", NULL))
                return 0;
 
+       count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+       if (count < 0) {
+               dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
+                       __func__, count);
+               return count;
+       }
+
        /* There can be one or three elements here */
        if (count != 1 && count != 3) {
                dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
@@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
  * share a common logic which is isolated here.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
@@ -1151,7 +1158,7 @@ unlock:
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  */
 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
@@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  */
 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
index cc557886ab2377a550c1ae529b6ecee23380161f..3250e53473a38957ecd12899612b6acaa5590283 100644 (file)
@@ -59,6 +59,7 @@ struct regmap {
        regmap_lock lock;
        regmap_unlock unlock;
        void *lock_arg; /* This is passed to lock/unlock functions */
+       gfp_t alloc_flags;
 
        struct device *dev; /* Device we do I/O on */
        void *work_buf;     /* Scratch buffer used to format I/O */
@@ -98,6 +99,8 @@ struct regmap {
 
        int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
        int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+       int (*reg_update_bits)(void *context, unsigned int reg,
+                              unsigned int mask, unsigned int val);
 
        bool defer_caching;
 
index f42f2bac646623fc1db767bae3a5fff0ecf98aac..3f0a7e262d6982c63dcaf3e81f058b0c471d2880 100644 (file)
@@ -30,10 +30,9 @@ static LIST_HEAD(regmap_debugfs_early_list);
 static DEFINE_MUTEX(regmap_debugfs_early_lock);
 
 /* Calculate the length of a fixed format  */
-static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
+static size_t regmap_calc_reg_len(int max_val)
 {
-       snprintf(buf, buf_size, "%x", max_val);
-       return strlen(buf);
+       return snprintf(NULL, 0, "%x", max_val);
 }
 
 static ssize_t regmap_name_read_file(struct file *file,
@@ -174,8 +173,7 @@ static inline void regmap_calc_tot_len(struct regmap *map,
 {
        /* Calculate the length of a fixed format  */
        if (!map->debugfs_tot_len) {
-               map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
-                                                          buf, count);
+               map->debugfs_reg_len = regmap_calc_reg_len(map->max_register),
                map->debugfs_val_len = 2 * map->format.val_bytes;
                map->debugfs_tot_len = map->debugfs_reg_len +
                        map->debugfs_val_len + 3;      /* : \n */
@@ -339,6 +337,7 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
        char *buf;
        char *entry;
        int ret;
+       unsigned entry_len;
 
        if (*ppos < 0 || !count)
                return -EINVAL;
@@ -366,18 +365,15 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
        p = 0;
        mutex_lock(&map->cache_lock);
        list_for_each_entry(c, &map->debugfs_off_cache, list) {
-               snprintf(entry, PAGE_SIZE, "%x-%x",
-                        c->base_reg, c->max_reg);
+               entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
+                                    c->base_reg, c->max_reg);
                if (p >= *ppos) {
-                       if (buf_pos + 1 + strlen(entry) > count)
+                       if (buf_pos + entry_len > count)
                                break;
-                       snprintf(buf + buf_pos, count - buf_pos,
-                                "%s", entry);
-                       buf_pos += strlen(entry);
-                       buf[buf_pos] = '\n';
-                       buf_pos++;
+                       memcpy(buf + buf_pos, entry, entry_len);
+                       buf_pos += entry_len;
                }
-               p += strlen(entry) + 1;
+               p += entry_len;
        }
        mutex_unlock(&map->cache_lock);
 
@@ -421,7 +417,7 @@ static ssize_t regmap_access_read_file(struct file *file,
                return -ENOMEM;
 
        /* Calculate the length of a fixed format  */
-       reg_len = regmap_calc_reg_len(map->max_register, buf, count);
+       reg_len = regmap_calc_reg_len(map->max_register);
        tot_len = reg_len + 10; /* ': R W V P\n' */
 
        for (i = 0; i <= map->max_register; i += map->reg_stride) {
@@ -432,7 +428,7 @@ static ssize_t regmap_access_read_file(struct file *file,
                /* If we're in the region the user is trying to read */
                if (p >= *ppos) {
                        /* ...but not beyond it */
-                       if (buf_pos >= count - 1 - tot_len)
+                       if (buf_pos + tot_len + 1 >= count)
                                break;
 
                        /* Format the register */
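
The regmap_calc_reg_len() rewrite above leans on a standard C99 property: snprintf() called with a NULL buffer and a zero size writes nothing but still returns the number of characters the formatted output would need, so the register-number field width can be computed without a scratch buffer. A standalone example of the idiom:

#include <stdio.h>

static size_t hex_width(unsigned int max_val)
{
	/* No buffer is written; only the would-be length is returned. */
	return snprintf(NULL, 0, "%x", max_val);
}

int main(void)
{
	printf("%zu %zu\n", hex_width(0xf), hex_width(0x3fff));	/* prints: 1 4 */
	return 0;
}
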
index 38d1f72d869cf4ceb698067f588f81b96e0102df..8d16db533527362efa638f67b69673607d069327 100644 (file)
@@ -63,6 +63,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
        struct regmap *map = d->map;
        int i, ret;
        u32 reg;
+       u32 unmask_offset;
 
        if (d->chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
@@ -79,12 +80,28 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
        for (i = 0; i < d->chip->num_regs; i++) {
                reg = d->chip->mask_base +
                        (i * map->reg_stride * d->irq_reg_stride);
-               if (d->chip->mask_invert)
+               if (d->chip->mask_invert) {
                        ret = regmap_update_bits(d->map, reg,
                                         d->mask_buf_def[i], ~d->mask_buf[i]);
-               else
+               } else if (d->chip->unmask_base) {
+                       /* set mask with mask_base register */
+                       ret = regmap_update_bits(d->map, reg,
+                                       d->mask_buf_def[i], ~d->mask_buf[i]);
+                       if (ret < 0)
+                               dev_err(d->map->dev,
+                                       "Failed to sync unmasks in %x\n",
+                                       reg);
+                       unmask_offset = d->chip->unmask_base -
+                                                       d->chip->mask_base;
+                       /* clear mask with unmask_base register */
+                       ret = regmap_update_bits(d->map,
+                                       reg + unmask_offset,
+                                       d->mask_buf_def[i],
+                                       d->mask_buf[i]);
+               } else {
                        ret = regmap_update_bits(d->map, reg,
                                         d->mask_buf_def[i], d->mask_buf[i]);
+               }
                if (ret != 0)
                        dev_err(d->map->dev, "Failed to sync masks in %x\n",
                                reg);
@@ -116,7 +133,11 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
                if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
                        reg = d->chip->ack_base +
                                (i * map->reg_stride * d->irq_reg_stride);
-                       ret = regmap_write(map, reg, d->mask_buf[i]);
+                       /* some chips ack by write 0 */
+                       if (d->chip->ack_invert)
+                               ret = regmap_write(map, reg, ~d->mask_buf[i]);
+                       else
+                               ret = regmap_write(map, reg, d->mask_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
@@ -339,6 +360,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
        int i;
        int ret = -ENOMEM;
        u32 reg;
+       u32 unmask_offset;
 
        if (chip->num_regs <= 0)
                return -EINVAL;
@@ -420,7 +442,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                if (chip->mask_invert)
                        ret = regmap_update_bits(map, reg,
                                         d->mask_buf[i], ~d->mask_buf[i]);
-               else
+               else if (d->chip->unmask_base) {
+                       unmask_offset = d->chip->unmask_base -
+                                       d->chip->mask_base;
+                       ret = regmap_update_bits(d->map,
+                                       reg + unmask_offset,
+                                       d->mask_buf[i],
+                                       d->mask_buf[i]);
+               } else
                        ret = regmap_update_bits(map, reg,
                                         d->mask_buf[i], d->mask_buf[i]);
                if (ret != 0) {
@@ -445,7 +474,11 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = chip->ack_base +
                                (i * map->reg_stride * d->irq_reg_stride);
-                       ret = regmap_write(map, reg,
+                       if (chip->ack_invert)
+                               ret = regmap_write(map, reg,
+                                       ~(d->status_buf[i] & d->mask_buf[i]));
+                       else
+                               ret = regmap_write(map, reg,
                                        d->status_buf[i] & d->mask_buf[i]);
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
index afaf56200674a29517678af763bc9a084659224b..4ac63c0e50c7e6a446847b7a29567834d4a5b073 100644 (file)
@@ -561,6 +561,16 @@ struct regmap *__regmap_init(struct device *dev,
                }
                map->lock_arg = map;
        }
+
+       /*
+        * When we write in fast-paths with regmap_bulk_write() don't allocate
+        * scratch buffers with sleeping allocations.
+        */
+       if ((bus && bus->fast_io) || config->fast_io)
+               map->alloc_flags = GFP_ATOMIC;
+       else
+               map->alloc_flags = GFP_KERNEL;
+
        map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
        map->format.pad_bytes = config->pad_bits / 8;
        map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
@@ -619,6 +629,7 @@ struct regmap *__regmap_init(struct device *dev,
                goto skip_format_initialization;
        } else {
                map->reg_read  = _regmap_bus_read;
+               map->reg_update_bits = bus->reg_update_bits;
        }
 
        reg_endian = regmap_get_reg_endian(bus, config);
@@ -1786,7 +1797,7 @@ out:
                if (!val_count)
                        return -EINVAL;
 
-               wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
+               wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
                if (!wval) {
                        dev_err(map->dev, "Error in memory allocation\n");
                        return -ENOMEM;
@@ -2509,20 +2520,26 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
        int ret;
        unsigned int tmp, orig;
 
-       ret = _regmap_read(map, reg, &orig);
-       if (ret != 0)
-               return ret;
+       if (change)
+               *change = false;
 
-       tmp = orig & ~mask;
-       tmp |= val & mask;
-
-       if (force_write || (tmp != orig)) {
-               ret = _regmap_write(map, reg, tmp);
-               if (change)
+       if (regmap_volatile(map, reg) && map->reg_update_bits) {
+               ret = map->reg_update_bits(map->bus_context, reg, mask, val);
+               if (ret == 0 && change)
                        *change = true;
        } else {
-               if (change)
-                       *change = false;
+               ret = _regmap_read(map, reg, &orig);
+               if (ret != 0)
+                       return ret;
+
+               tmp = orig & ~mask;
+               tmp |= val & mask;
+
+               if (force_write || (tmp != orig)) {
+                       ret = _regmap_write(map, reg, tmp);
+                       if (ret == 0 && change)
+                               *change = true;
+               }
        }
 
        return ret;
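
The _regmap_update_bits() hunk above adds a fast path: when the register is volatile and the bus supplies a reg_update_bits() callback, the hardware performs the masked update; otherwise the code falls back to the usual read-modify-write. A self-contained sketch of that shape follows; the callback and register array are stand-ins, not the regmap API.

#include <stdbool.h>
#include <stdio.h>

static unsigned int regs[8];

/* Optional hardware-assisted masked write; NULL means "not supported". */
static int (*hw_update_bits)(unsigned int reg, unsigned int mask,
			     unsigned int val);

static int update_bits(unsigned int reg, unsigned int mask, unsigned int val,
		       bool *change)
{
	unsigned int orig, tmp;

	if (change)
		*change = false;

	if (hw_update_bits) {				/* fast path */
		int ret = hw_update_bits(reg, mask, val);

		if (ret == 0 && change)
			*change = true;
		return ret;
	}

	orig = regs[reg];				/* read */
	tmp = (orig & ~mask) | (val & mask);		/* modify */
	if (tmp != orig) {
		regs[reg] = tmp;			/* write */
		if (change)
			*change = true;
	}
	return 0;
}

int main(void)
{
	bool changed;

	regs[1] = 0xf0;
	update_bits(1, 0x0f, 0x05, &changed);
	printf("0x%x %d\n", regs[1], changed);		/* prints: 0xf5 1 */
	return 0;
}
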
index f9889b6bc02c316bed46e130c9f5c7ce38b7b93b..674f800a3b5760ad6374c98fa11e88097e30d160 100644 (file)
@@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
 {
        const bool write = cmd->rq->cmd_flags & REQ_WRITE;
        struct loop_device *lo = cmd->rq->q->queuedata;
-       int ret = -EIO;
+       int ret = 0;
 
-       if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+       if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+               ret = -EIO;
                goto failed;
+       }
 
        ret = do_req_filebacked(lo, cmd->rq);
-
  failed:
-       if (ret)
-               cmd->rq->errors = -EIO;
-       blk_mq_complete_request(cmd->rq);
+       blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
 }
 
 static void loop_queue_write_work(struct work_struct *work)
index 293495a75d3d8ce1e0f61e3571ddcc0c1fa14312..1b87623381e2b1183b5c9d57c870b7c10924f65e 100644 (file)
@@ -60,6 +60,7 @@ struct nbd_device {
        bool disconnect; /* a disconnect has been requested by user */
 
        struct timer_list timeout_timer;
+       spinlock_t tasks_lock;
        struct task_struct *task_recv;
        struct task_struct *task_send;
 
@@ -140,21 +141,23 @@ static void sock_shutdown(struct nbd_device *nbd)
 static void nbd_xmit_timeout(unsigned long arg)
 {
        struct nbd_device *nbd = (struct nbd_device *)arg;
-       struct task_struct *task;
+       unsigned long flags;
 
        if (list_empty(&nbd->queue_head))
                return;
 
        nbd->disconnect = true;
 
-       task = READ_ONCE(nbd->task_recv);
-       if (task)
-               force_sig(SIGKILL, task);
+       spin_lock_irqsave(&nbd->tasks_lock, flags);
+
+       if (nbd->task_recv)
+               force_sig(SIGKILL, nbd->task_recv);
 
-       task = READ_ONCE(nbd->task_send);
-       if (task)
+       if (nbd->task_send)
                force_sig(SIGKILL, nbd->task_send);
 
+       spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
        dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
 }
 
@@ -403,17 +406,24 @@ static int nbd_thread_recv(struct nbd_device *nbd)
 {
        struct request *req;
        int ret;
+       unsigned long flags;
 
        BUG_ON(nbd->magic != NBD_MAGIC);
 
        sk_set_memalloc(nbd->sock->sk);
 
+       spin_lock_irqsave(&nbd->tasks_lock, flags);
        nbd->task_recv = current;
+       spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
        ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
        if (ret) {
                dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+
+               spin_lock_irqsave(&nbd->tasks_lock, flags);
                nbd->task_recv = NULL;
+               spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
                return ret;
        }
 
@@ -429,7 +439,9 @@ static int nbd_thread_recv(struct nbd_device *nbd)
 
        device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
 
+       spin_lock_irqsave(&nbd->tasks_lock, flags);
        nbd->task_recv = NULL;
+       spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
        if (signal_pending(current)) {
                siginfo_t info;
@@ -534,8 +546,11 @@ static int nbd_thread_send(void *data)
 {
        struct nbd_device *nbd = data;
        struct request *req;
+       unsigned long flags;
 
+       spin_lock_irqsave(&nbd->tasks_lock, flags);
        nbd->task_send = current;
+       spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
        set_user_nice(current, MIN_NICE);
        while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
@@ -572,7 +587,15 @@ static int nbd_thread_send(void *data)
                nbd_handle_req(nbd, req);
        }
 
+       spin_lock_irqsave(&nbd->tasks_lock, flags);
        nbd->task_send = NULL;
+       spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
+       /* Clear maybe pending signals */
+       if (signal_pending(current)) {
+               siginfo_t info;
+               dequeue_signal_lock(current, &current->blocked, &info);
+       }
 
        return 0;
 }
@@ -1052,6 +1075,7 @@ static int __init nbd_init(void)
                nbd_dev[i].magic = NBD_MAGIC;
                INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                spin_lock_init(&nbd_dev[i].queue_lock);
+               spin_lock_init(&nbd_dev[i].tasks_lock);
                INIT_LIST_HEAD(&nbd_dev[i].queue_head);
                mutex_init(&nbd_dev[i].tx_lock);
                init_timer(&nbd_dev[i].timeout_timer);
index a295b98c6baed2df8bdd9484a62e44ca9bbfdc7a..1c9e4fe5aa440cbde62bb5e6c0cf0c397d8417a7 100644 (file)
@@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
        case NULL_IRQ_SOFTIRQ:
                switch (queue_mode)  {
                case NULL_Q_MQ:
-                       blk_mq_complete_request(cmd->rq);
+                       blk_mq_complete_request(cmd->rq, cmd->rq->errors);
                        break;
                case NULL_Q_RQ:
                        blk_complete_request(cmd->rq);
index b97fc3fe0916a6b6fd3fb2be32be44ce3c137b39..ccc0c1f93daa45fae2603dd8e36bd2dcc38a8217 100644 (file)
@@ -603,31 +603,34 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
        struct nvme_iod *iod = ctx;
        struct request *req = iod_get_private(iod);
        struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
-
        u16 status = le16_to_cpup(&cqe->status) >> 1;
+       bool requeue = false;
+       int error = 0;
 
        if (unlikely(status)) {
                if (!(status & NVME_SC_DNR || blk_noretry_request(req))
                    && (jiffies - req->start_time) < req->timeout) {
                        unsigned long flags;
 
+                       requeue = true;
                        blk_mq_requeue_request(req);
                        spin_lock_irqsave(req->q->queue_lock, flags);
                        if (!blk_queue_stopped(req->q))
                                blk_mq_kick_requeue_list(req->q);
                        spin_unlock_irqrestore(req->q->queue_lock, flags);
-                       return;
+                       goto release_iod;
                }
+
                if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
                        if (cmd_rq->ctx == CMD_CTX_CANCELLED)
-                               req->errors = -EINTR;
+                               error = -EINTR;
                        else
-                               req->errors = status;
+                               error = status;
                } else {
-                       req->errors = nvme_error_status(status);
+                       error = nvme_error_status(status);
                }
-       } else
-               req->errors = 0;
+       }
+
        if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
                u32 result = le32_to_cpup(&cqe->result);
                req->special = (void *)(uintptr_t)result;
@@ -636,8 +639,9 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
        if (cmd_rq->aborted)
                dev_warn(nvmeq->dev->dev,
                        "completing aborted command with status:%04x\n",
-                       status);
+                       error);
 
+release_iod:
        if (iod->nents) {
                dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
                        rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -650,7 +654,8 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
        }
        nvme_free_iod(nvmeq->dev, iod);
 
-       blk_mq_complete_request(req);
+       if (likely(!requeue))
+               blk_mq_complete_request(req, error);
 }
 
 /* length is in bytes.  gfp flags indicates whether we may sleep. */
@@ -863,8 +868,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ns && ns->ms && !blk_integrity_rq(req)) {
                if (!(ns->pi_type && ns->ms == 8) &&
                                        req->cmd_type != REQ_TYPE_DRV_PRIV) {
-                       req->errors = -EFAULT;
-                       blk_mq_complete_request(req);
+                       blk_mq_complete_request(req, -EFAULT);
                        return BLK_MQ_RQ_QUEUE_OK;
                }
        }
@@ -1806,7 +1810,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 
        length = (io.nblocks + 1) << ns->lba_shift;
        meta_len = (io.nblocks + 1) * ns->ms;
-       metadata = (void __user *)(unsigned long)io.metadata;
+       metadata = (void __user *)(uintptr_t)io.metadata;
        write = io.opcode & 1;
 
        if (ns->ext) {
@@ -1846,7 +1850,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        c.rw.metadata = cpu_to_le64(meta_dma);
 
        status = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
-                       (void __user *)io.addr, length, NULL, 0);
+                       (void __user *)(uintptr_t)io.addr, length, NULL, 0);
  unmap:
        if (meta) {
                if (status == NVME_SC_SUCCESS && !write) {
@@ -1888,7 +1892,7 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
                timeout = msecs_to_jiffies(cmd.timeout_ms);
 
        status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c,
-                       NULL, (void __user *)cmd.addr, cmd.data_len,
+                       NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
                        &cmd.result, timeout);
        if (status >= 0) {
                if (put_user(cmd.result, &ucmd->result))
@@ -2439,6 +2443,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
        list_sort(NULL, &dev->namespaces, ns_cmp);
 }
 
+static void nvme_set_irq_hints(struct nvme_dev *dev)
+{
+       struct nvme_queue *nvmeq;
+       int i;
+
+       for (i = 0; i < dev->online_queues; i++) {
+               nvmeq = dev->queues[i];
+
+               if (!nvmeq->tags || !(*nvmeq->tags))
+                       continue;
+
+               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+                                       blk_mq_tags_cpumask(*nvmeq->tags));
+       }
+}
+
 static void nvme_dev_scan(struct work_struct *work)
 {
        struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
@@ -2450,6 +2470,7 @@ static void nvme_dev_scan(struct work_struct *work)
                return;
        nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
        kfree(ctrl);
+       nvme_set_irq_hints(dev);
 }
 
 /*
@@ -2953,22 +2974,6 @@ static const struct file_operations nvme_dev_fops = {
        .compat_ioctl   = nvme_dev_ioctl,
 };
 
-static void nvme_set_irq_hints(struct nvme_dev *dev)
-{
-       struct nvme_queue *nvmeq;
-       int i;
-
-       for (i = 0; i < dev->online_queues; i++) {
-               nvmeq = dev->queues[i];
-
-               if (!nvmeq->tags || !(*nvmeq->tags))
-                       continue;
-
-               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
-                                       blk_mq_tags_cpumask(*nvmeq->tags));
-       }
-}
-
 static int nvme_dev_start(struct nvme_dev *dev)
 {
        int result;
@@ -3010,8 +3015,6 @@ static int nvme_dev_start(struct nvme_dev *dev)
        if (result)
                goto free_tags;
 
-       nvme_set_irq_hints(dev);
-
        dev->event_limit = 1;
        return result;
 
@@ -3062,7 +3065,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
        } else {
                nvme_unfreeze_queues(dev);
                nvme_dev_add(dev);
-               nvme_set_irq_hints(dev);
        }
        return 0;
 }
index d93a0372b37b5c7b4cb214e7013e64897c3a9aba..128e7df5b807222ba2d6f50148cb164919a308f7 100644 (file)
@@ -96,6 +96,8 @@ static int atomic_dec_return_safe(atomic_t *v)
 #define RBD_MINORS_PER_MAJOR           256
 #define RBD_SINGLE_MAJOR_PART_SHIFT    4
 
+#define RBD_MAX_PARENT_CHAIN_LEN       16
+
 #define RBD_SNAP_DEV_NAME_PREFIX       "snap_"
 #define RBD_MAX_SNAP_NAME_LEN  \
                        (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
@@ -426,7 +428,7 @@ static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
                                    size_t count);
 static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
                                       size_t count);
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
 static void rbd_spec_put(struct rbd_spec *spec);
 
 static int rbd_dev_id_to_minor(int dev_id)
@@ -1863,9 +1865,11 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                rbd_osd_read_callback(obj_request);
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
-               rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
+               rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
+                          osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
                /* fall through */
        case CEPH_OSD_OP_WRITE:
+       case CEPH_OSD_OP_WRITEFULL:
                rbd_osd_write_callback(obj_request);
                break;
        case CEPH_OSD_OP_STAT:
@@ -2401,7 +2405,10 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
                                opcode = CEPH_OSD_OP_ZERO;
                }
        } else if (op_type == OBJ_OP_WRITE) {
-               opcode = CEPH_OSD_OP_WRITE;
+               if (!offset && length == object_size)
+                       opcode = CEPH_OSD_OP_WRITEFULL;
+               else
+                       opcode = CEPH_OSD_OP_WRITE;
                osd_req_op_alloc_hint_init(osd_request, num_ops,
                                        object_size, object_size);
                num_ops++;
@@ -3760,6 +3767,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        /* set io sizes to object size */
        segment_size = rbd_obj_bytes(&rbd_dev->header);
        blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
+       q->limits.max_sectors = queue_max_hw_sectors(q);
        blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
        blk_queue_max_segment_size(q, segment_size);
        blk_queue_io_min(q, segment_size);
@@ -3772,6 +3780,9 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
        q->limits.discard_zeroes_data = 1;
 
+       if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
+               q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+
        disk->queue = q;
 
        q->queuedata = rbd_dev;
@@ -5125,44 +5136,51 @@ out_err:
        return ret;
 }
 
-static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
+/*
+ * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
+ * rbd_dev_image_probe() recursion depth, which means it's also the
+ * length of the already discovered part of the parent chain.
+ */
+static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
 {
        struct rbd_device *parent = NULL;
-       struct rbd_spec *parent_spec;
-       struct rbd_client *rbdc;
        int ret;
 
        if (!rbd_dev->parent_spec)
                return 0;
-       /*
-        * We need to pass a reference to the client and the parent
-        * spec when creating the parent rbd_dev.  Images related by
-        * parent/child relationships always share both.
-        */
-       parent_spec = rbd_spec_get(rbd_dev->parent_spec);
-       rbdc = __rbd_get_client(rbd_dev->rbd_client);
 
-       ret = -ENOMEM;
-       parent = rbd_dev_create(rbdc, parent_spec, NULL);
-       if (!parent)
+       if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
+               pr_info("parent chain is too long (%d)\n", depth);
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
+                               NULL);
+       if (!parent) {
+               ret = -ENOMEM;
                goto out_err;
+       }
 
-       ret = rbd_dev_image_probe(parent, false);
+       /*
+        * Images related by parent/child relationships always share
+        * rbd_client and spec/parent_spec, so bump their refcounts.
+        */
+       __rbd_get_client(rbd_dev->rbd_client);
+       rbd_spec_get(rbd_dev->parent_spec);
+
+       ret = rbd_dev_image_probe(parent, depth);
        if (ret < 0)
                goto out_err;
+
        rbd_dev->parent = parent;
        atomic_set(&rbd_dev->parent_ref, 1);
-
        return 0;
+
 out_err:
-       if (parent) {
-               rbd_dev_unparent(rbd_dev);
+       rbd_dev_unparent(rbd_dev);
+       if (parent)
                rbd_dev_destroy(parent);
-       } else {
-               rbd_put_client(rbdc);
-               rbd_spec_put(parent_spec);
-       }
-
        return ret;
 }
 
@@ -5280,7 +5298,7 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
  * parent), initiate a watch on its header object before using that
  * object to get detailed information about the rbd image.
  */
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
 {
        int ret;
 
@@ -5298,7 +5316,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
        if (ret)
                goto err_out_format;
 
-       if (mapping) {
+       if (!depth) {
                ret = rbd_dev_header_watch_sync(rbd_dev);
                if (ret) {
                        if (ret == -ENOENT)
@@ -5319,7 +5337,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
         * Otherwise this is a parent image, identified by pool, image
         * and snap ids - need to fill in names for those ids.
         */
-       if (mapping)
+       if (!depth)
                ret = rbd_spec_fill_snap_id(rbd_dev);
        else
                ret = rbd_spec_fill_names(rbd_dev);
@@ -5341,12 +5359,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
                 * Need to warn users if this image is the one being
                 * mapped and has a parent.
                 */
-               if (mapping && rbd_dev->parent_spec)
+               if (!depth && rbd_dev->parent_spec)
                        rbd_warn(rbd_dev,
                                 "WARNING: kernel layering is EXPERIMENTAL!");
        }
 
-       ret = rbd_dev_probe_parent(rbd_dev);
+       ret = rbd_dev_probe_parent(rbd_dev, depth);
        if (ret)
                goto err_out_probe;
 
@@ -5357,7 +5375,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
 err_out_probe:
        rbd_dev_unprobe(rbd_dev);
 err_out_watch:
-       if (mapping)
+       if (!depth)
                rbd_dev_header_unwatch_sync(rbd_dev);
 out_header_name:
        kfree(rbd_dev->header_name);
@@ -5420,7 +5438,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
        spec = NULL;            /* rbd_dev now owns this */
        rbd_opts = NULL;        /* rbd_dev now owns this */
 
-       rc = rbd_dev_image_probe(rbd_dev, true);
+       rc = rbd_dev_image_probe(rbd_dev, 0);
        if (rc < 0)
                goto err_out_rbd_dev;
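
The rbd hunks above replace the old "mapping" boolean with an explicit recursion depth so the parent-image chain is capped at RBD_MAX_PARENT_CHAIN_LEN instead of recursing without bound on a malformed chain. The control flow reduces to a pattern like this standalone sketch (structures and the limit value are illustrative):

#include <stdio.h>

#define MAX_CHAIN_LEN 16	/* mirrors the idea of RBD_MAX_PARENT_CHAIN_LEN */

struct image {
	const char *name;
	struct image *parent_spec;	/* NULL when there is no parent image */
};

static int probe_parent(struct image *img, int depth);

static int image_probe(struct image *img, int depth)
{
	/* depth == 0 means "the image being mapped"; deeper values are parents */
	return probe_parent(img, depth);
}

static int probe_parent(struct image *img, int depth)
{
	if (!img->parent_spec)
		return 0;
	if (++depth > MAX_CHAIN_LEN) {
		fprintf(stderr, "parent chain is too long (%d)\n", depth);
		return -1;
	}
	return image_probe(img->parent_spec, depth);
}

int main(void)
{
	struct image base = { "base", NULL };
	struct image child = { "child", &base };

	return image_probe(&child, 0);	/* returns 0: chain length is fine */
}
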
 
index e93899cc6f60be0bd13b45dde3b8d697b7a733c8..6ca35495a5becdbac067cb4338981191fd6bc56a 100644 (file)
@@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq)
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
-                       blk_mq_complete_request(vbr->req);
+                       blk_mq_complete_request(vbr->req, vbr->req->errors);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
index deb3f001791f159c5c7ebce19814de31e3106a5e..767657565de64e73f61304741fe9f39c496a2892 100644 (file)
@@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
 
 static int xen_blkif_disconnect(struct xen_blkif *blkif)
 {
+       struct pending_req *req, *n;
+       int i = 0, j;
+
        if (blkif->xenblkd) {
                kthread_stop(blkif->xenblkd);
                wake_up(&blkif->shutdown_wq);
@@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
        /* Remove all persistent grants and the cache of ballooned pages. */
        xen_blkbk_free_caches(blkif);
 
+       /* Check that there is no request in use */
+       list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+               list_del(&req->free_list);
+
+               for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
+                       kfree(req->segments[j]);
+
+               for (j = 0; j < MAX_INDIRECT_PAGES; j++)
+                       kfree(req->indirect_pages[j]);
+
+               kfree(req);
+               i++;
+       }
+
+       WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+       blkif->nr_ring_pages = 0;
+
        return 0;
 }
 
 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-       struct pending_req *req, *n;
-       int i = 0, j;
 
        xen_blkif_disconnect(blkif);
        xen_vbd_free(&blkif->vbd);
@@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
        BUG_ON(!list_empty(&blkif->free_pages));
        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
 
-       /* Check that there is no request in use */
-       list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
-               list_del(&req->free_list);
-
-               for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
-                       kfree(req->segments[j]);
-
-               for (j = 0; j < MAX_INDIRECT_PAGES; j++)
-                       kfree(req->indirect_pages[j]);
-
-               kfree(req);
-               i++;
-       }
-
-       WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-
        kmem_cache_free(xen_blkif_cachep, blkif);
 }
 
index 0823a96902f87fa90d2e35a425183ea0de2e0049..a69c02dadec05684f2a49eefe98033e8ebbc0c3e 100644 (file)
@@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
        RING_IDX i, rp;
        unsigned long flags;
        struct blkfront_info *info = (struct blkfront_info *)dev_id;
+       int error;
 
        spin_lock_irqsave(&info->io_lock, flags);
 
@@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                        continue;
                }
 
-               req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+               error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
                                printk(KERN_WARNING "blkfront: %s: %s op failed\n",
                                           info->gd->disk_name, op_name(bret->operation));
-                               req->errors = -EOPNOTSUPP;
+                               error = -EOPNOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
                                queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
                                queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
                        }
-                       blk_mq_complete_request(req);
+                       blk_mq_complete_request(req, error);
                        break;
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                printk(KERN_WARNING "blkfront: %s: %s op failed\n",
                                       info->gd->disk_name, op_name(bret->operation));
-                               req->errors = -EOPNOTSUPP;
+                               error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
                                     info->shadow[id].req.u.rw.nr_segments == 0)) {
                                printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
                                       info->gd->disk_name, op_name(bret->operation));
-                               req->errors = -EOPNOTSUPP;
+                               error = -EOPNOTSUPP;
                        }
-                       if (unlikely(req->errors)) {
-                               if (req->errors == -EOPNOTSUPP)
-                                       req->errors = 0;
+                       if (unlikely(error)) {
+                               if (error == -EOPNOTSUPP)
+                                       error = 0;
                                info->feature_flush = 0;
                                xlvbd_flush(info);
                        }
@@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
                                        "request: %x\n", bret->status);
 
-                       blk_mq_complete_request(req);
+                       blk_mq_complete_request(req, error);
                        break;
                default:
                        BUG();
@@ -1955,7 +1956,8 @@ static void blkback_changed(struct xenbus_device *dev,
                        break;
                /* Missed the backend's Closing state -- fallthrough */
        case XenbusStateClosing:
-               blkfront_closing(info);
+               if (info)
+                       blkfront_closing(info);
                break;
        }
 }
index 1a82f3a17681b77926a11c29ba23cbdc27d8b6b5..0ebca8ba7bc4103eeeb48c2fe245404091e0c9d3 100644 (file)
@@ -36,7 +36,6 @@ config ARM_CCI400_PORT_CTRL
 
 config ARM_CCI500_PMU
        bool "ARM CCI500 PMU support"
-       default y
        depends on (ARM && CPU_V7) || ARM64
        depends on PERF_EVENTS
        select ARM_CCI_PMU
index 7d9879e166cf4c4346402cb353ef3cd002483740..7082c7268845639399d9ceab44471937011e1705 100644 (file)
@@ -1184,11 +1184,12 @@ static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
                if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
                        break;
                target = cpumask_any_but(cpu_online_mask, cpu);
-               if (target < 0)
+               if (target >= nr_cpu_ids)
                        break;
                perf_pmu_migrate_context(&dt->pmu, cpu, target);
                cpumask_set_cpu(target, &dt->cpu);
-               WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
+               if (ccn->irq)
+                       WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
        default:
                break;
        }
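
The arm_ccn_pmu_cpu_notifier() fix above works because cpumask_any_but() returns an unsigned CPU number, with any value greater than or equal to nr_cpu_ids meaning "no such CPU"; testing the result with "< 0" can never be true. A trivial standalone reminder of the unsigned-comparison pitfall:

#include <stdio.h>

int main(void)
{
	unsigned int nr_cpu_ids = 8;
	unsigned int target = nr_cpu_ids;	/* "no other CPU found" result */

	if (target < 0)				/* always false: target is unsigned */
		puts("never printed");
	if (target >= nr_cpu_ids)		/* the check the patch switches to */
		puts("no other online CPU, keep the current one");
	return 0;
}
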
index c0eaf0973bd2bca88e269dbca7f44fd3fcdaf3c7..779b6ff0c7ad4040a680a2155964bd3016363767 100644 (file)
@@ -333,7 +333,8 @@ int clk_add_alias(const char *alias, const char *alias_dev_name,
        if (IS_ERR(r))
                return PTR_ERR(r);
 
-       l = clkdev_create(r, alias, "%s", alias_dev_name);
+       l = clkdev_create(r, alias, alias_dev_name ? "%s" : NULL,
+                         alias_dev_name);
        clk_put(r);
 
        return l ? 0 : -ENODEV;
index 5837eb8a212fbdcd8446ff9da77f393cc05c128a..85da8b9832568b2e4daab35eea661d4d99a5ac26 100644 (file)
@@ -197,6 +197,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
        for_each_node_by_type(dn, "cpu") {
                struct clk_init_data init;
                struct clk *clk;
+               struct clk *parent_clk;
                char *clk_name = kzalloc(5, GFP_KERNEL);
                int cpu, err;
 
@@ -208,8 +209,9 @@ static void __init of_cpu_clk_setup(struct device_node *node)
                        goto bail_out;
 
                sprintf(clk_name, "cpu%d", cpu);
+               parent_clk = of_clk_get(node, 0);
 
-               cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
+               cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
                cpuclk[cpu].clk_name = clk_name;
                cpuclk[cpu].cpu = cpu;
                cpuclk[cpu].reg_base = clock_complex_base;
index 9b613426e968ab6a0e36fdcb52b7c87ac076253b..bc24e5a002e77cad874a0c010fc55046aa66b0f4 100644 (file)
@@ -45,8 +45,8 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
 #define PSECS_PER_SEC 1000000000000LL
 
 /*
- * Each fine delay is between 40ps-80ps. Assume each fine delay is 60ps to
- * simplify calculations. So 45degs could be anywhere between 33deg and 66deg.
+ * Each fine delay is between 44ps-77ps. Assume each fine delay is 60ps to
+ * simplify calculations. So 45degs could be anywhere between 33deg and 57.8deg.
  */
 #define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
 
@@ -69,7 +69,7 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
 
                delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
                delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
-               degrees += delay_num * factor / 10000;
+               degrees += DIV_ROUND_CLOSEST(delay_num * factor, 10000);
        }
 
        return degrees % 360;
@@ -82,25 +82,41 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
        u8 nineties, remainder;
        u8 delay_num;
        u32 raw_value;
-       u64 delay;
-
-       /* allow 22 to be 22.5 */
-       degrees++;
-       /* floor to 22.5 increment */
-       degrees -= ((degrees) * 10 % 225) / 10;
+       u32 delay;
 
        nineties = degrees / 90;
-       /* 22.5 multiples */
-       remainder = (degrees % 90) / 22;
-
-       delay = PSECS_PER_SEC;
-       do_div(delay, rate);
-       /* / 360 / 22.5 */
-       do_div(delay, 16);
-       do_div(delay, ROCKCHIP_MMC_DELAY_ELEMENT_PSEC);
-
+       remainder = (degrees % 90);
+
+       /*
+        * Due to the inexact nature of the "fine" delay, we might
+        * actually go non-monotonic.  We don't go _too_ monotonic
+        * though, so we should be OK.  Here are options of how we may
+        * work:
+        *
+        * Ideally we end up with:
+        *   1.0, 2.0, ..., 69.0, 70.0, ...,  89.0, 90.0
+        *
+        * On one extreme (if delay is actually 44ps):
+        *   .73, 1.5, ..., 50.6, 51.3, ...,  65.3, 90.0
+        * The other (if delay is actually 77ps):
+        *   1.3, 2.6, ..., 88.6. 89.8, ..., 114.0, 90
+        *
+        * It's possible we might make a delay that is up to 25
+        * degrees off from what we think we're making.  That's OK
+        * though because we should be REALLY far from any bad range.
+        */
+
+       /*
+        * Convert to delay; do a little extra work to make sure we
+        * don't overflow 32-bit / 64-bit numbers.
+        */
+       delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */
        delay *= remainder;
-       delay_num = (u8) min(delay, 255ULL);
+       delay = DIV_ROUND_CLOSEST(delay,
+                       (rate / 1000) * 36 *
+                               (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10));
+
+       delay_num = (u8) min_t(u32, delay, 255);
 
        raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
        raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
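
The rewritten rockchip_mmc_set_phase() above converts the leftover degrees (after whole 90-degree steps) into a count of nominally 60 ps delay elements: one clock period is 10^12/rate picoseconds, one degree is 1/360 of that, and the 10000000 * remainder / ((rate/1000) * 36 * 6) expression is the same formula rearranged so the intermediate values stay within range. A standalone check of the arithmetic, with the rate and phase chosen only as an example:

#include <stdio.h>
#include <stdint.h>

#define ELEMENT_PSEC 60		/* nominal delay of one fine-delay element, in ps */

static uint32_t degrees_to_delay_num(uint32_t rate_hz, uint32_t deg)
{
	/* clock period in ps, scaled down to the requested phase angle */
	uint64_t ps = 1000000000000ULL / rate_hz * deg / 360;

	return (uint32_t)((ps + ELEMENT_PSEC / 2) / ELEMENT_PSEC);	/* round */
}

int main(void)
{
	/* 45 degrees of a 150 MHz clock: ~833 ps, i.e. about 14 elements */
	printf("%u\n", degrees_to_delay_num(150000000, 45));
	return 0;
}
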
index 7c1e1f58e2da2e7dc7909fce407bc477651add3a..2fe37f708dc70828ffa10fc165ecc830fff49c86 100644 (file)
@@ -164,7 +164,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
         * the values for DIV_COPY and DIV_HPM dividers need not be set.
         */
        div0 = cfg_data->div0;
-       if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
+       if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
                div1 = cfg_data->div1;
                if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
                        div1 = readl(base + E4210_DIV_CPU1) &
@@ -185,7 +185,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
                alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
                WARN_ON(alt_div >= MAX_DIV);
 
-               if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+               if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
                        /*
                         * In Exynos4210, ATB clock parent is also mout_core. So
                         * ATB clock also needs to be mantained at safe speed.
@@ -206,7 +206,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
        writel(div0, base + E4210_DIV_CPU0);
        wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);
 
-       if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
+       if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
                writel(div1, base + E4210_DIV_CPU1);
                wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
                                DIV_MASK_ALL);
@@ -225,7 +225,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
        unsigned long mux_reg;
 
        /* find out the divider values to use for clock data */
-       if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+       if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
                while ((cfg_data->prate * 1000) != ndata->new_rate) {
                        if (cfg_data->prate == 0)
                                return -EINVAL;
@@ -240,7 +240,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
        writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
        wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);
 
-       if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+       if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
                div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
                div_mask |= E4210_DIV0_ATB_MASK;
        }
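
The clk-cpu hunks above switch from test_bit() to a plain bitwise AND because these CLK_CPU_* flags are bit masks, while test_bit() expects a bit *number*; passing a mask value makes it test the wrong bit. A standalone illustration of the difference (the flag value here is an assumption, not the driver's actual definition):

#include <stdio.h>

#define HAS_DIV1	(1UL << 0)	/* a mask, i.e. the value 1 */

int main(void)
{
	unsigned long flags = HAS_DIV1;

	/* Correct: test the mask directly. */
	printf("%d\n", !!(flags & HAS_DIV1));			/* 1 */

	/* Buggy pattern: using the mask value as a bit index tests bit 1. */
	printf("%d\n", !!(flags & (1UL << HAS_DIV1)));		/* 0 */

	return 0;
}
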
index 676ee8f6d8136729a9665cfb9c29e7faed123781..8831e1a05367ad9c7473e3ee723adc3f29dc9936 100644 (file)
@@ -374,7 +374,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
        DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
        DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
        DT_CLK(NULL, "uart3_ick", "uart3_ick"),
-       DT_CLK(NULL, "uart4_ick", "uart4_ick"),
        DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
        DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
        DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
@@ -519,6 +518,7 @@ static struct ti_dt_clk am35xx_clks[] = {
 static struct ti_dt_clk omap36xx_clks[] = {
        DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
        DT_CLK(NULL, "uart4_fck", "uart4_fck"),
+       DT_CLK(NULL, "uart4_ick", "uart4_ick"),
        { .node_name = NULL },
 };
 
index 9b5b289e633456206e81268d00bc212a7f9f62cc..a911d7de33778d7bc7648e59f0ee60c1a6c83027 100644 (file)
@@ -18,7 +18,6 @@
 
 #include "clock.h"
 
-#define DRA7_DPLL_ABE_DEFFREQ                          180633600
 #define DRA7_DPLL_GMAC_DEFFREQ                         1000000000
 #define DRA7_DPLL_USB_DEFFREQ                          960000000
 
@@ -313,27 +312,12 @@ static struct ti_dt_clk dra7xx_clks[] = {
 int __init dra7xx_dt_clk_init(void)
 {
        int rc;
-       struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck;
+       struct clk *dpll_ck, *hdcp_ck;
 
        ti_dt_clocks_register(dra7xx_clks);
 
        omap2_clk_disable_autoidle_all();
 
-       abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux");
-       sys_clkin2 = clk_get_sys(NULL, "sys_clkin2");
-       dpll_ck = clk_get_sys(NULL, "dpll_abe_ck");
-
-       rc = clk_set_parent(abe_dpll_mux, sys_clkin2);
-       if (!rc)
-               rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ);
-       if (rc)
-               pr_err("%s: failed to configure ABE DPLL!\n", __func__);
-
-       dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
-       rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
-       if (rc)
-               pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
-
        dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
        rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
        if (rc)
index 90d7d8a21c4918d52b910fab900e78acd68c5aad..1ddc288fce4eb123e63d941971bc94bff41f69a5 100644 (file)
@@ -222,7 +222,7 @@ int omap2_dflt_clk_enable(struct clk_hw *hw)
                }
        }
 
-       if (unlikely(!clk->enable_reg)) {
+       if (unlikely(IS_ERR(clk->enable_reg))) {
                pr_err("%s: %s missing enable_reg\n", __func__,
                       clk_hw_get_name(hw));
                ret = -EINVAL;
@@ -264,7 +264,7 @@ void omap2_dflt_clk_disable(struct clk_hw *hw)
        u32 v;
 
        clk = to_clk_hw_omap(hw);
-       if (!clk->enable_reg) {
+       if (IS_ERR(clk->enable_reg)) {
                /*
                 * 'independent' here refers to a clock which is not
                 * controlled by its parent.
index a7726db13abbb0e883ec5681fec65a473bc9d29e..50b68bc20720297b3ae4cb3e935c771540b9032f 100644 (file)
@@ -279,6 +279,10 @@ config CLKSRC_MIPS_GIC
        depends on MIPS_GIC
        select CLKSRC_OF
 
+config CLKSRC_TANGO_XTAL
+       bool
+       select CLKSRC_OF
+
 config CLKSRC_PXA
        def_bool y if ARCH_PXA || ARCH_SA1100
        select CLKSRC_OF if OF
index 5c00863c3e33ad8a3061fdf88765cfb4c0770d97..67bc996ca90983dc353453b3015808315e1c8c35 100644 (file)
@@ -56,9 +56,11 @@ obj-$(CONFIG_ARCH_KEYSTONE)          += timer-keystone.o
 obj-$(CONFIG_ARCH_INTEGRATOR_AP)       += timer-integrator-ap.o
 obj-$(CONFIG_CLKSRC_VERSATILE)         += versatile.o
 obj-$(CONFIG_CLKSRC_MIPS_GIC)          += mips-gic-timer.o
+obj-$(CONFIG_CLKSRC_TANGO_XTAL)                += tango_xtal.o
 obj-$(CONFIG_CLKSRC_IMX_GPT)           += timer-imx-gpt.o
 obj-$(CONFIG_ASM9260_TIMER)            += asm9260_timer.o
 obj-$(CONFIG_H8300)                    += h8300_timer8.o
 obj-$(CONFIG_H8300_TMR16)              += h8300_timer16.o
 obj-$(CONFIG_H8300_TPU)                        += h8300_tpu.o
 obj-$(CONFIG_CLKSRC_ST_LPC)            += clksrc_st_lpc.o
+obj-$(CONFIG_X86_NUMACHIP)             += numachip.o
index 29ea50ac366ab9c9399951286beb3ff09984d68c..a2cb6fae92958b7319c55ee20ef21e26268ffbea 100644 (file)
@@ -60,7 +60,7 @@ static struct clock_event_device __percpu *gt_evt;
  *  different to the 32-bit upper value read previously, go back to step 2.
  *  Otherwise the 64-bit timer counter value is correct.
  */
-static u64 gt_counter_read(void)
+static u64 notrace _gt_counter_read(void)
 {
        u64 counter;
        u32 lower;
@@ -79,6 +79,11 @@ static u64 gt_counter_read(void)
        return counter;
 }
 
+static u64 gt_counter_read(void)
+{
+       return _gt_counter_read();
+}
+
 /**
  * To ensure that updates to comparator value register do not set the
  * Interrupt Status Register proceed as follows:
@@ -201,7 +206,7 @@ static struct clocksource gt_clocksource = {
 #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
 static u64 notrace gt_sched_clock_read(void)
 {
-       return gt_counter_read();
+       return _gt_counter_read();
 }
 #endif
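
The upper/lower/upper sequence described in the comment above is the usual race-free way to read a 64-bit counter exposed as two 32-bit registers. A minimal user-space sketch of that loop, with plain variables standing in for the registers:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t cnt_lo, cnt_hi;   /* stand-ins for the two registers */

static uint64_t counter_read(void)
{
        uint32_t hi, lo, old_hi;

        hi = cnt_hi;
        do {
                old_hi = hi;
                lo = cnt_lo;
                hi = cnt_hi;            /* re-read the upper half */
        } while (hi != old_hi);         /* retry if it changed meanwhile */

        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        cnt_hi = 1;
        cnt_lo = 0x80;
        printf("%llu\n", (unsigned long long)counter_read());
        return 0;
}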
 
index 7a97a34dba701576c7d8b1d143e355cb56abfc9c..19bb1792d647dc8cca312248b5f2c7eef86099dd 100644 (file)
@@ -228,7 +228,6 @@ static int em_sti_register_clocksource(struct em_sti_priv *p)
 {
        struct clocksource *cs = &p->cs;
 
-       memset(cs, 0, sizeof(*cs));
        cs->name = dev_name(&p->pdev->dev);
        cs->rating = 200;
        cs->read = em_sti_clocksource_read;
@@ -285,7 +284,6 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
 {
        struct clock_event_device *ced = &p->ced;
 
-       memset(ced, 0, sizeof(*ced));
        ced->name = dev_name(&p->pdev->dev);
        ced->features = CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = 200;
index 029f96ab131a20c5ff51ec9f6818f7053e6d4349..ff44082a0827d60ff14a00e40001d5900adc8e92 100644 (file)
@@ -382,24 +382,28 @@ static void exynos4_mct_tick_start(unsigned long cycles,
 static int exynos4_tick_set_next_event(unsigned long cycles,
                                       struct clock_event_device *evt)
 {
-       struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+       struct mct_clock_event_device *mevt;
 
+       mevt = container_of(evt, struct mct_clock_event_device, evt);
        exynos4_mct_tick_start(cycles, mevt);
-
        return 0;
 }
 
 static int set_state_shutdown(struct clock_event_device *evt)
 {
-       exynos4_mct_tick_stop(this_cpu_ptr(&percpu_mct_tick));
+       struct mct_clock_event_device *mevt;
+
+       mevt = container_of(evt, struct mct_clock_event_device, evt);
+       exynos4_mct_tick_stop(mevt);
        return 0;
 }
 
 static int set_state_periodic(struct clock_event_device *evt)
 {
-       struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+       struct mct_clock_event_device *mevt;
        unsigned long cycles_per_jiffy;
 
+       mevt = container_of(evt, struct mct_clock_event_device, evt);
        cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
                            >> evt->shift);
        exynos4_mct_tick_stop(mevt);
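
The exynos_mct callbacks above recover their channel state from the clock_event_device itself via container_of(), instead of assuming they run on the CPU that owns the per-cpu copy. A stand-alone sketch of the container_of() idiom, with invented structure names:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): recover the outer struct
 * from a pointer to one of its embedded members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int irq; };
struct outer { int id; struct inner evt; };

int main(void)
{
        struct outer o = { .id = 7 };
        struct inner *evt = &o.evt;                 /* what a callback receives */
        struct outer *back = container_of(evt, struct outer, evt);

        printf("recovered id = %d\n", back->id);    /* prints 7 */
        return 0;
}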
index ef434699c80a090ee1bbb0de41b38170bddc188b..10202f1fdfd7b2afc6ccf9cd044cecdc2d3d66a7 100644 (file)
@@ -118,7 +118,7 @@ static inline void ftm_reset_counter(void __iomem *base)
        ftm_writel(0x00, base + FTM_CNT);
 }
 
-static u64 ftm_read_sched_clock(void)
+static u64 notrace ftm_read_sched_clock(void)
 {
        return ftm_readl(priv->clksrc_base + FTM_CNT);
 }
index 82941c1e9e33f0d098ae4b0b7273fa263c3b9ce0..0e076c6fc006b26b52729c9389db7d87a9604ce3 100644 (file)
@@ -153,7 +153,6 @@ static int timer16_setup(struct timer16_priv *p, struct platform_device *pdev)
        int ret, irq;
        unsigned int ch;
 
-       memset(p, 0, sizeof(*p));
        p->pdev = pdev;
 
        res[REG_CH] = platform_get_resource(p->pdev,
index f9b3b7033a970acfdc21c7503ec34326e6042748..44375d8b9bc438878be58699fb95ce8fb624ebc6 100644 (file)
@@ -215,7 +215,6 @@ static int timer8_setup(struct timer8_priv *p,
        int irq;
        int ret;
 
-       memset(p, 0, sizeof(*p));
        p->pdev = pdev;
 
        res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
index 64195fdd78bf78e90a1f3c264a1e88716b6c7f25..5487410bfabb93b0c4768a91870485f6ba77d31f 100644 (file)
@@ -123,7 +123,6 @@ static int __init tpu_setup(struct tpu_priv *p, struct platform_device *pdev)
 {
        struct resource *res[2];
 
-       memset(p, 0, sizeof(*p));
        p->pdev = pdev;
 
        res[CH_L] = platform_get_resource(p->pdev, IORESOURCE_MEM, CH_L);
index 50f0641c65b6f30cd890240aca4f6febd5f4a6e8..fbfc74685e6afba837ae549021f7fe763f7c0de4 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/sched_clock.h>
 #include <linux/slab.h>
 
 #define GPT_IRQ_EN_REG         0x00
@@ -59,6 +60,13 @@ struct mtk_clock_event_device {
        struct clock_event_device dev;
 };
 
+static void __iomem *gpt_sched_reg __read_mostly;
+
+static u64 notrace mtk_read_sched_clock(void)
+{
+       return readl_relaxed(gpt_sched_reg);
+}
+
 static inline struct mtk_clock_event_device *to_mtk_clk(
                                struct clock_event_device *c)
 {
@@ -141,14 +149,6 @@ static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static void mtk_timer_global_reset(struct mtk_clock_event_device *evt)
-{
-       /* Disable all interrupts */
-       writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
-       /* Acknowledge all interrupts */
-       writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
-}
-
 static void
 mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
 {
@@ -168,6 +168,12 @@ static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
 {
        u32 val;
 
+       /* Disable all interrupts */
+       writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
+
+       /* Acknowledge all spurious pending interrupts */
+       writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
+
        val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
        writel(val | GPT_IRQ_ENABLE(timer),
                        evt->gpt_base + GPT_IRQ_EN_REG);
@@ -220,8 +226,6 @@ static void __init mtk_timer_init(struct device_node *node)
        }
        rate = clk_get_rate(clk);
 
-       mtk_timer_global_reset(evt);
-
        if (request_irq(evt->dev.irq, mtk_timer_interrupt,
                        IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
                pr_warn("failed to setup irq %d\n", evt->dev.irq);
@@ -234,6 +238,8 @@ static void __init mtk_timer_init(struct device_node *node)
        mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
        clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
                        node->name, rate, 300, 32, clocksource_mmio_readl_up);
+       gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC);
+       sched_clock_register(mtk_read_sched_clock, 32, rate);
 
        /* Configure clock event */
        mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
diff --git a/drivers/clocksource/numachip.c b/drivers/clocksource/numachip.c
new file mode 100644 (file)
index 0000000..4e0f11f
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ *
+ * Copyright (C) 2015 Numascale AS. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clockchips.h>
+
+#include <asm/irq.h>
+#include <asm/numachip/numachip.h>
+#include <asm/numachip/numachip_csr.h>
+
+static DEFINE_PER_CPU(struct clock_event_device, numachip2_ced);
+
+static cycles_t numachip2_timer_read(struct clocksource *cs)
+{
+       return numachip2_read64_lcsr(NUMACHIP2_TIMER_NOW);
+}
+
+static struct clocksource numachip2_clocksource = {
+       .name            = "numachip2",
+       .rating          = 295,
+       .read            = numachip2_timer_read,
+       .mask            = CLOCKSOURCE_MASK(64),
+       .flags           = CLOCK_SOURCE_IS_CONTINUOUS,
+       .mult            = 1,
+       .shift           = 0,
+};
+
+static int numachip2_set_next_event(unsigned long delta, struct clock_event_device *ced)
+{
+       numachip2_write64_lcsr(NUMACHIP2_TIMER_DEADLINE + numachip2_timer(),
+               delta);
+       return 0;
+}
+
+static struct clock_event_device numachip2_clockevent = {
+       .name            = "numachip2",
+       .rating          = 400,
+       .set_next_event  = numachip2_set_next_event,
+       .features        = CLOCK_EVT_FEAT_ONESHOT,
+       .mult            = 1,
+       .shift           = 0,
+       .min_delta_ns    = 1250,
+       .max_delta_ns    = LONG_MAX,
+};
+
+static void numachip_timer_interrupt(void)
+{
+       struct clock_event_device *ced = this_cpu_ptr(&numachip2_ced);
+
+       ced->event_handler(ced);
+}
+
+static __init void numachip_timer_each(struct work_struct *work)
+{
+       unsigned local_apicid = __this_cpu_read(x86_cpu_to_apicid) & 0xff;
+       struct clock_event_device *ced = this_cpu_ptr(&numachip2_ced);
+
+       /* Setup IPI vector to local core and relative timing mode */
+       numachip2_write64_lcsr(NUMACHIP2_TIMER_INT + numachip2_timer(),
+               (3 << 22) | (X86_PLATFORM_IPI_VECTOR << 14) |
+               (local_apicid << 6));
+
+       *ced = numachip2_clockevent;
+       ced->cpumask = cpumask_of(smp_processor_id());
+       clockevents_register_device(ced);
+}
+
+static int __init numachip_timer_init(void)
+{
+       if (numachip_system != 2)
+               return -ENODEV;
+
+       /* Reset timer */
+       numachip2_write64_lcsr(NUMACHIP2_TIMER_RESET, 0);
+       clocksource_register_hz(&numachip2_clocksource, NSEC_PER_SEC);
+
+       /* Setup per-cpu clockevents */
+       x86_platform_ipi_callback = numachip_timer_interrupt;
+       schedule_on_each_cpu(&numachip_timer_each);
+
+       return 0;
+}
+
+arch_initcall(numachip_timer_init);
index bb2c2b05096455066826a13d5ad1156248f5e64e..d3c1742ded1af7655c3e2e77031ab3801ea84761 100644 (file)
@@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np)
        bc_timer.freq = clk_get_rate(timer_clk);
 
        irq = irq_of_parse_and_map(np, 0);
-       if (irq == NO_IRQ) {
+       if (!irq) {
                pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
                return;
        }
index bc90e13338cc6e961d3d3e4c4bdc611c2dd05436..9502bc4c3f6d9a17ec7616bf743c5bfc26c3f638 100644 (file)
@@ -307,7 +307,7 @@ static void samsung_clocksource_resume(struct clocksource *cs)
        samsung_time_start(pwm.source_id, true);
 }
 
-static cycle_t samsung_clocksource_read(struct clocksource *c)
+static cycle_t notrace samsung_clocksource_read(struct clocksource *c)
 {
        return ~readl_relaxed(pwm.source_reg);
 }
index ba73a6eb8d66e640666b3ee5fb1733ea829ba601..103c49362c68f66f830587a6504e33ad428a1c14 100644 (file)
@@ -962,7 +962,6 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
        unsigned int i;
        int ret;
 
-       memset(cmt, 0, sizeof(*cmt));
        cmt->pdev = pdev;
        raw_spin_lock_init(&cmt->lock);
 
index f1985da8113f481fb870c4b91eeef62b962300ad..53aa7e92a7d77b7efc052466e8904efc110cc2eb 100644 (file)
@@ -280,7 +280,9 @@ static int sh_mtu2_clock_event_shutdown(struct clock_event_device *ced)
 {
        struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
 
-       sh_mtu2_disable(ch);
+       if (clockevent_state_periodic(ced))
+               sh_mtu2_disable(ch);
+
        return 0;
 }
 
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
new file mode 100644 (file)
index 0000000..d297b30
--- /dev/null
@@ -0,0 +1,66 @@
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/of_address.h>
+#include <linux/printk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+
+static void __iomem *xtal_in_cnt;
+static struct delay_timer delay_timer;
+
+static unsigned long notrace read_xtal_counter(void)
+{
+       return readl_relaxed(xtal_in_cnt);
+}
+
+static u64 notrace read_sched_clock(void)
+{
+       return read_xtal_counter();
+}
+
+static cycle_t read_clocksource(struct clocksource *cs)
+{
+       return read_xtal_counter();
+}
+
+static struct clocksource tango_xtal = {
+       .name   = "tango-xtal",
+       .rating = 350,
+       .read   = read_clocksource,
+       .mask   = CLOCKSOURCE_MASK(32),
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void __init tango_clocksource_init(struct device_node *np)
+{
+       struct clk *clk;
+       int xtal_freq, ret;
+
+       xtal_in_cnt = of_iomap(np, 0);
+       if (xtal_in_cnt == NULL) {
+               pr_err("%s: invalid address\n", np->full_name);
+               return;
+       }
+
+       clk = of_clk_get(np, 0);
+       if (IS_ERR(clk)) {
+               pr_err("%s: invalid clock\n", np->full_name);
+               return;
+       }
+
+       xtal_freq = clk_get_rate(clk);
+       delay_timer.freq = xtal_freq;
+       delay_timer.read_current_timer = read_xtal_counter;
+
+       ret = clocksource_register_hz(&tango_xtal, xtal_freq);
+       if (ret != 0) {
+               pr_err("%s: registration failed\n", np->full_name);
+               return;
+       }
+
+       sched_clock_register(read_sched_clock, 32, xtal_freq);
+       register_current_timer_delay(&delay_timer);
+}
+
+CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
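
clocksource_register_hz() derives a mult/shift pair so that ns = (cycles * mult) >> shift approximates cycles scaled by 1e9/freq. A stand-alone check of that conversion for an invented 25 MHz crystal and a shift of 24:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t freq = 25000000;                   /* invented 25 MHz xtal */
        uint32_t shift = 24;
        /* mult chosen so (cycles * mult) >> shift == cycles * 1e9 / freq */
        uint64_t mult = (1000000000ULL << shift) / freq;
        uint64_t cycles = 1000;

        printf("mult = %llu\n", (unsigned long long)mult);
        printf("%llu cycles -> %llu ns\n",
               (unsigned long long)cycles,
               (unsigned long long)((cycles * mult) >> shift)); /* 40000 ns */
        return 0;
}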
index 2162796fd504fb15d9639bfec054a7ece427220b..d93ec3c4f139f2bbcf1ada62efaeed9e6062fb66 100644 (file)
@@ -45,6 +45,8 @@
 #include <linux/percpu.h>
 #include <linux/syscore_ops.h>
 
+#include <asm/delay.h>
+
 /*
  * Timer block registers.
  */
@@ -249,6 +251,15 @@ struct syscore_ops armada_370_xp_timer_syscore_ops = {
        .resume         = armada_370_xp_timer_resume,
 };
 
+static unsigned long armada_370_delay_timer_read(void)
+{
+       return ~readl(timer_base + TIMER0_VAL_OFF);
+}
+
+static struct delay_timer armada_370_delay_timer = {
+       .read_current_timer = armada_370_delay_timer_read,
+};
+
 static void __init armada_370_xp_timer_common_init(struct device_node *np)
 {
        u32 clr = 0, set = 0;
@@ -287,6 +298,9 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
                TIMER0_RELOAD_EN | enable_mask,
                TIMER0_RELOAD_EN | enable_mask);
 
+       armada_370_delay_timer.freq = timer_clk;
+       register_current_timer_delay(&armada_370_delay_timer);
+
        /*
         * Set scale and timer for sched_clock.
         */
index 18d4266c2986c65d1f2d3cd55e0d4986dfb9b4d6..bba6799000541d360081b41e4e87ee8d9bf7596a 100644 (file)
@@ -67,7 +67,8 @@ static inline void gpt_writel(void __iomem *base, u32 value, u32 offset,
        writel(value, base + 0x20 * gpt_id + offset);
 }
 
-static cycle_t pistachio_clocksource_read_cycles(struct clocksource *cs)
+static cycle_t notrace
+pistachio_clocksource_read_cycles(struct clocksource *cs)
 {
        struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
        u32 counter, overflw;
index e73947f0f86db3c1b9656c9b3418c9151f7f2954..a536eeb634d885fccf5b92f0d0cbeeec7f88baf2 100644 (file)
@@ -143,7 +143,7 @@ static irqreturn_t digicolor_timer_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static u64 digicolor_timer_sched_read(void)
+static u64 notrace digicolor_timer_sched_read(void)
 {
        return ~readl(dc_timer_dev.base + COUNT(TIMER_B));
 }
index 839aba92fc395811e67e8d2ac7ba1ce7727c0bcf..99ec96769dda899817bf1ee41fd836a4e2bbbc53 100644 (file)
@@ -305,13 +305,14 @@ static int __init mxc_clockevent_init(struct imx_timer *imxtm)
        struct irqaction *act = &imxtm->act;
 
        ced->name = "mxc_timer1";
-       ced->features = CLOCK_EVT_FEAT_ONESHOT;
+       ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
        ced->set_state_shutdown = mxc_shutdown;
        ced->set_state_oneshot = mxc_set_oneshot;
        ced->tick_resume = mxc_shutdown;
        ced->set_next_event = imxtm->gpt->set_next_event;
        ced->rating = 200;
        ced->cpumask = cpumask_of(0);
+       ced->irq = imxtm->irq;
        clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
                                        0xff, 0xfffffffe);
 
index edacf3902e107d9ac60c84cdaba4f4ad1822213c..1cea08cf603eb30d5028e157dc8927aef4a004a9 100644 (file)
@@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np)
        int irq, error;
 
        irq  = irq_of_parse_and_map(np, 0);
-       if (irq == NO_IRQ) {
+       if (!irq) {
                pr_err("%s: failed to map interrupts\n", __func__);
                return;
        }
index 78de982cc640bd93a5ea9b834e9c7049061a24a2..2854c663e8b5b978ae463d1d96c218c79da77a5e 100644 (file)
@@ -73,7 +73,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
 }
 
 /* read 64-bit timer counter */
-static cycle_t sirfsoc_timer_read(struct clocksource *cs)
+static cycle_t notrace sirfsoc_timer_read(struct clocksource *cs)
 {
        u64 cycles;
 
index f07ba99321716c02628affeafca70100642a1df2..a0e6c68536a18d8dbc76eb7f239bea31ca24240f 100644 (file)
@@ -52,7 +52,7 @@ static inline void pit_irq_acknowledge(void)
        __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
 }
 
-static u64 pit_read_sched_clock(void)
+static u64 notrace pit_read_sched_clock(void)
 {
        return ~__raw_readl(clksrc_base + PITCVAL);
 }
index 798277227de7f3a897a4ad79fcaabe787412fcfb..cec1ee2d2f744b968fe653f47dc5067dfe4dccb1 100644 (file)
@@ -149,6 +149,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
 {
        struct acpi_cpufreq_data *data = policy->driver_data;
 
+       if (unlikely(!data))
+               return -ENODEV;
+
        return cpufreq_show_cpus(data->freqdomain_cpus, buf);
 }
 
index ef5ed9470de9a59d371e34e7db24a434d1f11a9f..25c4c15103a0cd8759e006eaa10d9f9edbfb5872 100644 (file)
@@ -1436,8 +1436,10 @@ static void cpufreq_offline_finish(unsigned int cpu)
         * since this is a core component, and is essential for the
         * subsequent light-weight ->init() to succeed.
         */
-       if (cpufreq_driver->exit)
+       if (cpufreq_driver->exit) {
                cpufreq_driver->exit(policy);
+               policy->freq_table = NULL;
+       }
 }
 
 /**
index 3af9dd7332e6927d8dd860b5af410fba738bff4a..aa33b92b3e3e8866345e9893e3b0a880b8b1a17a 100644 (file)
@@ -776,6 +776,11 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
        local_irq_save(flags);
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
+       if (cpu->prev_mperf == mperf) {
+               local_irq_restore(flags);
+               return;
+       }
+
        tsc = rdtsc();
        local_irq_restore(flags);
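
The new early return skips a sample whose MPERF count has not moved; the later APERF/MPERF busy calculation divides by the per-sample MPERF delta, so a zero delta would be unusable. A toy illustration with invented counter values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t prev_aperf = 1000, prev_mperf = 2000;
        uint64_t aperf = 1600, mperf = 2000;        /* MPERF did not advance */

        if (mperf == prev_mperf) {
                puts("sample skipped: zero MPERF delta");
                return 0;
        }
        printf("busy = %llu%%\n",
               (unsigned long long)(100 * (aperf - prev_aperf) /
                                    (mperf - prev_mperf)));
        return 0;
}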
 
index 3927ed9fdbd51f16d765aede8fef14418994ab55..ca848cc6a8fd1313bc56e5b93674b3d795814779 100644 (file)
@@ -492,7 +492,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
        if (err) {
                put_device(&devfreq->dev);
                mutex_unlock(&devfreq->lock);
-               goto err_dev;
+               goto err_out;
        }
 
        mutex_unlock(&devfreq->lock);
@@ -518,7 +518,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
 err_init:
        list_del(&devfreq->node);
        device_unregister(&devfreq->dev);
-err_dev:
        kfree(devfreq);
 err_out:
        return ERR_PTR(err);
@@ -795,8 +794,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
                ret = PTR_ERR(governor);
                goto out;
        }
-       if (df->governor == governor)
+       if (df->governor == governor) {
+               ret = 0;
                goto out;
+       }
 
        if (df->governor) {
                ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
index a165b4bfd3300e97d409f2053b71adb276392336..dd24375b76ddcba72409d3c5c1285f19c172a45f 100644 (file)
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
        return desc;
 }
 
+void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+{
+       memset(&desc->lld, 0, sizeof(desc->lld));
+       INIT_LIST_HEAD(&desc->descs_list);
+       desc->direction = DMA_TRANS_NONE;
+       desc->xfer_size = 0;
+       desc->active_xfer = false;
+}
+
 /* Call must be protected by lock. */
 static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
 {
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
                desc = list_first_entry(&atchan->free_descs_list,
                                        struct at_xdmac_desc, desc_node);
                list_del(&desc->desc_node);
-               desc->active_xfer = false;
+               at_xdmac_init_used_desc(desc);
        }
 
        return desc;
@@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
 
        if (xt->src_inc) {
                if (xt->src_sgl)
-                       chan_cc |=  AT_XDMAC_CC_SAM_UBS_DS_AM;
+                       chan_cc |=  AT_XDMAC_CC_SAM_UBS_AM;
                else
                        chan_cc |=  AT_XDMAC_CC_SAM_INCREMENTED_AM;
        }
 
        if (xt->dst_inc) {
                if (xt->dst_sgl)
-                       chan_cc |=  AT_XDMAC_CC_DAM_UBS_DS_AM;
+                       chan_cc |=  AT_XDMAC_CC_DAM_UBS_AM;
                else
                        chan_cc |=  AT_XDMAC_CC_DAM_INCREMENTED_AM;
        }
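
The new at_xdmac_init_used_desc() scrubs a descriptor taken back from the free list, so list linkage, direction and sizes left over from its previous use cannot leak into the next transfer. A small user-space sketch of the recycle-then-reset pattern, with invented fields:

#include <stdlib.h>

struct desc {
        struct desc *next;          /* free-list linkage */
        size_t xfer_size;
        int active;
};

/* Reset everything a previous user may have left behind. */
static void desc_init_used(struct desc *d)
{
        d->next = NULL;
        d->xfer_size = 0;
        d->active = 0;
}

static struct desc *desc_get(struct desc **free_list)
{
        struct desc *d = *free_list;

        if (d) {
                *free_list = d->next;   /* pop from the free list */
                desc_init_used(d);      /* scrub stale state before reuse */
        }
        return d;
}

int main(void)
{
        struct desc *free_list = calloc(1, sizeof(struct desc));
        struct desc *d = desc_get(&free_list);

        free(d);
        return 0;
}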
index 3ff284c8e3d5aef72f229017c883c73cbe13403f..09479d4be4db3d776fd1f3400724d13f26808428 100644 (file)
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
        mutex_lock(&dma_list_mutex);
 
        if (chan->client_count == 0) {
+               struct dma_device *device = chan->device;
+
+               dma_cap_set(DMA_PRIVATE, device->cap_mask);
+               device->privatecnt++;
                err = dma_chan_get(chan);
-               if (err)
+               if (err) {
                        pr_debug("%s: failed to get %s: (%d)\n",
                                __func__, dma_chan_name(chan), err);
+                       chan = NULL;
+                       if (--device->privatecnt == 0)
+                               dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+               }
        } else
                chan = NULL;
 
index cf1c87fa1edd557eb57f53dd41c11c02a440ea82..bedce038c6e281bb1e1bf6ba89585c14d532a5b2 100644 (file)
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        INIT_LIST_HEAD(&dw->dma.channels);
        for (i = 0; i < nr_channels; i++) {
                struct dw_dma_chan      *dwc = &dw->chan[i];
-               int                     r = nr_channels - i - 1;
 
                dwc->chan.device = &dw->dma;
                dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
                /* 7 is highest priority & 0 is lowest. */
                if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
-                       dwc->priority = r;
+                       dwc->priority = nr_channels - i - 1;
                else
                        dwc->priority = i;
 
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
                /* Hardware configuration */
                if (autocfg) {
                        unsigned int dwc_params;
+                       unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
                        void __iomem *addr = chip->regs + r * sizeof(u32);
 
                        dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
index 18c14e1f1414e650969ff3c9e34431072b3abd83..48d6d9e94f6763c91bcf069848d9ef13e2eed48d 100644 (file)
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
        struct idma64_desc *desc = idma64c->desc;
        struct idma64_hw_desc *hw;
        size_t bytes = desc->length;
-       u64 llp;
-       u32 ctlhi;
+       u64 llp = channel_readq(idma64c, LLP);
+       u32 ctlhi = channel_readl(idma64c, CTL_HI);
        unsigned int i = 0;
 
-       llp = channel_readq(idma64c, LLP);
        do {
                hw = &desc->hw[i];
-       } while ((hw->llp != llp) && (++i < desc->ndesc));
+               if (hw->llp == llp)
+                       break;
+               bytes -= hw->len;
+       } while (++i < desc->ndesc);
 
        if (!i)
                return bytes;
 
-       do {
-               bytes -= desc->hw[--i].len;
-       } while (i);
+       /* The current chunk is not fully transferred yet */
+       bytes += desc->hw[--i].len;
 
-       ctlhi = channel_readl(idma64c, CTL_HI);
        return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
 }
 
index 5cb61ce01036fef2dc5248d11f99859e2dcb9d86..fc4156afa070306cd2fee4502f361717f364706b 100644 (file)
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan)
                return;
 
        /* clear the channel mapping in DRCMR */
-       reg = pxad_drcmr(chan->drcmr);
-       writel_relaxed(0, chan->phy->base + reg);
+       if (chan->drcmr <= DRCMR_CHLNUM) {
+               reg = pxad_drcmr(chan->drcmr);
+               writel_relaxed(0, chan->phy->base + reg);
+       }
 
        spin_lock_irqsave(&pdev->phy_lock, flags);
        for (i = 0; i < 32; i++)
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
                "%s(); phy=%p(%d) misaligned=%d\n", __func__,
                phy, phy->idx, misaligned);
 
-       reg = pxad_drcmr(phy->vchan->drcmr);
-       writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+       if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+               reg = pxad_drcmr(phy->vchan->drcmr);
+               writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+       }
 
        dalgn = phy_readl_relaxed(phy, DALGN);
        if (misaligned)
@@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
        struct dma_async_tx_descriptor *tx;
        struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
 
+       INIT_LIST_HEAD(&vd->node);
        tx = vchan_tx_prep(vc, vd, tx_flags);
        tx->tx_submit = pxad_tx_submit;
        dev_dbg(&chan->vc.chan.dev->device,
@@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan,
                width = chan->cfg.src_addr_width;
                dev_addr = chan->cfg.src_addr;
                *dev_src = dev_addr;
-               *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
+               *dcmd |= PXA_DCMD_INCTRGADDR;
+               if (chan->drcmr <= DRCMR_CHLNUM)
+                       *dcmd |= PXA_DCMD_FLOWSRC;
        }
        if (dir == DMA_MEM_TO_DEV) {
                maxburst = chan->cfg.dst_maxburst;
                width = chan->cfg.dst_addr_width;
                dev_addr = chan->cfg.dst_addr;
                *dev_dst = dev_addr;
-               *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
+               *dcmd |= PXA_DCMD_INCSRCADDR;
+               if (chan->drcmr <= DRCMR_CHLNUM)
+                       *dcmd |= PXA_DCMD_FLOWTRG;
        }
        if (dir == DMA_MEM_TO_MEM)
                *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
@@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan,
        else
                curr = phy_readl_relaxed(chan->phy, DTADR);
 
+       /*
+        * curr has to be actually read before checking descriptor
+        * completion, so that a curr inside a status updater
+        * descriptor implies the following test returns true, and
+        * preventing reordering of curr load and the test.
+        */
+       rmb();
+       if (is_desc_completed(vd))
+               goto out;
+
        for (i = 0; i < sw_desc->nb_desc - 1; i++) {
                hw_desc = sw_desc->hw_desc[i];
                if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
index a1a500d96ff2788db7355a65284a9a3b54c0a1e0..1661d518224a7e4e57ca6c8c717096b5a87333e1 100644 (file)
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract)
 static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
 {
        struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
-       struct sun4i_dma_promise *promise;
+       struct sun4i_dma_promise *promise, *tmp;
 
        /* Free all the demands and completed demands */
-       list_for_each_entry(promise, &contract->demands, list)
+       list_for_each_entry_safe(promise, tmp, &contract->demands, list)
                kfree(promise);
 
-       list_for_each_entry(promise, &contract->completed_demands, list)
+       list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
                kfree(promise);
 
        kfree(contract);
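
list_for_each_entry() advances by dereferencing the current entry, so freeing that entry inside the loop is a use-after-free; the _safe variant caches the next pointer first. The same idiom for a plain singly linked list:

#include <stdlib.h>

struct node { struct node *next; };

/* Freeing while iterating: read the next pointer before the node is
 * freed, which is exactly what the _safe list iterator does above. */
static void free_all(struct node *head)
{
        struct node *n, *tmp;

        for (n = head; n; n = tmp) {
                tmp = n->next;          /* save before free() */
                free(n);
        }
}

int main(void)
{
        struct node *a = calloc(1, sizeof(*a));
        struct node *b = calloc(1, sizeof(*b));

        a->next = b;
        free_all(a);
        return 0;
}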
index b23e8d52d1263abc11cc126e9e0b80e1dcc5cc1b..8d57b1b12e411ef902d26af984e7d34a741a4cf2 100644 (file)
@@ -59,7 +59,6 @@
 #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN                0xD070
 #define XGENE_DMA_RING_BLK_MEM_RDY             0xD074
 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL         0xFFFFFFFF
-#define XGENE_DMA_RING_DESC_CNT(v)             (((v) & 0x0001FFFE) >> 1)
 #define XGENE_DMA_RING_ID_GET(owner, num)      (((owner) << 6) | (num))
 #define XGENE_DMA_RING_DST_ID(v)               ((1 << 10) | (v))
 #define XGENE_DMA_RING_CMD_OFFSET              0x2C
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
        return flyby_type[src_cnt];
 }
 
-static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
-{
-       u32 __iomem *cmd_base = ring->cmd_base;
-       u32 ring_state = ioread32(&cmd_base[1]);
-
-       return XGENE_DMA_RING_DESC_CNT(ring_state);
-}
-
 static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
                                     dma_addr_t *paddr)
 {
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
        dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
 }
 
-static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
-                                  struct xgene_dma_desc_sw *desc_sw)
+static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
+                                   struct xgene_dma_desc_sw *desc_sw)
 {
+       struct xgene_dma_ring *ring = &chan->tx_ring;
        struct xgene_dma_desc_hw *desc_hw;
 
-       /* Check if can push more descriptor to hw for execution */
-       if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
-               return -EBUSY;
-
        /* Get hw descriptor from DMA tx ring */
        desc_hw = &ring->desc_hw[ring->head];
 
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
                memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
        }
 
+       /* Increment the pending transaction count */
+       chan->pending += ((desc_sw->flags &
+                         XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
        /* Notify the hw that we have descriptor ready for execution */
        iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
                  2 : 1, ring->cmd);
-
-       return 0;
 }
 
 /**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 {
        struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
-       int ret;
 
        /*
         * If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
                if (chan->pending >= chan->max_outstanding)
                        return;
 
-               ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
-               if (ret)
-                       return;
+               xgene_chan_xfer_request(chan, desc_sw);
 
                /*
                 * Delete this element from ld pending queue and append it to
                 * ld running queue
                 */
                list_move_tail(&desc_sw->node, &chan->ld_running);
-
-               /* Increment the pending transaction count */
-               chan->pending++;
        }
 }
 
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
                 * Decrement the pending transaction count
                 * as we have processed one
                 */
-               chan->pending--;
+               chan->pending -= ((desc_sw->flags &
+                                 XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
 
                /*
                 * Delete this node from ld running queue and append it to
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
                                     struct xgene_dma_ring *ring,
                                     enum xgene_dma_ring_cfgsize cfgsize)
 {
+       int ret;
+
        /* Setup DMA ring descriptor variables */
        ring->pdma = chan->pdma;
        ring->cfgsize = cfgsize;
        ring->num = chan->pdma->ring_num++;
        ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
 
-       ring->size = xgene_dma_get_ring_size(chan, cfgsize);
-       if (ring->size <= 0)
-               return ring->size;
+       ret = xgene_dma_get_ring_size(chan, cfgsize);
+       if (ret <= 0)
+               return ret;
+       ring->size = ret;
 
        /* Allocate memory for DMA ring descriptor */
        ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
                 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
 
        /* Set the max outstanding request possible to this channel */
-       chan->max_outstanding = rx_ring->slots;
+       chan->max_outstanding = tx_ring->slots;
 
        return ret;
 }
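
With the ring-state read removed, the driver throttles on its own accounting only, and a 64-byte descriptor is charged two ring slots instead of one. A toy illustration of slot-based accounting, with invented numbers:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        unsigned int pending = 0, max_outstanding = 8;  /* invented ring size */
        bool is_64b[] = { true, false, true, true };

        for (unsigned int i = 0; i < sizeof(is_64b) / sizeof(is_64b[0]); i++) {
                if (pending >= max_outstanding)
                        break;                          /* stop submitting */
                pending += is_64b[i] ? 2 : 1;           /* 64B descriptor = 2 slots */
        }
        printf("slots in flight: %u\n", pending);       /* prints 7 */
        return 0;
}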
index 39915a6b7986e2fba00d285370f57f27ed3eeb9a..c017fcd8e07c29b65b7a480a1b817b4645c40f33 100644 (file)
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
        struct dma_chan *chan;
        struct zx_dma_chan *c;
 
-       if (request > d->dma_requests)
+       if (request >= d->dma_requests)
                return NULL;
 
        chan = dma_get_any_slave_channel(&d->slave);
index ae3c5f3ce405829fabc9e18e790778b8a5dcf3ea..dbf53e08bdd1a38261fc883bd812e881e4152fd8 100644 (file)
@@ -12,6 +12,8 @@ obj-$(CONFIG_EDAC_MM_EDAC)            += edac_core.o
 edac_core-y    := edac_mc.o edac_device.o edac_mc_sysfs.o
 edac_core-y    += edac_module.o edac_device_sysfs.o
 
+edac_core-$(CONFIG_EDAC_DEBUG)         += debugfs.o
+
 ifdef CONFIG_PCI
 edac_core-y    += edac_pci.o edac_pci_sysfs.o
 endif
index 23ef0917483cadb2c2380795d70c2714aae0ce76..929640981d8a845793ed4c005da23a7a26c0c32b 100644 (file)
@@ -51,11 +51,9 @@ static const struct altr_sdram_prv_data c5_data = {
        .ecc_irq_clr_mask   = (CV_DRAMINTR_INTRCLR | CV_DRAMINTR_INTREN),
        .ecc_cnt_rst_offset = CV_DRAMINTR_OFST,
        .ecc_cnt_rst_mask   = CV_DRAMINTR_INTRCLR,
-#ifdef CONFIG_EDAC_DEBUG
        .ce_ue_trgr_offset  = CV_CTLCFG_OFST,
        .ce_set_mask        = CV_CTLCFG_GEN_SB_ERR,
        .ue_set_mask        = CV_CTLCFG_GEN_DB_ERR,
-#endif
 };
 
 static const struct altr_sdram_prv_data a10_data = {
@@ -72,11 +70,9 @@ static const struct altr_sdram_prv_data a10_data = {
        .ecc_irq_clr_mask   = (A10_INTSTAT_SBEERR | A10_INTSTAT_DBEERR),
        .ecc_cnt_rst_offset = A10_ECCCTRL1_OFST,
        .ecc_cnt_rst_mask   = A10_ECC_CNT_RESET_MASK,
-#ifdef CONFIG_EDAC_DEBUG
        .ce_ue_trgr_offset  = A10_DIAGINTTEST_OFST,
        .ce_set_mask        = A10_DIAGINT_TSERRA_MASK,
        .ue_set_mask        = A10_DIAGINT_TDERRA_MASK,
-#endif
 };
 
 static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
@@ -116,7 +112,6 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
-#ifdef CONFIG_EDAC_DEBUG
 static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
                                            const char __user *data,
                                            size_t count, loff_t *ppos)
@@ -191,14 +186,15 @@ static const struct file_operations altr_sdr_mc_debug_inject_fops = {
 
 static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
 {
-       if (mci->debugfs)
-               debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
-                                   &altr_sdr_mc_debug_inject_fops);
+       if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
+               return;
+
+       if (!mci->debugfs)
+               return;
+
+       edac_debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
+                                &altr_sdr_mc_debug_inject_fops);
 }
-#else
-static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
-{}
-#endif
 
 /* Get total memory size from Open Firmware DTB */
 static unsigned long get_total_mem(void)
index 7b64dc7c4eb7bc54f505416a2d1c7eb7c234d832..953077d3e4f3c3d43f45a5a415e20426123dabd2 100644 (file)
@@ -30,8 +30,7 @@
 #define CV_CTLCFG_GEN_SB_ERR       0x2000
 #define CV_CTLCFG_GEN_DB_ERR       0x4000
 
-#define CV_CTLCFG_ECC_AUTO_EN     (CV_CTLCFG_ECC_EN | \
-                                  CV_CTLCFG_ECC_CORR_EN)
+#define CV_CTLCFG_ECC_AUTO_EN     (CV_CTLCFG_ECC_EN)
 
 /* SDRAM Controller Address Width Register */
 #define CV_DRAMADDRW_OFST          0x2C
@@ -181,13 +180,11 @@ struct altr_sdram_prv_data {
        int ecc_irq_clr_mask;
        int ecc_cnt_rst_offset;
        int ecc_cnt_rst_mask;
-#ifdef CONFIG_EDAC_DEBUG
        struct edac_dev_sysfs_attribute *eccmgr_sysfs_attr;
        int ecc_enable_mask;
        int ce_set_mask;
        int ue_set_mask;
        int ce_ue_trgr_offset;
-#endif
 };
 
 /* Altera SDRAM Memory Controller data */
index 73aea40a9c89592035e08ecf98292dc9aab9df62..9eee13ef83a560fd0c54153c61bc01a77cd3eb7f 100644 (file)
@@ -173,7 +173,7 @@ static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
  * scan the scrub rate mapping table for a close or matching bandwidth value to
  * issue. If requested is too big, then use last maximum value found.
  */
-static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
+static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
 {
        u32 scrubval;
        int i;
@@ -201,7 +201,14 @@ static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 
        scrubval = scrubrates[i].scrubval;
 
-       pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
+       if (pvt->fam == 0x15 && pvt->model == 0x60) {
+               f15h_select_dct(pvt, 0);
+               pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
+               f15h_select_dct(pvt, 1);
+               pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
+       } else {
+               pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
+       }
 
        if (scrubval)
                return scrubrates[i].bandwidth;
@@ -217,11 +224,15 @@ static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
        if (pvt->fam == 0xf)
                min_scrubrate = 0x0;
 
-       /* Erratum #505 */
-       if (pvt->fam == 0x15 && pvt->model < 0x10)
-               f15h_select_dct(pvt, 0);
+       if (pvt->fam == 0x15) {
+               /* Erratum #505 */
+               if (pvt->model < 0x10)
+                       f15h_select_dct(pvt, 0);
 
-       return __set_scrub_rate(pvt->F3, bw, min_scrubrate);
+               if (pvt->model == 0x60)
+                       min_scrubrate = 0x6;
+       }
+       return __set_scrub_rate(pvt, bw, min_scrubrate);
 }
 
 static int get_scrub_rate(struct mem_ctl_info *mci)
@@ -230,11 +241,15 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
        u32 scrubval = 0;
        int i, retval = -EINVAL;
 
-       /* Erratum #505 */
-       if (pvt->fam == 0x15 && pvt->model < 0x10)
-               f15h_select_dct(pvt, 0);
+       if (pvt->fam == 0x15) {
+               /* Erratum #505 */
+               if (pvt->model < 0x10)
+                       f15h_select_dct(pvt, 0);
 
-       amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
+               if (pvt->model == 0x60)
+                       amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+       } else
+               amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
 
        scrubval = scrubval & 0x001F;
 
@@ -2770,7 +2785,7 @@ static int init_one_instance(struct pci_dev *F2)
        struct mem_ctl_info *mci = NULL;
        struct edac_mc_layer layers[2];
        int err = 0, ret;
-       u16 nid = amd_get_node_id(F2);
+       u16 nid = amd_pci_dev_to_node_id(F2);
 
        ret = -ENOMEM;
        pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2860,7 +2875,7 @@ err_ret:
 static int probe_one_instance(struct pci_dev *pdev,
                              const struct pci_device_id *mc_type)
 {
-       u16 nid = amd_get_node_id(pdev);
+       u16 nid = amd_pci_dev_to_node_id(pdev);
        struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
        struct ecc_settings *s;
        int ret = 0;
@@ -2910,7 +2925,7 @@ static void remove_one_instance(struct pci_dev *pdev)
 {
        struct mem_ctl_info *mci;
        struct amd64_pvt *pvt;
-       u16 nid = amd_get_node_id(pdev);
+       u16 nid = amd_pci_dev_to_node_id(pdev);
        struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
        struct ecc_settings *s = ecc_stngs[nid];
 
index 4bdec752d33096bfdfa9c5c201c81dba3c217af8..c0f248f3aaf976ef9f5ce3b79c20b9bf8b3b7cbd 100644 (file)
@@ -2,64 +2,10 @@
  * AMD64 class Memory Controller kernel module
  *
  * Copyright (c) 2009 SoftwareBitMaker.
- * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ * Copyright (c) 2009-15 Advanced Micro Devices, Inc.
  *
  * This file may be distributed under the terms of the
  * GNU General Public License.
- *
- *     Originally Written by Thayne Harbaugh
- *
- *      Changes by Douglas "norsk" Thompson  <dougthompson@xmission.com>:
- *             - K8 CPU Revision D and greater support
- *
- *      Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>:
- *             - Module largely rewritten, with new (and hopefully correct)
- *             code for dealing with node and chip select interleaving,
- *             various code cleanup, and bug fixes
- *             - Added support for memory hoisting using DRAM hole address
- *             register
- *
- *     Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
- *             -K8 Rev (1207) revision support added, required Revision
- *             specific mini-driver code to support Rev F as well as
- *             prior revisions
- *
- *     Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
- *             -Family 10h revision support added. New PCI Device IDs,
- *             indicating new changes. Actual registers modified
- *             were slight, less than the Rev E to Rev F transition
- *             but changing the PCI Device ID was the proper thing to
- *             do, as it provides for almost automactic family
- *             detection. The mods to Rev F required more family
- *             information detection.
- *
- *     Changes/Fixes by Borislav Petkov <bp@alien8.de>:
- *             - misc fixes and code cleanups
- *
- * This module is based on the following documents
- * (available from http://www.amd.com/):
- *
- *     Title:  BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
- *             Opteron Processors
- *     AMD publication #: 26094
- *`    Revision: 3.26
- *
- *     Title:  BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
- *             Processors
- *     AMD publication #: 32559
- *     Revision: 3.00
- *     Issue Date: May 2006
- *
- *     Title:  BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
- *             Processors
- *     AMD publication #: 31116
- *     Revision: 3.00
- *     Issue Date: September 07, 2007
- *
- * Sections in the first 2 documents are no longer in sync with each other.
- * The Family 10h BKDG was totally re-written from scratch with a new
- * presentation model.
- * Therefore, comments that refer to a Document section might be off.
  */
 
 #include <linux/module.h>
 
 #define DCT_SEL_HI                     0x114
 
+#define F15H_M60H_SCRCTRL              0x1C8
+
 /*
  * Function 3 - Misc Control
  */
diff --git a/drivers/edac/debugfs.c b/drivers/edac/debugfs.c
new file mode 100644 (file)
index 0000000..54d2f66
--- /dev/null
@@ -0,0 +1,163 @@
+#include "edac_module.h"
+
+static struct dentry *edac_debugfs;
+
+static ssize_t edac_fake_inject_write(struct file *file,
+                                     const char __user *data,
+                                     size_t count, loff_t *ppos)
+{
+       struct device *dev = file->private_data;
+       struct mem_ctl_info *mci = to_mci(dev);
+       static enum hw_event_mc_err_type type;
+       u16 errcount = mci->fake_inject_count;
+
+       if (!errcount)
+               errcount = 1;
+
+       type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
+                                  : HW_EVENT_ERR_CORRECTED;
+
+       printk(KERN_DEBUG
+              "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
+               errcount,
+               (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
+               errcount > 1 ? "s" : "",
+               mci->fake_inject_layer[0],
+               mci->fake_inject_layer[1],
+               mci->fake_inject_layer[2]
+              );
+       edac_mc_handle_error(type, mci, errcount, 0, 0, 0,
+                            mci->fake_inject_layer[0],
+                            mci->fake_inject_layer[1],
+                            mci->fake_inject_layer[2],
+                            "FAKE ERROR", "for EDAC testing only");
+
+       return count;
+}
+
+static const struct file_operations debug_fake_inject_fops = {
+       .open = simple_open,
+       .write = edac_fake_inject_write,
+       .llseek = generic_file_llseek,
+};
+
+int __init edac_debugfs_init(void)
+{
+       edac_debugfs = debugfs_create_dir("edac", NULL);
+       if (IS_ERR(edac_debugfs)) {
+               edac_debugfs = NULL;
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void edac_debugfs_exit(void)
+{
+       debugfs_remove(edac_debugfs);
+}
+
+int edac_create_debugfs_nodes(struct mem_ctl_info *mci)
+{
+       struct dentry *d, *parent;
+       char name[80];
+       int i;
+
+       if (!edac_debugfs)
+               return -ENODEV;
+
+       d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs);
+       if (!d)
+               return -ENOMEM;
+       parent = d;
+
+       for (i = 0; i < mci->n_layers; i++) {
+               sprintf(name, "fake_inject_%s",
+                            edac_layer_name[mci->layers[i].type]);
+               d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
+                                     &mci->fake_inject_layer[i]);
+               if (!d)
+                       goto nomem;
+       }
+
+       d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
+                               &mci->fake_inject_ue);
+       if (!d)
+               goto nomem;
+
+       d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent,
+                               &mci->fake_inject_count);
+       if (!d)
+               goto nomem;
+
+       d = debugfs_create_file("fake_inject", S_IWUSR, parent,
+                               &mci->dev,
+                               &debug_fake_inject_fops);
+       if (!d)
+               goto nomem;
+
+       mci->debugfs = parent;
+       return 0;
+nomem:
+       edac_debugfs_remove_recursive(mci->debugfs);
+       return -ENOMEM;
+}
+
+/* Create a toplevel dir under EDAC's debugfs hierarchy */
+struct dentry *edac_debugfs_create_dir(const char *dirname)
+{
+       if (!edac_debugfs)
+               return NULL;
+
+       return debugfs_create_dir(dirname, edac_debugfs);
+}
+EXPORT_SYMBOL_GPL(edac_debugfs_create_dir);
+
+/* Create a toplevel dir under EDAC's debugfs hierarchy with parent @parent */
+struct dentry *
+edac_debugfs_create_dir_at(const char *dirname, struct dentry *parent)
+{
+       return debugfs_create_dir(dirname, parent);
+}
+EXPORT_SYMBOL_GPL(edac_debugfs_create_dir_at);
+
+/*
+ * Create a file under EDAC's hierarchy or a sub-hierarchy:
+ *
+ * @name: file name
+ * @mode: file permissions
+ * @parent: parent dentry. If NULL, it becomes the toplevel EDAC dir
+ * @data: private data of caller
+ * @fops: file operations of this file
+ */
+struct dentry *
+edac_debugfs_create_file(const char *name, umode_t mode, struct dentry *parent,
+                        void *data, const struct file_operations *fops)
+{
+       if (!parent)
+               parent = edac_debugfs;
+
+       return debugfs_create_file(name, mode, parent, data, fops);
+}
+EXPORT_SYMBOL_GPL(edac_debugfs_create_file);
+
+/* Wrapper for debugfs_create_x8() */
+struct dentry *edac_debugfs_create_x8(const char *name, umode_t mode,
+                                      struct dentry *parent, u8 *value)
+{
+       if (!parent)
+               parent = edac_debugfs;
+
+       return debugfs_create_x8(name, mode, parent, value);
+}
+EXPORT_SYMBOL_GPL(edac_debugfs_create_x8);
+
+/* Wrapper for debugfs_create_x16() */
+struct dentry *edac_debugfs_create_x16(const char *name, umode_t mode,
+                                      struct dentry *parent, u16 *value)
+{
+       if (!parent)
+               parent = edac_debugfs;
+
+       return debugfs_create_x16(name, mode, parent, value);
+}
+EXPORT_SYMBOL_GPL(edac_debugfs_create_x16);
index ad42587c3f4d6e60d25ccb49fd9550b3341e6984..4861542163d704fe4f629777b04df791b5c790ea 100644 (file)
@@ -94,6 +94,8 @@ do {                                                                  \
 
 #define edac_dev_name(dev) (dev)->dev_name
 
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
 /*
  * The following are the structures to provide for a generic
  * or abstract 'edac_device'. This set of structures and the
index 943ed8cf71b946c8dfad8b33b2ee97711443e3ba..77ecd6a4179aaa2e2b6504da395debe6351508d9 100644 (file)
@@ -1302,7 +1302,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
        grain_bits = fls_long(e->grain) + 1;
        trace_mc_event(type, e->msg, e->label, e->error_count,
                       mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
-                      PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page,
+                      (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
                       grain_bits, e->syndrome, e->other_detail);
 
        edac_raw_mc_handle_error(type, mci, e);
index 33df7d93c857b6fcd65b4b136e3e7eaf0cf1ea62..a75acea0f674ed7ca9101ca7ce8a65e5b0961905 100644 (file)
@@ -229,7 +229,7 @@ static ssize_t channel_dimm_label_show(struct device *dev,
        if (!rank->dimm->label[0])
                return 0;
 
-       return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
+       return snprintf(data, sizeof(rank->dimm->label) + 1, "%s\n",
                        rank->dimm->label);
 }
 
@@ -240,14 +240,21 @@ static ssize_t channel_dimm_label_store(struct device *dev,
        struct csrow_info *csrow = to_csrow(dev);
        unsigned chan = to_channel(mattr);
        struct rank_info *rank = csrow->channels[chan];
+       size_t copy_count = count;
 
-       ssize_t max_size = 0;
+       if (count == 0)
+               return -EINVAL;
+
+       if (data[count - 1] == '\0' || data[count - 1] == '\n')
+               copy_count -= 1;
+
+       if (copy_count == 0 || copy_count >= sizeof(rank->dimm->label))
+               return -EINVAL;
 
-       max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
-       strncpy(rank->dimm->label, data, max_size);
-       rank->dimm->label[max_size] = '\0';
+       strncpy(rank->dimm->label, data, copy_count);
+       rank->dimm->label[copy_count] = '\0';
 
-       return max_size;
+       return count;
 }
 
 /* show function for dynamic chX_ce_count attribute */
@@ -485,7 +492,7 @@ static ssize_t dimmdev_label_show(struct device *dev,
        if (!dimm->label[0])
                return 0;
 
-       return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label);
+       return snprintf(data, sizeof(dimm->label) + 1, "%s\n", dimm->label);
 }
 
 static ssize_t dimmdev_label_store(struct device *dev,
@@ -494,14 +501,21 @@ static ssize_t dimmdev_label_store(struct device *dev,
                                   size_t count)
 {
        struct dimm_info *dimm = to_dimm(dev);
+       size_t copy_count = count;
 
-       ssize_t max_size = 0;
+       if (count == 0)
+               return -EINVAL;
 
-       max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
-       strncpy(dimm->label, data, max_size);
-       dimm->label[max_size] = '\0';
+       if (data[count - 1] == '\0' || data[count - 1] == '\n')
+               copy_count -= 1;
 
-       return max_size;
+       if (copy_count == 0 || copy_count >= sizeof(dimm->label))
+               return -EINVAL;
+
+       strncpy(dimm->label, data, copy_count);
+       dimm->label[copy_count] = '\0';
+
+       return count;
 }
 
 static ssize_t dimmdev_size_show(struct device *dev,
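
Both label stores above now validate their input before copying: empty writes are rejected, a single trailing newline or NUL is dropped, and anything that would overflow the fixed-size label buffer returns -EINVAL. A user-space sketch of that logic, with an invented buffer size:

#include <stdio.h>
#include <string.h>

#define LABEL_LEN 32    /* invented; stands in for the dimm label size */

static int store_label(char *label, const char *data, size_t count)
{
        size_t copy_count = count;

        if (count == 0)
                return -1;
        if (data[count - 1] == '\0' || data[count - 1] == '\n')
                copy_count--;                       /* strip one terminator */
        if (copy_count == 0 || copy_count >= LABEL_LEN)
                return -1;                          /* empty or too long */

        memcpy(label, data, copy_count);
        label[copy_count] = '\0';
        return 0;
}

int main(void)
{
        char label[LABEL_LEN];

        if (store_label(label, "DIMM_A1\n", 8) == 0)
                printf("stored '%s'\n", label);     /* stored 'DIMM_A1' */
        return 0;
}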
@@ -785,47 +799,6 @@ static ssize_t mci_max_location_show(struct device *dev,
        return p - data;
 }
 
-#ifdef CONFIG_EDAC_DEBUG
-static ssize_t edac_fake_inject_write(struct file *file,
-                                     const char __user *data,
-                                     size_t count, loff_t *ppos)
-{
-       struct device *dev = file->private_data;
-       struct mem_ctl_info *mci = to_mci(dev);
-       static enum hw_event_mc_err_type type;
-       u16 errcount = mci->fake_inject_count;
-
-       if (!errcount)
-               errcount = 1;
-
-       type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
-                                  : HW_EVENT_ERR_CORRECTED;
-
-       printk(KERN_DEBUG
-              "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
-               errcount,
-               (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
-               errcount > 1 ? "s" : "",
-               mci->fake_inject_layer[0],
-               mci->fake_inject_layer[1],
-               mci->fake_inject_layer[2]
-              );
-       edac_mc_handle_error(type, mci, errcount, 0, 0, 0,
-                            mci->fake_inject_layer[0],
-                            mci->fake_inject_layer[1],
-                            mci->fake_inject_layer[2],
-                            "FAKE ERROR", "for EDAC testing only");
-
-       return count;
-}
-
-static const struct file_operations debug_fake_inject_fops = {
-       .open = simple_open,
-       .write = edac_fake_inject_write,
-       .llseek = generic_file_llseek,
-};
-#endif
-
 /* default Control file */
 static DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
 
@@ -896,71 +869,6 @@ static struct device_type mci_attr_type = {
        .release        = mci_attr_release,
 };
 
-#ifdef CONFIG_EDAC_DEBUG
-static struct dentry *edac_debugfs;
-
-int __init edac_debugfs_init(void)
-{
-       edac_debugfs = debugfs_create_dir("edac", NULL);
-       if (IS_ERR(edac_debugfs)) {
-               edac_debugfs = NULL;
-               return -ENOMEM;
-       }
-       return 0;
-}
-
-void edac_debugfs_exit(void)
-{
-       debugfs_remove(edac_debugfs);
-}
-
-static int edac_create_debug_nodes(struct mem_ctl_info *mci)
-{
-       struct dentry *d, *parent;
-       char name[80];
-       int i;
-
-       if (!edac_debugfs)
-               return -ENODEV;
-
-       d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs);
-       if (!d)
-               return -ENOMEM;
-       parent = d;
-
-       for (i = 0; i < mci->n_layers; i++) {
-               sprintf(name, "fake_inject_%s",
-                            edac_layer_name[mci->layers[i].type]);
-               d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
-                                     &mci->fake_inject_layer[i]);
-               if (!d)
-                       goto nomem;
-       }
-
-       d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
-                               &mci->fake_inject_ue);
-       if (!d)
-               goto nomem;
-
-       d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent,
-                               &mci->fake_inject_count);
-       if (!d)
-               goto nomem;
-
-       d = debugfs_create_file("fake_inject", S_IWUSR, parent,
-                               &mci->dev,
-                               &debug_fake_inject_fops);
-       if (!d)
-               goto nomem;
-
-       mci->debugfs = parent;
-       return 0;
-nomem:
-       debugfs_remove(mci->debugfs);
-       return -ENOMEM;
-}
-#endif
-
 /*
  * Create a new Memory Controller kobject instance,
  *     mc<id> under the 'mc' directory
@@ -1039,9 +947,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
                goto fail_unregister_dimm;
 #endif
 
-#ifdef CONFIG_EDAC_DEBUG
-       edac_create_debug_nodes(mci);
-#endif
+       edac_create_debugfs_nodes(mci);
        return 0;
 
 fail_unregister_dimm:
@@ -1070,7 +976,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
        edac_dbg(0, "\n");
 
 #ifdef CONFIG_EDAC_DEBUG
-       debugfs_remove(mci->debugfs);
+       edac_debugfs_remove_recursive(mci->debugfs);
 #endif
 #ifdef CONFIG_EDAC_LEGACY_SYSFS
        edac_delete_csrow_objects(mci);
index 26ecc52e073d8b5dc246aa2897cd59fdf54700ad..b95a48fc723d587dce62116f330b46034fc3ecf1 100644 (file)
@@ -60,15 +60,39 @@ extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
 /*
  * EDAC debugfs functions
  */
+
+#define edac_debugfs_remove_recursive debugfs_remove_recursive
+#define edac_debugfs_remove debugfs_remove
 #ifdef CONFIG_EDAC_DEBUG
 int edac_debugfs_init(void);
 void edac_debugfs_exit(void);
+int edac_create_debugfs_nodes(struct mem_ctl_info *mci);
+struct dentry *edac_debugfs_create_dir(const char *dirname);
+struct dentry *
+edac_debugfs_create_dir_at(const char *dirname, struct dentry *parent);
+struct dentry *
+edac_debugfs_create_file(const char *name, umode_t mode, struct dentry *parent,
+                        void *data, const struct file_operations *fops);
+struct dentry *
+edac_debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, u8 *value);
+struct dentry *
+edac_debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent, u16 *value);
 #else
-static inline int edac_debugfs_init(void)
-{
-       return -ENODEV;
-}
-static inline void edac_debugfs_exit(void) {}
+static inline int edac_debugfs_init(void)                                      { return -ENODEV; }
+static inline void edac_debugfs_exit(void)                                     { }
+static inline int edac_create_debugfs_nodes(struct mem_ctl_info *mci)          { return 0; }
+static inline struct dentry *edac_debugfs_create_dir(const char *dirname)      { return NULL; }
+static inline struct dentry *
+edac_debugfs_create_dir_at(const char *dirname, struct dentry *parent)         { return NULL; }
+static inline struct dentry *
+edac_debugfs_create_file(const char *name, umode_t mode, struct dentry *parent,
+                        void *data, const struct file_operations *fops)        { return NULL; }
+static inline struct dentry *
+edac_debugfs_create_x8(const char *name, umode_t mode,
+                      struct dentry *parent, u8 *value)                        { return NULL; }
+static inline struct dentry *
+edac_debugfs_create_x16(const char *name, umode_t mode,
+                      struct dentry *parent, u16 *value)                       { return NULL; }
 #endif
 
 /*
index b24681998740033664331ed4ba5d8ced56af63d0..e3fa4390f8460685c0bd57fe8d52c9c043de719f 100644 (file)
@@ -66,26 +66,6 @@ struct ghes_edac_dimm_fill {
        unsigned count;
 };
 
-char *memory_type[] = {
-       [MEM_EMPTY] = "EMPTY",
-       [MEM_RESERVED] = "RESERVED",
-       [MEM_UNKNOWN] = "UNKNOWN",
-       [MEM_FPM] = "FPM",
-       [MEM_EDO] = "EDO",
-       [MEM_BEDO] = "BEDO",
-       [MEM_SDR] = "SDR",
-       [MEM_RDR] = "RDR",
-       [MEM_DDR] = "DDR",
-       [MEM_RDDR] = "RDDR",
-       [MEM_RMBS] = "RMBS",
-       [MEM_DDR2] = "DDR2",
-       [MEM_FB_DDR2] = "FB_DDR2",
-       [MEM_RDDR2] = "RDDR2",
-       [MEM_XDR] = "XDR",
-       [MEM_DDR3] = "DDR3",
-       [MEM_RDDR3] = "RDDR3",
-};
-
 static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
 {
        int *num_dimm = arg;
@@ -173,7 +153,7 @@ static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
 
                if (dimm->nr_pages) {
                        edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
-                               dimm_fill->count, memory_type[dimm->mtype],
+                               dimm_fill->count, edac_mem_types[dimm->mtype],
                                PAGES_TO_MiB(dimm->nr_pages),
                                (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
                        edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
@@ -417,7 +397,7 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
                 "APEI location: %s %s", e->location, e->other_detail);
        trace_mc_event(type, e->msg, e->label, e->error_count,
                       mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
-                      PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page,
+                      (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
                       grain_bits, e->syndrome, pvt->detail_location);
 
        /* Report the error via EDAC API */
index e9f8a393915a82f54b3c7c63920ccf29244b8e17..40917775dca1c84fa2f84acbea9a988ec94060e3 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/debugfs.h>
 
 #include "edac_core.h"
+#include "edac_module.h"
 
 /* register addresses */
 
@@ -966,25 +967,25 @@ static int i5100_setup_debugfs(struct mem_ctl_info *mci)
        if (!i5100_debugfs)
                return -ENODEV;
 
-       priv->debugfs = debugfs_create_dir(mci->bus->name, i5100_debugfs);
+       priv->debugfs = edac_debugfs_create_dir_at(mci->bus->name, i5100_debugfs);
 
        if (!priv->debugfs)
                return -ENOMEM;
 
-       debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs,
-                       &priv->inject_channel);
-       debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs,
-                       &priv->inject_hlinesel);
-       debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs,
-                       &priv->inject_deviceptr1);
-       debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs,
-                       &priv->inject_deviceptr2);
-       debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs,
-                       &priv->inject_eccmask1);
-       debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs,
-                       &priv->inject_eccmask2);
-       debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs,
-                       &mci->dev, &i5100_inject_enable_fops);
+       edac_debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs,
+                               &priv->inject_channel);
+       edac_debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs,
+                               &priv->inject_hlinesel);
+       edac_debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs,
+                               &priv->inject_deviceptr1);
+       edac_debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs,
+                               &priv->inject_deviceptr2);
+       edac_debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs,
+                               &priv->inject_eccmask1);
+       edac_debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs,
+                               &priv->inject_eccmask2);
+       edac_debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs,
+                               &mci->dev, &i5100_inject_enable_fops);
 
        return 0;
 
@@ -1189,7 +1190,7 @@ static void i5100_remove_one(struct pci_dev *pdev)
 
        priv = mci->pvt_info;
 
-       debugfs_remove_recursive(priv->debugfs);
+       edac_debugfs_remove_recursive(priv->debugfs);
 
        priv->scrub_enable = 0;
        cancel_delayed_work_sync(&(priv->i5100_scrubbing));
@@ -1223,7 +1224,7 @@ static int __init i5100_init(void)
 {
        int pci_rc;
 
-       i5100_debugfs = debugfs_create_dir("i5100_edac", NULL);
+       i5100_debugfs = edac_debugfs_create_dir_at("i5100_edac", NULL);
 
        pci_rc = pci_register_driver(&i5100_driver);
        return (pci_rc < 0) ? pci_rc : 0;
@@ -1231,7 +1232,7 @@ static int __init i5100_init(void)
 
 static void __exit i5100_exit(void)
 {
-       debugfs_remove(i5100_debugfs);
+       edac_debugfs_remove(i5100_debugfs);
 
        pci_unregister_driver(&i5100_driver);
 }
index 711d8ad74f116ebdcc7fd3833fbc0672c7a6359b..d3a64ba61fa3075beab3001d9270c572c8af9c9c 100644 (file)
@@ -199,6 +199,7 @@ static const struct of_device_id ppc4xx_edac_match[] = {
        },
        { }
 };
+MODULE_DEVICE_TABLE(of, ppc4xx_edac_match);
 
 static struct platform_driver ppc4xx_edac_driver = {
        .probe                  = ppc4xx_edac_probe,
index cf1268ddef0c058982ff45fb0bb0074a06d2f6b7..429309c62699ff9ea536a90b839dbb99b7305ecc 100644 (file)
@@ -1688,6 +1688,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
 {
        struct sbridge_pvt *pvt = mci->pvt_info;
        struct pci_dev *pdev;
+       u8 saw_chan_mask = 0;
        int i;
 
        for (i = 0; i < sbridge_dev->n_devs; i++) {
@@ -1721,6 +1722,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
                {
                        int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0;
                        pvt->pci_tad[id] = pdev;
+                       saw_chan_mask |= 1 << id;
                }
                        break;
                case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
@@ -1741,10 +1743,8 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
            !pvt-> pci_tad || !pvt->pci_ras  || !pvt->pci_ta)
                goto enodev;
 
-       for (i = 0; i < NUM_CHANNELS; i++) {
-               if (!pvt->pci_tad[i])
-                       goto enodev;
-       }
+       if (saw_chan_mask != 0x0f)
+               goto enodev;
        return 0;
 
 enodev:
index ba06904af2e1c2dc9f574caa8d82c4478f3c42df..41f876414a18d759ee2d89c33b5a4791ddcd6f43 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/regmap.h>
 
 #include "edac_core.h"
+#include "edac_module.h"
 
 #define EDAC_MOD_STR                   "xgene_edac"
 
@@ -62,10 +63,12 @@ struct xgene_edac {
        struct regmap           *efuse_map;
        void __iomem            *pcp_csr;
        spinlock_t              lock;
-       struct dentry           *dfs;
+       struct dentry           *dfs;
 
        struct list_head        mcus;
        struct list_head        pmds;
+       struct list_head        l3s;
+       struct list_head        socs;
 
        struct mutex            mc_lock;
        int                     mc_active_mask;
@@ -172,12 +175,12 @@ static void xgene_edac_mc_create_debugfs_node(struct mem_ctl_info *mci)
 {
        if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
                return;
-#ifdef CONFIG_EDAC_DEBUG
+
        if (!mci->debugfs)
                return;
-       debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
-                           &xgene_edac_mc_debug_inject_fops);
-#endif
+
+       edac_debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
+                                &xgene_edac_mc_debug_inject_fops);
 }
 
 static void xgene_edac_mc_check(struct mem_ctl_info *mci)
@@ -536,140 +539,134 @@ static void xgene_edac_pmd_l1_check(struct edac_device_ctl_info *edac_dev,
        pg_f = ctx->pmd_csr + cpu_idx * CPU_CSR_STRIDE + CPU_MEMERR_CPU_PAGE;
 
        val = readl(pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET);
-       if (val) {
-               dev_err(edac_dev->dev,
-                       "CPU%d L1 memory error ICF 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n",
-                       ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
-                       MEMERR_CPU_ICFESR_ERRWAY_RD(val),
-                       MEMERR_CPU_ICFESR_ERRINDEX_RD(val),
-                       MEMERR_CPU_ICFESR_ERRINFO_RD(val));
-               if (val & MEMERR_CPU_ICFESR_CERR_MASK)
-                       dev_err(edac_dev->dev,
-                               "One or more correctable error\n");
-               if (val & MEMERR_CPU_ICFESR_MULTCERR_MASK)
-                       dev_err(edac_dev->dev, "Multiple correctable error\n");
-               switch (MEMERR_CPU_ICFESR_ERRTYPE_RD(val)) {
-               case 1:
-                       dev_err(edac_dev->dev, "L1 TLB multiple hit\n");
-                       break;
-               case 2:
-                       dev_err(edac_dev->dev, "Way select multiple hit\n");
-                       break;
-               case 3:
-                       dev_err(edac_dev->dev, "Physical tag parity error\n");
-                       break;
-               case 4:
-               case 5:
-                       dev_err(edac_dev->dev, "L1 data parity error\n");
-                       break;
-               case 6:
-                       dev_err(edac_dev->dev, "L1 pre-decode parity error\n");
-                       break;
-               }
+       if (!val)
+               goto chk_lsu;
+       dev_err(edac_dev->dev,
+               "CPU%d L1 memory error ICF 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n",
+               ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
+               MEMERR_CPU_ICFESR_ERRWAY_RD(val),
+               MEMERR_CPU_ICFESR_ERRINDEX_RD(val),
+               MEMERR_CPU_ICFESR_ERRINFO_RD(val));
+       if (val & MEMERR_CPU_ICFESR_CERR_MASK)
+               dev_err(edac_dev->dev, "One or more correctable error\n");
+       if (val & MEMERR_CPU_ICFESR_MULTCERR_MASK)
+               dev_err(edac_dev->dev, "Multiple correctable error\n");
+       switch (MEMERR_CPU_ICFESR_ERRTYPE_RD(val)) {
+       case 1:
+               dev_err(edac_dev->dev, "L1 TLB multiple hit\n");
+               break;
+       case 2:
+               dev_err(edac_dev->dev, "Way select multiple hit\n");
+               break;
+       case 3:
+               dev_err(edac_dev->dev, "Physical tag parity error\n");
+               break;
+       case 4:
+       case 5:
+               dev_err(edac_dev->dev, "L1 data parity error\n");
+               break;
+       case 6:
+               dev_err(edac_dev->dev, "L1 pre-decode parity error\n");
+               break;
+       }
 
-               /* Clear any HW errors */
-               writel(val, pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET);
+       /* Clear any HW errors */
+       writel(val, pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET);
 
-               if (val & (MEMERR_CPU_ICFESR_CERR_MASK |
-                          MEMERR_CPU_ICFESR_MULTCERR_MASK))
-                       edac_device_handle_ce(edac_dev, 0, 0,
-                                             edac_dev->ctl_name);
-       }
+       if (val & (MEMERR_CPU_ICFESR_CERR_MASK |
+                  MEMERR_CPU_ICFESR_MULTCERR_MASK))
+               edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
 
+chk_lsu:
        val = readl(pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET);
-       if (val) {
+       if (!val)
+               goto chk_mmu;
+       dev_err(edac_dev->dev,
+               "CPU%d memory error LSU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n",
+               ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
+               MEMERR_CPU_LSUESR_ERRWAY_RD(val),
+               MEMERR_CPU_LSUESR_ERRINDEX_RD(val),
+               MEMERR_CPU_LSUESR_ERRINFO_RD(val));
+       if (val & MEMERR_CPU_LSUESR_CERR_MASK)
+               dev_err(edac_dev->dev, "One or more correctable error\n");
+       if (val & MEMERR_CPU_LSUESR_MULTCERR_MASK)
+               dev_err(edac_dev->dev, "Multiple correctable error\n");
+       switch (MEMERR_CPU_LSUESR_ERRTYPE_RD(val)) {
+       case 0:
+               dev_err(edac_dev->dev, "Load tag error\n");
+               break;
+       case 1:
+               dev_err(edac_dev->dev, "Load data error\n");
+               break;
+       case 2:
+               dev_err(edac_dev->dev, "WSL multihit error\n");
+               break;
+       case 3:
+               dev_err(edac_dev->dev, "Store tag error\n");
+               break;
+       case 4:
                dev_err(edac_dev->dev,
-                       "CPU%d memory error LSU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n",
-                       ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
-                       MEMERR_CPU_LSUESR_ERRWAY_RD(val),
-                       MEMERR_CPU_LSUESR_ERRINDEX_RD(val),
-                       MEMERR_CPU_LSUESR_ERRINFO_RD(val));
-               if (val & MEMERR_CPU_LSUESR_CERR_MASK)
-                       dev_err(edac_dev->dev,
-                               "One or more correctable error\n");
-               if (val & MEMERR_CPU_LSUESR_MULTCERR_MASK)
-                       dev_err(edac_dev->dev, "Multiple correctable error\n");
-               switch (MEMERR_CPU_LSUESR_ERRTYPE_RD(val)) {
-               case 0:
-                       dev_err(edac_dev->dev, "Load tag error\n");
-                       break;
-               case 1:
-                       dev_err(edac_dev->dev, "Load data error\n");
-                       break;
-               case 2:
-                       dev_err(edac_dev->dev, "WSL multihit error\n");
-                       break;
-               case 3:
-                       dev_err(edac_dev->dev, "Store tag error\n");
-                       break;
-               case 4:
-                       dev_err(edac_dev->dev,
-                               "DTB multihit from load pipeline error\n");
-                       break;
-               case 5:
-                       dev_err(edac_dev->dev,
-                               "DTB multihit from store pipeline error\n");
-                       break;
-               }
+                       "DTB multihit from load pipeline error\n");
+               break;
+       case 5:
+               dev_err(edac_dev->dev,
+                       "DTB multihit from store pipeline error\n");
+               break;
+       }
 
-               /* Clear any HW errors */
-               writel(val, pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET);
+       /* Clear any HW errors */
+       writel(val, pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET);
 
-               if (val & (MEMERR_CPU_LSUESR_CERR_MASK |
-                          MEMERR_CPU_LSUESR_MULTCERR_MASK))
-                       edac_device_handle_ce(edac_dev, 0, 0,
-                                             edac_dev->ctl_name);
-       }
+       if (val & (MEMERR_CPU_LSUESR_CERR_MASK |
+                  MEMERR_CPU_LSUESR_MULTCERR_MASK))
+               edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
 
+chk_mmu:
        val = readl(pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET);
-       if (val) {
-               dev_err(edac_dev->dev,
-                       "CPU%d memory error MMU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X %s\n",
-                       ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
-                       MEMERR_CPU_MMUESR_ERRWAY_RD(val),
-                       MEMERR_CPU_MMUESR_ERRINDEX_RD(val),
-                       MEMERR_CPU_MMUESR_ERRINFO_RD(val),
-                       val & MEMERR_CPU_MMUESR_ERRREQSTR_LSU_MASK ? "LSU" :
-                                                                    "ICF");
-               if (val & MEMERR_CPU_MMUESR_CERR_MASK)
-                       dev_err(edac_dev->dev,
-                               "One or more correctable error\n");
-               if (val & MEMERR_CPU_MMUESR_MULTCERR_MASK)
-                       dev_err(edac_dev->dev, "Multiple correctable error\n");
-               switch (MEMERR_CPU_MMUESR_ERRTYPE_RD(val)) {
-               case 0:
-                       dev_err(edac_dev->dev, "Stage 1 UTB hit error\n");
-                       break;
-               case 1:
-                       dev_err(edac_dev->dev, "Stage 1 UTB miss error\n");
-                       break;
-               case 2:
-                       dev_err(edac_dev->dev, "Stage 1 UTB allocate error\n");
-                       break;
-               case 3:
-                       dev_err(edac_dev->dev,
-                               "TMO operation single bank error\n");
-                       break;
-               case 4:
-                       dev_err(edac_dev->dev, "Stage 2 UTB error\n");
-                       break;
-               case 5:
-                       dev_err(edac_dev->dev, "Stage 2 UTB miss error\n");
-                       break;
-               case 6:
-                       dev_err(edac_dev->dev, "Stage 2 UTB allocate error\n");
-                       break;
-               case 7:
-                       dev_err(edac_dev->dev,
-                               "TMO operation multiple bank error\n");
-                       break;
-               }
+       if (!val)
+               return;
+       dev_err(edac_dev->dev,
+               "CPU%d memory error MMU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X %s\n",
+               ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
+               MEMERR_CPU_MMUESR_ERRWAY_RD(val),
+               MEMERR_CPU_MMUESR_ERRINDEX_RD(val),
+               MEMERR_CPU_MMUESR_ERRINFO_RD(val),
+               val & MEMERR_CPU_MMUESR_ERRREQSTR_LSU_MASK ? "LSU" : "ICF");
+       if (val & MEMERR_CPU_MMUESR_CERR_MASK)
+               dev_err(edac_dev->dev, "One or more correctable error\n");
+       if (val & MEMERR_CPU_MMUESR_MULTCERR_MASK)
+               dev_err(edac_dev->dev, "Multiple correctable error\n");
+       switch (MEMERR_CPU_MMUESR_ERRTYPE_RD(val)) {
+       case 0:
+               dev_err(edac_dev->dev, "Stage 1 UTB hit error\n");
+               break;
+       case 1:
+               dev_err(edac_dev->dev, "Stage 1 UTB miss error\n");
+               break;
+       case 2:
+               dev_err(edac_dev->dev, "Stage 1 UTB allocate error\n");
+               break;
+       case 3:
+               dev_err(edac_dev->dev, "TMO operation single bank error\n");
+               break;
+       case 4:
+               dev_err(edac_dev->dev, "Stage 2 UTB error\n");
+               break;
+       case 5:
+               dev_err(edac_dev->dev, "Stage 2 UTB miss error\n");
+               break;
+       case 6:
+               dev_err(edac_dev->dev, "Stage 2 UTB allocate error\n");
+               break;
+       case 7:
+               dev_err(edac_dev->dev, "TMO operation multiple bank error\n");
+               break;
+       }
 
-               /* Clear any HW errors */
-               writel(val, pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET);
+       /* Clear any HW errors */
+       writel(val, pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET);
 
-               edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
-       }
+       edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
 }
 
 static void xgene_edac_pmd_l2_check(struct edac_device_ctl_info *edac_dev)
@@ -684,60 +681,56 @@ static void xgene_edac_pmd_l2_check(struct edac_device_ctl_info *edac_dev)
        /* Check L2 */
        pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE;
        val = readl(pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET);
-       if (val) {
-               val_lo = readl(pg_e + MEMERR_L2C_L2EALR_PAGE_OFFSET);
-               val_hi = readl(pg_e + MEMERR_L2C_L2EAHR_PAGE_OFFSET);
-               dev_err(edac_dev->dev,
-                       "PMD%d memory error L2C L2ESR 0x%08X @ 0x%08X.%08X\n",
-                       ctx->pmd, val, val_hi, val_lo);
-               dev_err(edac_dev->dev,
-                       "ErrSyndrome 0x%02X ErrWay 0x%02X ErrCpu %d ErrGroup 0x%02X ErrAction 0x%02X\n",
-                       MEMERR_L2C_L2ESR_ERRSYN_RD(val),
-                       MEMERR_L2C_L2ESR_ERRWAY_RD(val),
-                       MEMERR_L2C_L2ESR_ERRCPU_RD(val),
-                       MEMERR_L2C_L2ESR_ERRGROUP_RD(val),
-                       MEMERR_L2C_L2ESR_ERRACTION_RD(val));
-
-               if (val & MEMERR_L2C_L2ESR_ERR_MASK)
-                       dev_err(edac_dev->dev,
-                               "One or more correctable error\n");
-               if (val & MEMERR_L2C_L2ESR_MULTICERR_MASK)
-                       dev_err(edac_dev->dev, "Multiple correctable error\n");
-               if (val & MEMERR_L2C_L2ESR_UCERR_MASK)
-                       dev_err(edac_dev->dev,
-                               "One or more uncorrectable error\n");
-               if (val & MEMERR_L2C_L2ESR_MULTUCERR_MASK)
-                       dev_err(edac_dev->dev,
-                               "Multiple uncorrectable error\n");
-
-               switch (MEMERR_L2C_L2ESR_ERRTYPE_RD(val)) {
-               case 0:
-                       dev_err(edac_dev->dev, "Outbound SDB parity error\n");
-                       break;
-               case 1:
-                       dev_err(edac_dev->dev, "Inbound SDB parity error\n");
-                       break;
-               case 2:
-                       dev_err(edac_dev->dev, "Tag ECC error\n");
-                       break;
-               case 3:
-                       dev_err(edac_dev->dev, "Data ECC error\n");
-                       break;
-               }
+       if (!val)
+               goto chk_l2c;
+       val_lo = readl(pg_e + MEMERR_L2C_L2EALR_PAGE_OFFSET);
+       val_hi = readl(pg_e + MEMERR_L2C_L2EAHR_PAGE_OFFSET);
+       dev_err(edac_dev->dev,
+               "PMD%d memory error L2C L2ESR 0x%08X @ 0x%08X.%08X\n",
+               ctx->pmd, val, val_hi, val_lo);
+       dev_err(edac_dev->dev,
+               "ErrSyndrome 0x%02X ErrWay 0x%02X ErrCpu %d ErrGroup 0x%02X ErrAction 0x%02X\n",
+               MEMERR_L2C_L2ESR_ERRSYN_RD(val),
+               MEMERR_L2C_L2ESR_ERRWAY_RD(val),
+               MEMERR_L2C_L2ESR_ERRCPU_RD(val),
+               MEMERR_L2C_L2ESR_ERRGROUP_RD(val),
+               MEMERR_L2C_L2ESR_ERRACTION_RD(val));
+
+       if (val & MEMERR_L2C_L2ESR_ERR_MASK)
+               dev_err(edac_dev->dev, "One or more correctable error\n");
+       if (val & MEMERR_L2C_L2ESR_MULTICERR_MASK)
+               dev_err(edac_dev->dev, "Multiple correctable error\n");
+       if (val & MEMERR_L2C_L2ESR_UCERR_MASK)
+               dev_err(edac_dev->dev, "One or more uncorrectable error\n");
+       if (val & MEMERR_L2C_L2ESR_MULTUCERR_MASK)
+               dev_err(edac_dev->dev, "Multiple uncorrectable error\n");
+
+       switch (MEMERR_L2C_L2ESR_ERRTYPE_RD(val)) {
+       case 0:
+               dev_err(edac_dev->dev, "Outbound SDB parity error\n");
+               break;
+       case 1:
+               dev_err(edac_dev->dev, "Inbound SDB parity error\n");
+               break;
+       case 2:
+               dev_err(edac_dev->dev, "Tag ECC error\n");
+               break;
+       case 3:
+               dev_err(edac_dev->dev, "Data ECC error\n");
+               break;
+       }
 
-               /* Clear any HW errors */
-               writel(val, pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET);
+       /* Clear any HW errors */
+       writel(val, pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET);
 
-               if (val & (MEMERR_L2C_L2ESR_ERR_MASK |
-                          MEMERR_L2C_L2ESR_MULTICERR_MASK))
-                       edac_device_handle_ce(edac_dev, 0, 0,
-                                             edac_dev->ctl_name);
-               if (val & (MEMERR_L2C_L2ESR_UCERR_MASK |
-                          MEMERR_L2C_L2ESR_MULTUCERR_MASK))
-                       edac_device_handle_ue(edac_dev, 0, 0,
-                                             edac_dev->ctl_name);
-       }
+       if (val & (MEMERR_L2C_L2ESR_ERR_MASK |
+                  MEMERR_L2C_L2ESR_MULTICERR_MASK))
+               edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
+       if (val & (MEMERR_L2C_L2ESR_UCERR_MASK |
+                  MEMERR_L2C_L2ESR_MULTUCERR_MASK))
+               edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
 
+chk_l2c:
        /* Check if any memory request timed out on L2 cache */
        pg_d = ctx->pmd_csr + CPU_L2C_PAGE;
        val = readl(pg_d + CPUX_L2C_L2RTOSR_PAGE_OFFSET);
@@ -877,35 +870,25 @@ static const struct file_operations xgene_edac_pmd_debug_inject_fops[] = {
        { }
 };
 
-static void xgene_edac_pmd_create_debugfs_nodes(
-       struct edac_device_ctl_info *edac_dev)
+static void
+xgene_edac_pmd_create_debugfs_nodes(struct edac_device_ctl_info *edac_dev)
 {
        struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
-       struct dentry *edac_debugfs;
-       char name[30];
+       struct dentry *dbgfs_dir;
+       char name[10];
 
-       if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
+       if (!IS_ENABLED(CONFIG_EDAC_DEBUG) || !ctx->edac->dfs)
                return;
 
-       /*
-        * Todo: Switch to common EDAC debug file system for edac device
-        *       when available.
-        */
-       if (!ctx->edac->dfs) {
-               ctx->edac->dfs = debugfs_create_dir(edac_dev->dev->kobj.name,
-                                                   NULL);
-               if (!ctx->edac->dfs)
-                       return;
-       }
-       sprintf(name, "PMD%d", ctx->pmd);
-       edac_debugfs = debugfs_create_dir(name, ctx->edac->dfs);
-       if (!edac_debugfs)
+       snprintf(name, sizeof(name), "PMD%d", ctx->pmd);
+       dbgfs_dir = edac_debugfs_create_dir_at(name, ctx->edac->dfs);
+       if (!dbgfs_dir)
                return;
 
-       debugfs_create_file("l1_inject_ctrl", S_IWUSR, edac_debugfs, edac_dev,
-                           &xgene_edac_pmd_debug_inject_fops[0]);
-       debugfs_create_file("l2_inject_ctrl", S_IWUSR, edac_debugfs, edac_dev,
-                           &xgene_edac_pmd_debug_inject_fops[1]);
+       edac_debugfs_create_file("l1_inject_ctrl", S_IWUSR, dbgfs_dir, edac_dev,
+                                &xgene_edac_pmd_debug_inject_fops[0]);
+       edac_debugfs_create_file("l2_inject_ctrl", S_IWUSR, dbgfs_dir, edac_dev,
+                                &xgene_edac_pmd_debug_inject_fops[1]);
 }
 
 static int xgene_edac_pmd_available(u32 efuse, int pmd)
@@ -941,7 +924,7 @@ static int xgene_edac_pmd_add(struct xgene_edac *edac, struct device_node *np,
                goto err_group;
        }
 
-       sprintf(edac_name, "l2c%d", pmd);
+       snprintf(edac_name, sizeof(edac_name), "l2c%d", pmd);
        edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
                                              edac_name, 1, "l2c", 1, 2, NULL,
                                              0, edac_device_alloc_index());
@@ -1016,10 +999,780 @@ static int xgene_edac_pmd_remove(struct xgene_edac_pmd_ctx *pmd)
        return 0;
 }
 
+/* L3 Error device */
+#define L3C_ESR                                (0x0A * 4)
+#define  L3C_ESR_DATATAG_MASK          BIT(9)
+#define  L3C_ESR_MULTIHIT_MASK         BIT(8)
+#define  L3C_ESR_UCEVICT_MASK          BIT(6)
+#define  L3C_ESR_MULTIUCERR_MASK       BIT(5)
+#define  L3C_ESR_MULTICERR_MASK                BIT(4)
+#define  L3C_ESR_UCERR_MASK            BIT(3)
+#define  L3C_ESR_CERR_MASK             BIT(2)
+#define  L3C_ESR_UCERRINTR_MASK                BIT(1)
+#define  L3C_ESR_CERRINTR_MASK         BIT(0)
+#define L3C_ECR                                (0x0B * 4)
+#define  L3C_ECR_UCINTREN              BIT(3)
+#define  L3C_ECR_CINTREN               BIT(2)
+#define  L3C_UCERREN                   BIT(1)
+#define  L3C_CERREN                    BIT(0)
+#define L3C_ELR                                (0x0C * 4)
+#define  L3C_ELR_ERRSYN(src)           ((src & 0xFF800000) >> 23)
+#define  L3C_ELR_ERRWAY(src)           ((src & 0x007E0000) >> 17)
+#define  L3C_ELR_AGENTID(src)          ((src & 0x0001E000) >> 13)
+#define  L3C_ELR_ERRGRP(src)           ((src & 0x00000F00) >> 8)
+#define  L3C_ELR_OPTYPE(src)           ((src & 0x000000F0) >> 4)
+#define  L3C_ELR_PADDRHIGH(src)                (src & 0x0000000F)
+#define L3C_AELR                       (0x0D * 4)
+#define L3C_BELR                       (0x0E * 4)
+#define  L3C_BELR_BANK(src)            (src & 0x0000000F)
+
+struct xgene_edac_dev_ctx {
+       struct list_head        next;
+       struct device           ddev;
+       char                    *name;
+       struct xgene_edac       *edac;
+       struct edac_device_ctl_info *edac_dev;
+       int                     edac_idx;
+       void __iomem            *dev_csr;
+       int                     version;
+};
+
+/*
+ * Version 1 of the L3 controller has broken single bit correctable logic for
+ * certain error syndromes. Log them as uncorrectable in that case.
+ */
+static bool xgene_edac_l3_promote_to_uc_err(u32 l3cesr, u32 l3celr)
+{
+       if (l3cesr & L3C_ESR_DATATAG_MASK) {
+               switch (L3C_ELR_ERRSYN(l3celr)) {
+               case 0x13C:
+               case 0x0B4:
+               case 0x007:
+               case 0x00D:
+               case 0x00E:
+               case 0x019:
+               case 0x01A:
+               case 0x01C:
+               case 0x04E:
+               case 0x041:
+                       return true;
+               }
+       } else if (L3C_ELR_ERRSYN(l3celr) == 9)
+               return true;
+
+       return false;
+}
+
+static void xgene_edac_l3_check(struct edac_device_ctl_info *edac_dev)
+{
+       struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+       u32 l3cesr;
+       u32 l3celr;
+       u32 l3caelr;
+       u32 l3cbelr;
+
+       l3cesr = readl(ctx->dev_csr + L3C_ESR);
+       if (!(l3cesr & (L3C_ESR_UCERR_MASK | L3C_ESR_CERR_MASK)))
+               return;
+
+       if (l3cesr & L3C_ESR_UCERR_MASK)
+               dev_err(edac_dev->dev, "L3C uncorrectable error\n");
+       if (l3cesr & L3C_ESR_CERR_MASK)
+               dev_warn(edac_dev->dev, "L3C correctable error\n");
+
+       l3celr = readl(ctx->dev_csr + L3C_ELR);
+       l3caelr = readl(ctx->dev_csr + L3C_AELR);
+       l3cbelr = readl(ctx->dev_csr + L3C_BELR);
+       if (l3cesr & L3C_ESR_MULTIHIT_MASK)
+               dev_err(edac_dev->dev, "L3C multiple hit error\n");
+       if (l3cesr & L3C_ESR_UCEVICT_MASK)
+               dev_err(edac_dev->dev,
+                       "L3C dropped eviction of line with error\n");
+       if (l3cesr & L3C_ESR_MULTIUCERR_MASK)
+               dev_err(edac_dev->dev, "L3C multiple uncorrectable error\n");
+       if (l3cesr & L3C_ESR_DATATAG_MASK)
+               dev_err(edac_dev->dev,
+                       "L3C data error syndrome 0x%X group 0x%X\n",
+                       L3C_ELR_ERRSYN(l3celr), L3C_ELR_ERRGRP(l3celr));
+       else
+               dev_err(edac_dev->dev,
+                       "L3C tag error syndrome 0x%X Way of Tag 0x%X Agent ID 0x%X Operation type 0x%X\n",
+                       L3C_ELR_ERRSYN(l3celr), L3C_ELR_ERRWAY(l3celr),
+                       L3C_ELR_AGENTID(l3celr), L3C_ELR_OPTYPE(l3celr));
+       /*
+        * NOTE: Address [41:38] in L3C_ELR_PADDRHIGH(l3celr).
+        *       Address [37:6] in l3caelr. Lower 6 bits are zero.
+        */
+       dev_err(edac_dev->dev, "L3C error address 0x%08X.%08X bank %d\n",
+               L3C_ELR_PADDRHIGH(l3celr) << 6 | (l3caelr >> 26),
+               (l3caelr & 0x3FFFFFFF) << 6, L3C_BELR_BANK(l3cbelr));
+       dev_err(edac_dev->dev,
+               "L3C error status register value 0x%X\n", l3cesr);
+
+       /* Clear L3C error interrupt */
+       writel(0, ctx->dev_csr + L3C_ESR);
+
+       if (ctx->version <= 1 &&
+           xgene_edac_l3_promote_to_uc_err(l3cesr, l3celr)) {
+               edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+               return;
+       }
+       if (l3cesr & L3C_ESR_CERR_MASK)
+               edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
+       if (l3cesr & L3C_ESR_UCERR_MASK)
+               edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static void xgene_edac_l3_hw_init(struct edac_device_ctl_info *edac_dev,
+                                 bool enable)
+{
+       struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+       u32 val;
+
+       val = readl(ctx->dev_csr + L3C_ECR);
+       val |= L3C_UCERREN | L3C_CERREN;
+       /* On disable, we just disable interrupt but keep error enabled */
+       if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
+               if (enable)
+                       val |= L3C_ECR_UCINTREN | L3C_ECR_CINTREN;
+               else
+                       val &= ~(L3C_ECR_UCINTREN | L3C_ECR_CINTREN);
+       }
+       writel(val, ctx->dev_csr + L3C_ECR);
+
+       if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
+               /* Enable/disable L3 error top level interrupt */
+               if (enable) {
+                       xgene_edac_pcp_clrbits(ctx->edac, PCPHPERRINTMSK,
+                                              L3C_UNCORR_ERR_MASK);
+                       xgene_edac_pcp_clrbits(ctx->edac, PCPLPERRINTMSK,
+                                              L3C_CORR_ERR_MASK);
+               } else {
+                       xgene_edac_pcp_setbits(ctx->edac, PCPHPERRINTMSK,
+                                              L3C_UNCORR_ERR_MASK);
+                       xgene_edac_pcp_setbits(ctx->edac, PCPLPERRINTMSK,
+                                              L3C_CORR_ERR_MASK);
+               }
+       }
+}
+
+static ssize_t xgene_edac_l3_inject_ctrl_write(struct file *file,
+                                              const char __user *data,
+                                              size_t count, loff_t *ppos)
+{
+       struct edac_device_ctl_info *edac_dev = file->private_data;
+       struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+
+       /* Generate all errors */
+       writel(0xFFFFFFFF, ctx->dev_csr + L3C_ESR);
+       return count;
+}
+
+static const struct file_operations xgene_edac_l3_debug_inject_fops = {
+       .open = simple_open,
+       .write = xgene_edac_l3_inject_ctrl_write,
+       .llseek = generic_file_llseek
+};
+
+static void
+xgene_edac_l3_create_debugfs_nodes(struct edac_device_ctl_info *edac_dev)
+{
+       struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+       struct dentry *dbgfs_dir;
+       char name[10];
+
+       if (!IS_ENABLED(CONFIG_EDAC_DEBUG) || !ctx->edac->dfs)
+               return;
+
+       snprintf(name, sizeof(name), "l3c%d", ctx->edac_idx);
+       dbgfs_dir = edac_debugfs_create_dir_at(name, ctx->edac->dfs);
+       if (!dbgfs_dir)
+               return;
+
+       debugfs_create_file("l3_inject_ctrl", S_IWUSR, dbgfs_dir, edac_dev,
+                           &xgene_edac_l3_debug_inject_fops);
+}
+
+static int xgene_edac_l3_add(struct xgene_edac *edac, struct device_node *np,
+                            int version)
+{
+       struct edac_device_ctl_info *edac_dev;
+       struct xgene_edac_dev_ctx *ctx;
+       struct resource res;
+       void __iomem *dev_csr;
+       int edac_idx;
+       int rc = 0;
+
+       if (!devres_open_group(edac->dev, xgene_edac_l3_add, GFP_KERNEL))
+               return -ENOMEM;
+
+       rc = of_address_to_resource(np, 0, &res);
+       if (rc < 0) {
+               dev_err(edac->dev, "no L3 resource address\n");
+               goto err_release_group;
+       }
+       dev_csr = devm_ioremap_resource(edac->dev, &res);
+       if (IS_ERR(dev_csr)) {
+               dev_err(edac->dev,
+                       "devm_ioremap_resource failed for L3 resource address\n");
+               rc = PTR_ERR(dev_csr);
+               goto err_release_group;
+       }
+
+       edac_idx = edac_device_alloc_index();
+       edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
+                                             "l3c", 1, "l3c", 1, 0, NULL, 0,
+                                             edac_idx);
+       if (!edac_dev) {
+               rc = -ENOMEM;
+               goto err_release_group;
+       }
+
+       ctx = edac_dev->pvt_info;
+       ctx->dev_csr = dev_csr;
+       ctx->name = "xgene_l3_err";
+       ctx->edac_idx = edac_idx;
+       ctx->edac = edac;
+       ctx->edac_dev = edac_dev;
+       ctx->ddev = *edac->dev;
+       ctx->version = version;
+       edac_dev->dev = &ctx->ddev;
+       edac_dev->ctl_name = ctx->name;
+       edac_dev->dev_name = ctx->name;
+       edac_dev->mod_name = EDAC_MOD_STR;
+
+       if (edac_op_state == EDAC_OPSTATE_POLL)
+               edac_dev->edac_check = xgene_edac_l3_check;
+
+       xgene_edac_l3_create_debugfs_nodes(edac_dev);
+
+       rc = edac_device_add_device(edac_dev);
+       if (rc > 0) {
+               dev_err(edac->dev, "failed edac_device_add_device()\n");
+               rc = -ENOMEM;
+               goto err_ctl_free;
+       }
+
+       if (edac_op_state == EDAC_OPSTATE_INT)
+               edac_dev->op_state = OP_RUNNING_INTERRUPT;
+
+       list_add(&ctx->next, &edac->l3s);
+
+       xgene_edac_l3_hw_init(edac_dev, 1);
+
+       devres_remove_group(edac->dev, xgene_edac_l3_add);
+
+       dev_info(edac->dev, "X-Gene EDAC L3 registered\n");
+       return 0;
+
+err_ctl_free:
+       edac_device_free_ctl_info(edac_dev);
+err_release_group:
+       devres_release_group(edac->dev, xgene_edac_l3_add);
+       return rc;
+}
+
+static int xgene_edac_l3_remove(struct xgene_edac_dev_ctx *l3)
+{
+       struct edac_device_ctl_info *edac_dev = l3->edac_dev;
+
+       xgene_edac_l3_hw_init(edac_dev, 0);
+       edac_device_del_device(l3->edac->dev);
+       edac_device_free_ctl_info(edac_dev);
+       return 0;
+}
+
+/* SoC error device */
+#define IOBAXIS0TRANSERRINTSTS         0x0000
+#define  IOBAXIS0_M_ILLEGAL_ACCESS_MASK        BIT(1)
+#define  IOBAXIS0_ILLEGAL_ACCESS_MASK  BIT(0)
+#define IOBAXIS0TRANSERRINTMSK         0x0004
+#define IOBAXIS0TRANSERRREQINFOL       0x0008
+#define IOBAXIS0TRANSERRREQINFOH       0x000c
+#define  REQTYPE_RD(src)               (((src) & BIT(0)))
+#define  ERRADDRH_RD(src)              (((src) & 0xffc00000) >> 22)
+#define IOBAXIS1TRANSERRINTSTS         0x0010
+#define IOBAXIS1TRANSERRINTMSK         0x0014
+#define IOBAXIS1TRANSERRREQINFOL       0x0018
+#define IOBAXIS1TRANSERRREQINFOH       0x001c
+#define IOBPATRANSERRINTSTS            0x0020
+#define  IOBPA_M_REQIDRAM_CORRUPT_MASK BIT(7)
+#define  IOBPA_REQIDRAM_CORRUPT_MASK   BIT(6)
+#define  IOBPA_M_TRANS_CORRUPT_MASK    BIT(5)
+#define  IOBPA_TRANS_CORRUPT_MASK      BIT(4)
+#define  IOBPA_M_WDATA_CORRUPT_MASK    BIT(3)
+#define  IOBPA_WDATA_CORRUPT_MASK      BIT(2)
+#define  IOBPA_M_RDATA_CORRUPT_MASK    BIT(1)
+#define  IOBPA_RDATA_CORRUPT_MASK      BIT(0)
+#define IOBBATRANSERRINTSTS            0x0030
+#define  M_ILLEGAL_ACCESS_MASK         BIT(15)
+#define  ILLEGAL_ACCESS_MASK           BIT(14)
+#define  M_WIDRAM_CORRUPT_MASK         BIT(13)
+#define  WIDRAM_CORRUPT_MASK           BIT(12)
+#define  M_RIDRAM_CORRUPT_MASK         BIT(11)
+#define  RIDRAM_CORRUPT_MASK           BIT(10)
+#define  M_TRANS_CORRUPT_MASK          BIT(9)
+#define  TRANS_CORRUPT_MASK            BIT(8)
+#define  M_WDATA_CORRUPT_MASK          BIT(7)
+#define  WDATA_CORRUPT_MASK            BIT(6)
+#define  M_RBM_POISONED_REQ_MASK       BIT(5)
+#define  RBM_POISONED_REQ_MASK         BIT(4)
+#define  M_XGIC_POISONED_REQ_MASK      BIT(3)
+#define  XGIC_POISONED_REQ_MASK                BIT(2)
+#define  M_WRERR_RESP_MASK             BIT(1)
+#define  WRERR_RESP_MASK               BIT(0)
+#define IOBBATRANSERRREQINFOL          0x0038
+#define IOBBATRANSERRREQINFOH          0x003c
+#define  REQTYPE_F2_RD(src)            ((src) & BIT(0))
+#define  ERRADDRH_F2_RD(src)           (((src) & 0xffc00000) >> 22)
+#define IOBBATRANSERRCSWREQID          0x0040
+#define XGICTRANSERRINTSTS             0x0050
+#define  M_WR_ACCESS_ERR_MASK          BIT(3)
+#define  WR_ACCESS_ERR_MASK            BIT(2)
+#define  M_RD_ACCESS_ERR_MASK          BIT(1)
+#define  RD_ACCESS_ERR_MASK            BIT(0)
+#define XGICTRANSERRINTMSK             0x0054
+#define XGICTRANSERRREQINFO            0x0058
+#define  REQTYPE_MASK                  BIT(26)
+#define  ERRADDR_RD(src)               ((src) & 0x03ffffff)
+#define GLBL_ERR_STS                   0x0800
+#define  MDED_ERR_MASK                 BIT(3)
+#define  DED_ERR_MASK                  BIT(2)
+#define  MSEC_ERR_MASK                 BIT(1)
+#define  SEC_ERR_MASK                  BIT(0)
+#define GLBL_SEC_ERRL                  0x0810
+#define GLBL_SEC_ERRH                  0x0818
+#define GLBL_MSEC_ERRL                 0x0820
+#define GLBL_MSEC_ERRH                 0x0828
+#define GLBL_DED_ERRL                  0x0830
+#define GLBL_DED_ERRLMASK              0x0834
+#define GLBL_DED_ERRH                  0x0838
+#define GLBL_DED_ERRHMASK              0x083c
+#define GLBL_MDED_ERRL                 0x0840
+#define GLBL_MDED_ERRLMASK             0x0844
+#define GLBL_MDED_ERRH                 0x0848
+#define GLBL_MDED_ERRHMASK             0x084c
+
+static const char * const soc_mem_err_v1[] = {
+       "10GbE0",
+       "10GbE1",
+       "Security",
+       "SATA45",
+       "SATA23/ETH23",
+       "SATA01/ETH01",
+       "USB1",
+       "USB0",
+       "QML",
+       "QM0",
+       "QM1 (XGbE01)",
+       "PCIE4",
+       "PCIE3",
+       "PCIE2",
+       "PCIE1",
+       "PCIE0",
+       "CTX Manager",
+       "OCM",
+       "1GbE",
+       "CLE",
+       "AHBC",
+       "PktDMA",
+       "GFC",
+       "MSLIM",
+       "10GbE2",
+       "10GbE3",
+       "QM2 (XGbE23)",
+       "IOB",
+       "unknown",
+       "unknown",
+       "unknown",
+       "unknown",
+};
+
+static void xgene_edac_iob_gic_report(struct edac_device_ctl_info *edac_dev)
+{
+       struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+       u32 err_addr_lo;
+       u32 err_addr_hi;
+       u32 reg;
+       u32 info;
+
+       /* GIC transaction error interrupt */
+       reg = readl(ctx->dev_csr + XGICTRANSERRINTSTS);
+       if (!reg)
+               goto chk_iob_err;
+       dev_err(edac_dev->dev, "XGIC transaction error\n");
+       if (reg & RD_ACCESS_ERR_MASK)
+               dev_err(edac_dev->dev, "XGIC read size error\n");
+       if (reg & M_RD_ACCESS_ERR_MASK)
+               dev_err(edac_dev->dev, "Multiple XGIC read size error\n");
+       if (reg & WR_ACCESS_ERR_MASK)
+               dev_err(edac_dev->dev, "XGIC write size error\n");
+       if (reg & M_WR_ACCESS_ERR_MASK)
+               dev_err(edac_dev->dev, "Multiple XGIC write size error\n");
+       info = readl(ctx->dev_csr + XGICTRANSERRREQINFO);
+       dev_err(edac_dev->dev, "XGIC %s access @ 0x%08X (0x%08X)\n",
+               info & REQTYPE_MASK ? "read" : "write", ERRADDR_RD(info),
+               info);
+       writel(reg, ctx->dev_csr + XGICTRANSERRINTSTS);
+
+chk_iob_err:
+       /* IOB memory error */
+       reg = readl(ctx->dev_csr + GLBL_ERR_STS);
+       if (!reg)
+               return;
+       if (reg & SEC_ERR_MASK) {
+               err_addr_lo = readl(ctx->dev_csr + GLBL_SEC_ERRL);
+               err_addr_hi = readl(ctx->dev_csr + GLBL_SEC_ERRH);
+               dev_err(edac_dev->dev,
+                       "IOB single-bit correctable memory at 0x%08X.%08X error\n",
+                       err_addr_lo, err_addr_hi);
+               writel(err_addr_lo, ctx->dev_csr + GLBL_SEC_ERRL);
+               writel(err_addr_hi, ctx->dev_csr + GLBL_SEC_ERRH);
+       }
+       if (reg & MSEC_ERR_MASK) {
+               err_addr_lo = readl(ctx->dev_csr + GLBL_MSEC_ERRL);
+               err_addr_hi = readl(ctx->dev_csr + GLBL_MSEC_ERRH);
+               dev_err(edac_dev->dev,
+                       "IOB multiple single-bit correctable memory at 0x%08X.%08X error\n",
+                       err_addr_lo, err_addr_hi);
+               writel(err_addr_lo, ctx->dev_csr + GLBL_MSEC_ERRL);
+               writel(err_addr_hi, ctx->dev_csr + GLBL_MSEC_ERRH);
+       }
+       if (reg & (SEC_ERR_MASK | MSEC_ERR_MASK))
+               edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
+
+       if (reg & DED_ERR_MASK) {
+               err_addr_lo = readl(ctx->dev_csr + GLBL_DED_ERRL);
+               err_addr_hi = readl(ctx->dev_csr + GLBL_DED_ERRH);
+               dev_err(edac_dev->dev,
+                       "IOB double-bit uncorrectable memory at 0x%08X.%08X error\n",
+                       err_addr_lo, err_addr_hi);
+               writel(err_addr_lo, ctx->dev_csr + GLBL_DED_ERRL);
+               writel(err_addr_hi, ctx->dev_csr + GLBL_DED_ERRH);
+       }
+       if (reg & MDED_ERR_MASK) {
+               err_addr_lo = readl(ctx->dev_csr + GLBL_MDED_ERRL);
+               err_addr_hi = readl(ctx->dev_csr + GLBL_MDED_ERRH);
+               dev_err(edac_dev->dev,
+                       "Multiple IOB double-bit uncorrectable memory at 0x%08X.%08X error\n",
+                       err_addr_lo, err_addr_hi);
+               writel(err_addr_lo, ctx->dev_csr + GLBL_MDED_ERRL);
+               writel(err_addr_hi, ctx->dev_csr + GLBL_MDED_ERRH);
+       }
+       if (reg & (DED_ERR_MASK | MDED_ERR_MASK))
+               edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
+{
+       struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+       u32 err_addr_lo;
+       u32 err_addr_hi;
+       u32 reg;
+
+       /* IOB Bridge agent transaction error interrupt */
+       reg = readl(ctx->dev_csr + IOBBATRANSERRINTSTS);
+       if (!reg)
+               return;
+
+       dev_err(edac_dev->dev, "IOB bridge agent (BA) transaction error\n");
+       if (reg & WRERR_RESP_MASK)
+               dev_err(edac_dev->dev, "IOB BA write response error\n");
+       if (reg & M_WRERR_RESP_MASK)
+               dev_err(edac_dev->dev,
+                       "Multiple IOB BA write response error\n");
+       if (reg & XGIC_POISONED_REQ_MASK)
+               dev_err(edac_dev->dev, "IOB BA XGIC poisoned write error\n");
+       if (reg & M_XGIC_POISONED_REQ_MASK)
+               dev_err(edac_dev->dev,
+                       "Multiple IOB BA XGIC poisoned write error\n");
+       if (reg & RBM_POISONED_REQ_MASK)
+               dev_err(edac_dev->dev, "IOB BA RBM poisoned write error\n");
+       if (reg & M_RBM_POISONED_REQ_MASK)
+               dev_err(edac_dev->dev,
+                       "Multiple IOB BA RBM poisoned write error\n");
+       if (reg & WDATA_CORRUPT_MASK)
+               dev_err(edac_dev->dev, "IOB BA write error\n");
+       if (reg & M_WDATA_CORRUPT_MASK)
+               dev_err(edac_dev->dev, "Multiple IOB BA write error\n");
+       if (reg & TRANS_CORRUPT_MASK)
+               dev_err(edac_dev->dev, "IOB BA transaction error\n");
+       if (reg & M_TRANS_CORRUPT_MASK)
+               dev_err(edac_dev->dev, "Multiple IOB BA transaction error\n");
+       if (reg & RIDRAM_CORRUPT_MASK)
+               dev_err(edac_dev->dev,
+                       "IOB BA RDIDRAM read transaction ID error\n");
+       if (reg & M_RIDRAM_CORRUPT_MASK)
+               dev_err(edac_dev->dev,
+                       "Multiple IOB BA RDIDRAM read transaction ID error\n");
+       if (reg & WIDRAM_CORRUPT_MASK)
+               dev_err(edac_dev->dev,
+                       "IOB BA RDIDRAM write transaction ID error\n");
+       if (reg & M_WIDRAM_CORRUPT_MASK)
+               dev_err(edac_dev->dev,
+                       "Multiple IOB BA RDIDRAM write transaction ID error\n");
+       if (reg & ILLEGAL_ACCESS_MASK)
+               dev_err(edac_dev->dev,
+                       "IOB BA XGIC/RB illegal access error\n");
+       if (reg & M_ILLEGAL_ACCESS_MASK)
+               dev_err(edac_dev->dev,
+                       "Multiple IOB BA XGIC/RB illegal access error\n");
+
+       err_addr_lo = readl(ctx->dev_csr + IOBBATRANSERRREQINFOL);
+       err_addr_hi = readl(ctx->dev_csr + IOBBATRANSERRREQINFOH);
+       dev_err(edac_dev->dev, "IOB BA %s access at 0x%02X.%08X (0x%08X)\n",
+               REQTYPE_F2_RD(err_addr_hi) ? "read" : "write",
+               ERRADDRH_F2_RD(err_addr_hi), err_addr_lo, err_addr_hi);
+       if (reg & WRERR_RESP_MASK)
+               dev_err(edac_dev->dev, "IOB BA requestor ID 0x%08X\n",
+                       readl(ctx->dev_csr + IOBBATRANSERRCSWREQID));
+       writel(reg, ctx->dev_csr + IOBBATRANSERRINTSTS);
+}
+
+static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
+{
+       struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+       u32 err_addr_lo;
+       u32 err_addr_hi;
+       u32 reg;
+
+       /* IOB Processing agent transaction error interrupt */
+       reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS);
+       if (!reg)
+               goto chk_iob_axi0;
+       dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n");
+       if (reg & IOBPA_RDATA_CORRUPT_MASK)
+               dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
+       if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
+               dev_err(edac_dev->dev,
+                       "Multiple IOB PA read data RAM error\n");
+       if (reg & IOBPA_WDATA_CORRUPT_MASK)
+               dev_err(edac_dev->dev, "IOB PA write data RAM error\n");
+       if (reg & IOBPA_M_WDATA_CORRUPT_MASK)
+               dev_err(edac_dev->dev,
+                       "Multiple IOB PA write data RAM error\n");
+       if (reg & IOBPA_TRANS_CORRUPT_MASK)
+               dev_err(edac_dev->dev, "IOB PA transaction error\n");
+       if (reg & IOBPA_M_TRANS_CORRUPT_MASK)
+               dev_err(edac_dev->dev, "Multiple IOB PA transaction error\n");
+       if (reg & IOBPA_REQIDRAM_CORRUPT_MASK)
+               dev_err(edac_dev->dev, "IOB PA transaction ID RAM error\n");
+       if (reg & IOBPA_M_REQIDRAM_CORRUPT_MASK)
+               dev_err(edac_dev->dev,
+                       "Multiple IOB PA transaction ID RAM error\n");
+       writel(reg, ctx->dev_csr + IOBPATRANSERRINTSTS);
+
+chk_iob_axi0:
+       /* IOB AXI0 Error */
+       reg = readl(ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
+       if (!reg)
+               goto chk_iob_axi1;
+       err_addr_lo = readl(ctx->dev_csr + IOBAXIS0TRANSERRREQINFOL);
+       err_addr_hi = readl(ctx->dev_csr + IOBAXIS0TRANSERRREQINFOH);
+       dev_err(edac_dev->dev,
+               "%sAXI slave 0 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
+               reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
+               REQTYPE_RD(err_addr_hi) ? "read" : "write",
+               ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
+       writel(reg, ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
+
+chk_iob_axi1:
+       /* IOB AXI1 Error */
+       reg = readl(ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
+       if (!reg)
+               return;
+       err_addr_lo = readl(ctx->dev_csr + IOBAXIS1TRANSERRREQINFOL);
+       err_addr_hi = readl(ctx->dev_csr + IOBAXIS1TRANSERRREQINFOH);
+       dev_err(edac_dev->dev,
+               "%sAXI slave 1 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
+               reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
+               REQTYPE_RD(err_addr_hi) ? "read" : "write",
+               ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
+       writel(reg, ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
+}
+
+static void xgene_edac_soc_check(struct edac_device_ctl_info *edac_dev)
+{
+       struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+       const char * const *soc_mem_err = NULL;
+       u32 pcp_hp_stat;
+       u32 pcp_lp_stat;
+       u32 reg;
+       int i;
+
+       xgene_edac_pcp_rd(ctx->edac, PCPHPERRINTSTS, &pcp_hp_stat);
+       xgene_edac_pcp_rd(ctx->edac, PCPLPERRINTSTS, &pcp_lp_stat);
+       xgene_edac_pcp_rd(ctx->edac, MEMERRINTSTS, &reg);
+       if (!((pcp_hp_stat & (IOB_PA_ERR_MASK | IOB_BA_ERR_MASK |
+                             IOB_XGIC_ERR_MASK | IOB_RB_ERR_MASK)) ||
+             (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) || reg))
+               return;
+
+       if (pcp_hp_stat & IOB_XGIC_ERR_MASK)
+               xgene_edac_iob_gic_report(edac_dev);
+
+       if (pcp_hp_stat & (IOB_RB_ERR_MASK | IOB_BA_ERR_MASK))
+               xgene_edac_rb_report(edac_dev);
+
+       if (pcp_hp_stat & IOB_PA_ERR_MASK)
+               xgene_edac_pa_report(edac_dev);
+
+       if (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) {
+               dev_info(edac_dev->dev,
+                        "CSW switch trace correctable memory parity error\n");
+               edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
+       }
+
+       if (!reg)
+               return;
+       if (ctx->version == 1)
+               soc_mem_err = soc_mem_err_v1;
+       if (!soc_mem_err) {
+               dev_err(edac_dev->dev, "SoC memory parity error 0x%08X\n",
+                       reg);
+               edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+               return;
+       }
+       for (i = 0; i < 31; i++) {
+               if (reg & (1 << i)) {
+                       dev_err(edac_dev->dev, "%s memory parity error\n",
+                               soc_mem_err[i]);
+                       edac_device_handle_ue(edac_dev, 0, 0,
+                                             edac_dev->ctl_name);
+               }
+       }
+}
+
+static void xgene_edac_soc_hw_init(struct edac_device_ctl_info *edac_dev,
+                                  bool enable)
+{
+       struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+
+       /* Enable SoC IP error interrupt */
+       if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
+               if (enable) {
+                       xgene_edac_pcp_clrbits(ctx->edac, PCPHPERRINTMSK,
+                                              IOB_PA_ERR_MASK |
+                                              IOB_BA_ERR_MASK |
+                                              IOB_XGIC_ERR_MASK |
+                                              IOB_RB_ERR_MASK);
+                       xgene_edac_pcp_clrbits(ctx->edac, PCPLPERRINTMSK,
+                                              CSW_SWITCH_TRACE_ERR_MASK);
+               } else {
+                       xgene_edac_pcp_setbits(ctx->edac, PCPHPERRINTMSK,
+                                              IOB_PA_ERR_MASK |
+                                              IOB_BA_ERR_MASK |
+                                              IOB_XGIC_ERR_MASK |
+                                              IOB_RB_ERR_MASK);
+                       xgene_edac_pcp_setbits(ctx->edac, PCPLPERRINTMSK,
+                                              CSW_SWITCH_TRACE_ERR_MASK);
+               }
+
+               writel(enable ? 0x0 : 0xFFFFFFFF,
+                      ctx->dev_csr + IOBAXIS0TRANSERRINTMSK);
+               writel(enable ? 0x0 : 0xFFFFFFFF,
+                      ctx->dev_csr + IOBAXIS1TRANSERRINTMSK);
+               writel(enable ? 0x0 : 0xFFFFFFFF,
+                      ctx->dev_csr + XGICTRANSERRINTMSK);
+
+               xgene_edac_pcp_setbits(ctx->edac, MEMERRINTMSK,
+                                      enable ? 0x0 : 0xFFFFFFFF);
+       }
+}
+
+static int xgene_edac_soc_add(struct xgene_edac *edac, struct device_node *np,
+                             int version)
+{
+       struct edac_device_ctl_info *edac_dev;
+       struct xgene_edac_dev_ctx *ctx;
+       void __iomem *dev_csr;
+       struct resource res;
+       int edac_idx;
+       int rc;
+
+       if (!devres_open_group(edac->dev, xgene_edac_soc_add, GFP_KERNEL))
+               return -ENOMEM;
+
+       rc = of_address_to_resource(np, 0, &res);
+       if (rc < 0) {
+               dev_err(edac->dev, "no SoC resource address\n");
+               goto err_release_group;
+       }
+       dev_csr = devm_ioremap_resource(edac->dev, &res);
+       if (IS_ERR(dev_csr)) {
+               dev_err(edac->dev,
+                       "devm_ioremap_resource failed for soc resource address\n");
+               rc = PTR_ERR(dev_csr);
+               goto err_release_group;
+       }
+
+       edac_idx = edac_device_alloc_index();
+       edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
+                                             "SOC", 1, "SOC", 1, 2, NULL, 0,
+                                             edac_idx);
+       if (!edac_dev) {
+               rc = -ENOMEM;
+               goto err_release_group;
+       }
+
+       ctx = edac_dev->pvt_info;
+       ctx->dev_csr = dev_csr;
+       ctx->name = "xgene_soc_err";
+       ctx->edac_idx = edac_idx;
+       ctx->edac = edac;
+       ctx->edac_dev = edac_dev;
+       ctx->ddev = *edac->dev;
+       ctx->version = version;
+       edac_dev->dev = &ctx->ddev;
+       edac_dev->ctl_name = ctx->name;
+       edac_dev->dev_name = ctx->name;
+       edac_dev->mod_name = EDAC_MOD_STR;
+
+       if (edac_op_state == EDAC_OPSTATE_POLL)
+               edac_dev->edac_check = xgene_edac_soc_check;
+
+       rc = edac_device_add_device(edac_dev);
+       if (rc > 0) {
+               dev_err(edac->dev, "failed edac_device_add_device()\n");
+               rc = -ENOMEM;
+               goto err_ctl_free;
+       }
+
+       if (edac_op_state == EDAC_OPSTATE_INT)
+               edac_dev->op_state = OP_RUNNING_INTERRUPT;
+
+       list_add(&ctx->next, &edac->socs);
+
+       xgene_edac_soc_hw_init(edac_dev, 1);
+
+       devres_remove_group(edac->dev, xgene_edac_soc_add);
+
+       dev_info(edac->dev, "X-Gene EDAC SoC registered\n");
+
+       return 0;
+
+err_ctl_free:
+       edac_device_free_ctl_info(edac_dev);
+err_release_group:
+       devres_release_group(edac->dev, xgene_edac_soc_add);
+       return rc;
+}
+
+static int xgene_edac_soc_remove(struct xgene_edac_dev_ctx *soc)
+{
+       struct edac_device_ctl_info *edac_dev = soc->edac_dev;
+
+       xgene_edac_soc_hw_init(edac_dev, 0);
+       edac_device_del_device(soc->edac->dev);
+       edac_device_free_ctl_info(edac_dev);
+       return 0;
+}
+
 static irqreturn_t xgene_edac_isr(int irq, void *dev_id)
 {
        struct xgene_edac *ctx = dev_id;
        struct xgene_edac_pmd_ctx *pmd;
+       struct xgene_edac_dev_ctx *node;
        unsigned int pcp_hp_stat;
        unsigned int pcp_lp_stat;
 
@@ -1030,9 +1783,8 @@ static irqreturn_t xgene_edac_isr(int irq, void *dev_id)
            (MCU_CORR_ERR_MASK & pcp_lp_stat)) {
                struct xgene_edac_mc_ctx *mcu;
 
-               list_for_each_entry(mcu, &ctx->mcus, next) {
+               list_for_each_entry(mcu, &ctx->mcus, next)
                        xgene_edac_mc_check(mcu->mci);
-               }
        }
 
        list_for_each_entry(pmd, &ctx->pmds, next) {
@@ -1040,6 +1792,12 @@ static irqreturn_t xgene_edac_isr(int irq, void *dev_id)
                        xgene_edac_pmd_check(pmd->edac_dev);
        }
 
+       list_for_each_entry(node, &ctx->l3s, next)
+               xgene_edac_l3_check(node->edac_dev);
+
+       list_for_each_entry(node, &ctx->socs, next)
+               xgene_edac_soc_check(node->edac_dev);
+
        return IRQ_HANDLED;
 }
 
@@ -1058,6 +1816,8 @@ static int xgene_edac_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, edac);
        INIT_LIST_HEAD(&edac->mcus);
        INIT_LIST_HEAD(&edac->pmds);
+       INIT_LIST_HEAD(&edac->l3s);
+       INIT_LIST_HEAD(&edac->socs);
        spin_lock_init(&edac->lock);
        mutex_init(&edac->mc_lock);
 
@@ -1122,6 +1882,8 @@ static int xgene_edac_probe(struct platform_device *pdev)
                }
        }
 
+       edac->dfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
+
        for_each_child_of_node(pdev->dev.of_node, child) {
                if (!of_device_is_available(child))
                        continue;
@@ -1131,6 +1893,14 @@ static int xgene_edac_probe(struct platform_device *pdev)
                        xgene_edac_pmd_add(edac, child, 1);
                if (of_device_is_compatible(child, "apm,xgene-edac-pmd-v2"))
                        xgene_edac_pmd_add(edac, child, 2);
+               if (of_device_is_compatible(child, "apm,xgene-edac-l3"))
+                       xgene_edac_l3_add(edac, child, 1);
+               if (of_device_is_compatible(child, "apm,xgene-edac-l3-v2"))
+                       xgene_edac_l3_add(edac, child, 2);
+               if (of_device_is_compatible(child, "apm,xgene-edac-soc"))
+                       xgene_edac_soc_add(edac, child, 0);
+               if (of_device_is_compatible(child, "apm,xgene-edac-soc-v1"))
+                       xgene_edac_soc_add(edac, child, 1);
        }
 
        return 0;
@@ -1146,14 +1916,21 @@ static int xgene_edac_remove(struct platform_device *pdev)
        struct xgene_edac_mc_ctx *temp_mcu;
        struct xgene_edac_pmd_ctx *pmd;
        struct xgene_edac_pmd_ctx *temp_pmd;
+       struct xgene_edac_dev_ctx *node;
+       struct xgene_edac_dev_ctx *temp_node;
 
-       list_for_each_entry_safe(mcu, temp_mcu, &edac->mcus, next) {
+       list_for_each_entry_safe(mcu, temp_mcu, &edac->mcus, next)
                xgene_edac_mc_remove(mcu);
-       }
 
-       list_for_each_entry_safe(pmd, temp_pmd, &edac->pmds, next) {
+       list_for_each_entry_safe(pmd, temp_pmd, &edac->pmds, next)
                xgene_edac_pmd_remove(pmd);
-       }
+
+       list_for_each_entry_safe(node, temp_node, &edac->l3s, next)
+               xgene_edac_l3_remove(node);
+
+       list_for_each_entry_safe(node, temp_node, &edac->socs, next)
+               xgene_edac_soc_remove(node);
+
        return 0;
 }
 
index 84533e02fbf8ba292cddf960fde5881e1898821c..e1670d533f9742e94b505ba0684d9373956c8bb0 100644 (file)
@@ -52,6 +52,28 @@ config EFI_RUNTIME_MAP
 
          See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map.
 
+config EFI_FAKE_MEMMAP
+       bool "Enable EFI fake memory map"
+       depends on EFI && X86
+       default n
+       help
+         Saying Y here will enable the "efi_fake_mem" boot option.
+         By specifying this parameter, you can add arbitrary attributes
+         to specific memory ranges by updating the original (firmware
+         provided) EFI memmap.
+         This is useful for debugging EFI memmap related features,
+         e.g. the Address Range Mirroring feature.
+
+config EFI_MAX_FAKE_MEM
+       int "Maximum allowable number of ranges in efi_fake_mem boot option"
+       depends on EFI_FAKE_MEMMAP
+       range 1 128
+       default 8
+       help
+         Maximum allowable number of ranges in efi_fake_mem boot option.
+         Ranges can be set up to this value using a comma-separated list.
+         The default value is 8.
+
 config EFI_PARAMS_FROM_FDT
        bool
        help
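As a hedged illustration of the new option (the attribute value 0x10000 for EFI_MEMORY_MORE_RELIABLE is an assumption; the size@start:attribute,... syntax follows the parser added in drivers/firmware/efi/fake_mem.c further down in this series), a kernel command line marking two 2 GiB ranges as mirrored memory might look like:

    efi_fake_mem=2G@4G:0x10000,2G@0x10a0000000:0x10000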
index 6fd3da938717c27c233af9c6b66b186f69fc93fa..c24f00569acb3b92c5e0a3a91cc04f257336f96f 100644 (file)
@@ -9,3 +9,4 @@ obj-$(CONFIG_UEFI_CPER)                 += cper.o
 obj-$(CONFIG_EFI_RUNTIME_MAP)          += runtime-map.o
 obj-$(CONFIG_EFI_RUNTIME_WRAPPERS)     += runtime-wrappers.o
 obj-$(CONFIG_EFI_STUB)                 += libstub/
+obj-$(CONFIG_EFI_FAKE_MEMMAP)          += fake_mem.o
index e992abc5ef264e38fefb0b3361f91c357fd1d08e..c8d794c5847989e1d150b8da968e76a24c6e0d91 100644 (file)
@@ -400,3 +400,4 @@ module_exit(efivars_pstore_exit);
 
 MODULE_DESCRIPTION("EFI variable backend for pstore");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:efivars");
index d6144e3b97c54235ca45a0ad71872957a36ffe48..027ca212179f7f81276733d0bda6b97a636c6770 100644 (file)
 #include <linux/platform_device.h>
 
 struct efi __read_mostly efi = {
-       .mps        = EFI_INVALID_TABLE_ADDR,
-       .acpi       = EFI_INVALID_TABLE_ADDR,
-       .acpi20     = EFI_INVALID_TABLE_ADDR,
-       .smbios     = EFI_INVALID_TABLE_ADDR,
-       .smbios3    = EFI_INVALID_TABLE_ADDR,
-       .sal_systab = EFI_INVALID_TABLE_ADDR,
-       .boot_info  = EFI_INVALID_TABLE_ADDR,
-       .hcdp       = EFI_INVALID_TABLE_ADDR,
-       .uga        = EFI_INVALID_TABLE_ADDR,
-       .uv_systab  = EFI_INVALID_TABLE_ADDR,
-       .fw_vendor  = EFI_INVALID_TABLE_ADDR,
-       .runtime    = EFI_INVALID_TABLE_ADDR,
-       .config_table  = EFI_INVALID_TABLE_ADDR,
-       .esrt       = EFI_INVALID_TABLE_ADDR,
+       .mps                    = EFI_INVALID_TABLE_ADDR,
+       .acpi                   = EFI_INVALID_TABLE_ADDR,
+       .acpi20                 = EFI_INVALID_TABLE_ADDR,
+       .smbios                 = EFI_INVALID_TABLE_ADDR,
+       .smbios3                = EFI_INVALID_TABLE_ADDR,
+       .sal_systab             = EFI_INVALID_TABLE_ADDR,
+       .boot_info              = EFI_INVALID_TABLE_ADDR,
+       .hcdp                   = EFI_INVALID_TABLE_ADDR,
+       .uga                    = EFI_INVALID_TABLE_ADDR,
+       .uv_systab              = EFI_INVALID_TABLE_ADDR,
+       .fw_vendor              = EFI_INVALID_TABLE_ADDR,
+       .runtime                = EFI_INVALID_TABLE_ADDR,
+       .config_table           = EFI_INVALID_TABLE_ADDR,
+       .esrt                   = EFI_INVALID_TABLE_ADDR,
+       .properties_table       = EFI_INVALID_TABLE_ADDR,
 };
 EXPORT_SYMBOL(efi);
 
@@ -63,6 +64,9 @@ static int __init parse_efi_cmdline(char *str)
                return -EINVAL;
        }
 
+       if (parse_option_str(str, "debug"))
+               set_bit(EFI_DBG, &efi.flags);
+
        if (parse_option_str(str, "noruntime"))
                disable_runtime = true;
 
@@ -250,7 +254,7 @@ subsys_initcall(efisubsys_init);
 int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 {
        struct efi_memory_map *map = efi.memmap;
-       void *p, *e;
+       phys_addr_t p, e;
 
        if (!efi_enabled(EFI_MEMMAP)) {
                pr_err_once("EFI_MEMMAP is not enabled.\n");
@@ -282,10 +286,10 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
                 * So just always get our own virtual map on the CPU.
                 *
                 */
-               md = early_memremap((phys_addr_t)p, sizeof (*md));
+               md = early_memremap(p, sizeof (*md));
                if (!md) {
-                       pr_err_once("early_memremap(%p, %zu) failed.\n",
-                                   p, sizeof (*md));
+                       pr_err_once("early_memremap(%pa, %zu) failed.\n",
+                                   &p, sizeof (*md));
                        return -ENOMEM;
                }
 
@@ -362,6 +366,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
        {SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
        {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
        {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
+       {EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
        {NULL_GUID, NULL, NULL},
 };
 
@@ -421,6 +426,24 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
        }
        pr_cont("\n");
        set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
+       /* Parse the EFI Properties table if it exists */
+       if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
+               efi_properties_table_t *tbl;
+
+               tbl = early_memremap(efi.properties_table, sizeof(*tbl));
+               if (tbl == NULL) {
+                       pr_err("Could not map Properties table!\n");
+                       return -ENOMEM;
+               }
+
+               if (tbl->memory_protection_attribute &
+                   EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA)
+                       set_bit(EFI_NX_PE_DATA, &efi.flags);
+
+               early_memunmap(tbl, sizeof(*tbl));
+       }
+
        return 0;
 }
 
@@ -489,7 +512,6 @@ static __initdata struct {
 };
 
 struct param_info {
-       int verbose;
        int found;
        void *params;
 };
@@ -520,21 +542,20 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
                else
                        *(u64 *)dest = val;
 
-               if (info->verbose)
+               if (efi_enabled(EFI_DBG))
                        pr_info("  %s: 0x%0*llx\n", dt_params[i].name,
                                dt_params[i].size * 2, val);
        }
        return 1;
 }
 
-int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
+int __init efi_get_fdt_params(struct efi_fdt_params *params)
 {
        struct param_info info;
        int ret;
 
        pr_info("Getting EFI parameters from FDT:\n");
 
-       info.verbose = verbose;
        info.found = 0;
        info.params = params;
 
@@ -588,16 +609,19 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
 
        attr = md->attribute;
        if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
-                    EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_WP |
-                    EFI_MEMORY_RP | EFI_MEMORY_XP | EFI_MEMORY_RUNTIME))
+                    EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
+                    EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
+                    EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
                snprintf(pos, size, "|attr=0x%016llx]",
                         (unsigned long long)attr);
        else
-               snprintf(pos, size, "|%3s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+               snprintf(pos, size, "|%3s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
                         attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
+                        attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
                         attr & EFI_MEMORY_XP      ? "XP"  : "",
                         attr & EFI_MEMORY_RP      ? "RP"  : "",
                         attr & EFI_MEMORY_WP      ? "WP"  : "",
+                        attr & EFI_MEMORY_RO      ? "RO"  : "",
                         attr & EFI_MEMORY_UCE     ? "UCE" : "",
                         attr & EFI_MEMORY_WB      ? "WB"  : "",
                         attr & EFI_MEMORY_WT      ? "WT"  : "",
@@ -605,3 +629,36 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
                         attr & EFI_MEMORY_UC      ? "UC"  : "");
        return buf;
 }
+
+/*
+ * efi_mem_attributes - lookup memmap attributes for physical address
+ * @phys_addr: the physical address to lookup
+ *
+ * Search in the EFI memory map for the region covering
+ * @phys_addr. Returns the EFI memory attributes if the region
+ * was found in the memory map, 0 otherwise.
+ *
+ * Despite being marked __weak, most architectures should *not*
+ * override this function. It is __weak solely for the benefit
+ * of ia64 which has a funky EFI memory map that doesn't work
+ * the same way as other architectures.
+ */
+u64 __weak efi_mem_attributes(unsigned long phys_addr)
+{
+       struct efi_memory_map *map;
+       efi_memory_desc_t *md;
+       void *p;
+
+       if (!efi_enabled(EFI_MEMMAP))
+               return 0;
+
+       map = efi.memmap;
+       for (p = map->map; p < map->map_end; p += map->desc_size) {
+               md = p;
+               if ((md->phys_addr <= phys_addr) &&
+                   (phys_addr < (md->phys_addr +
+                   (md->num_pages << EFI_PAGE_SHIFT))))
+                       return md->attribute;
+       }
+       return 0;
+}
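A minimal usage sketch, assuming a hypothetical caller and physical address; it only shows how the returned attributes combine with the EFI_MEMORY_MORE_RELIABLE flag handled elsewhere in this series:

	/* phys_addr is a hypothetical unsigned long physical address */
	u64 attr = efi_mem_attributes(phys_addr);

	if (attr & EFI_MEMORY_MORE_RELIABLE)
		pr_info("0x%lx lies in a more-reliable (mirrored) region\n",
			phys_addr);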
index a5b95d61ae71efe1252978406ceb4b6bb922eb46..22c5285f77050f27d2d96a22b5ade945bae54316 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/kobject.h>
 #include <linux/list.h>
 #include <linux/memblock.h>
-#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 
@@ -450,22 +449,10 @@ err:
        esrt = NULL;
        return error;
 }
+device_initcall(esrt_sysfs_init);
 
-static void __exit esrt_sysfs_exit(void)
-{
-       pr_debug("esrt-sysfs: unloading.\n");
-       cleanup_entry_list();
-       kset_unregister(esrt_kset);
-       sysfs_remove_group(esrt_kobj, &esrt_attr_group);
-       kfree(esrt);
-       esrt = NULL;
-       kobject_del(esrt_kobj);
-       kobject_put(esrt_kobj);
-}
-
-module_init(esrt_sysfs_init);
-module_exit(esrt_sysfs_exit);
-
+/*
 MODULE_AUTHOR("Peter Jones <pjones@redhat.com>");
 MODULE_DESCRIPTION("EFI System Resource Table support");
 MODULE_LICENSE("GPL");
+*/
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
new file mode 100644 (file)
index 0000000..ed3a854
--- /dev/null
@@ -0,0 +1,238 @@
+/*
+ * fake_mem.c
+ *
+ * Copyright (C) 2015 FUJITSU LIMITED
+ * Author: Taku Izumi <izumi.taku@jp.fujitsu.com>
+ *
+ * This code introduces a new boot option named "efi_fake_mem".
+ * By specifying this parameter, you can add arbitrary attributes to
+ * specific memory ranges by updating the original (firmware provided)
+ * EFI memmap.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ */
+
+#include <linux/kernel.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/types.h>
+#include <linux/sort.h>
+#include <asm/efi.h>
+
+#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
+
+struct fake_mem {
+       struct range range;
+       u64 attribute;
+};
+static struct fake_mem fake_mems[EFI_MAX_FAKEMEM];
+static int nr_fake_mem;
+
+static int __init cmp_fake_mem(const void *x1, const void *x2)
+{
+       const struct fake_mem *m1 = x1;
+       const struct fake_mem *m2 = x2;
+
+       if (m1->range.start < m2->range.start)
+               return -1;
+       if (m1->range.start > m2->range.start)
+               return 1;
+       return 0;
+}
+
+void __init efi_fake_memmap(void)
+{
+       u64 start, end, m_start, m_end, m_attr;
+       int new_nr_map = memmap.nr_map;
+       efi_memory_desc_t *md;
+       phys_addr_t new_memmap_phy;
+       void *new_memmap;
+       void *old, *new;
+       int i;
+
+       if (!nr_fake_mem || !efi_enabled(EFI_MEMMAP))
+               return;
+
+       /* count up the number of EFI memory descriptors */
+       for (old = memmap.map; old < memmap.map_end; old += memmap.desc_size) {
+               md = old;
+               start = md->phys_addr;
+               end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+               for (i = 0; i < nr_fake_mem; i++) {
+                       /* modifying range */
+                       m_start = fake_mems[i].range.start;
+                       m_end = fake_mems[i].range.end;
+
+                       if (m_start <= start) {
+                               /* split into 2 parts */
+                               if (start < m_end && m_end < end)
+                                       new_nr_map++;
+                       }
+                       if (start < m_start && m_start < end) {
+                               /* split into 3 parts */
+                               if (m_end < end)
+                                       new_nr_map += 2;
+                               /* split into 2 parts */
+                               if (end <= m_end)
+                                       new_nr_map++;
+                       }
+               }
+       }
+
+       /* allocate memory for new EFI memmap */
+       new_memmap_phy = memblock_alloc(memmap.desc_size * new_nr_map,
+                                       PAGE_SIZE);
+       if (!new_memmap_phy)
+               return;
+
+       /* create new EFI memmap */
+       new_memmap = early_memremap(new_memmap_phy,
+                                   memmap.desc_size * new_nr_map);
+       if (!new_memmap) {
+               memblock_free(new_memmap_phy, memmap.desc_size * new_nr_map);
+               return;
+       }
+
+       for (old = memmap.map, new = new_memmap;
+            old < memmap.map_end;
+            old += memmap.desc_size, new += memmap.desc_size) {
+
+               /* copy original EFI memory descriptor */
+               memcpy(new, old, memmap.desc_size);
+               md = new;
+               start = md->phys_addr;
+               end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+               for (i = 0; i < nr_fake_mem; i++) {
+                       /* modifying range */
+                       m_start = fake_mems[i].range.start;
+                       m_end = fake_mems[i].range.end;
+                       m_attr = fake_mems[i].attribute;
+
+                       if (m_start <= start && end <= m_end)
+                               md->attribute |= m_attr;
+
+                       if (m_start <= start &&
+                           (start < m_end && m_end < end)) {
+                               /* first part */
+                               md->attribute |= m_attr;
+                               md->num_pages = (m_end - md->phys_addr + 1) >>
+                                       EFI_PAGE_SHIFT;
+                               /* latter part */
+                               new += memmap.desc_size;
+                               memcpy(new, old, memmap.desc_size);
+                               md = new;
+                               md->phys_addr = m_end + 1;
+                               md->num_pages = (end - md->phys_addr + 1) >>
+                                       EFI_PAGE_SHIFT;
+                       }
+
+                       if ((start < m_start && m_start < end) && m_end < end) {
+                               /* first part */
+                               md->num_pages = (m_start - md->phys_addr) >>
+                                       EFI_PAGE_SHIFT;
+                               /* middle part */
+                               new += memmap.desc_size;
+                               memcpy(new, old, memmap.desc_size);
+                               md = new;
+                               md->attribute |= m_attr;
+                               md->phys_addr = m_start;
+                               md->num_pages = (m_end - m_start + 1) >>
+                                       EFI_PAGE_SHIFT;
+                               /* last part */
+                               new += memmap.desc_size;
+                               memcpy(new, old, memmap.desc_size);
+                               md = new;
+                               md->phys_addr = m_end + 1;
+                               md->num_pages = (end - m_end) >>
+                                       EFI_PAGE_SHIFT;
+                       }
+
+                       if ((start < m_start && m_start < end) &&
+                           (end <= m_end)) {
+                               /* first part */
+                               md->num_pages = (m_start - md->phys_addr) >>
+                                       EFI_PAGE_SHIFT;
+                               /* latter part */
+                               new += memmap.desc_size;
+                               memcpy(new, old, memmap.desc_size);
+                               md = new;
+                               md->phys_addr = m_start;
+                               md->num_pages = (end - md->phys_addr + 1) >>
+                                       EFI_PAGE_SHIFT;
+                               md->attribute |= m_attr;
+                       }
+               }
+       }
+
+       /* swap into new EFI memmap */
+       efi_unmap_memmap();
+       memmap.map = new_memmap;
+       memmap.phys_map = new_memmap_phy;
+       memmap.nr_map = new_nr_map;
+       memmap.map_end = memmap.map + memmap.nr_map * memmap.desc_size;
+       set_bit(EFI_MEMMAP, &efi.flags);
+
+       /* print new EFI memmap */
+       efi_print_memmap();
+}
+
+static int __init setup_fake_mem(char *p)
+{
+       u64 start = 0, mem_size = 0, attribute = 0;
+       int i;
+
+       if (!p)
+               return -EINVAL;
+
+       while (*p != '\0') {
+               mem_size = memparse(p, &p);
+               if (*p == '@')
+                       start = memparse(p+1, &p);
+               else
+                       break;
+
+               if (*p == ':')
+                       attribute = simple_strtoull(p+1, &p, 0);
+               else
+                       break;
+
+               if (nr_fake_mem >= EFI_MAX_FAKEMEM)
+                       break;
+
+               fake_mems[nr_fake_mem].range.start = start;
+               fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
+               fake_mems[nr_fake_mem].attribute = attribute;
+               nr_fake_mem++;
+
+               if (*p == ',')
+                       p++;
+       }
+
+       sort(fake_mems, nr_fake_mem, sizeof(struct fake_mem),
+            cmp_fake_mem, NULL);
+
+       for (i = 0; i < nr_fake_mem; i++)
+               pr_info("efi_fake_mem: add attr=0x%016llx to [mem 0x%016llx-0x%016llx]\n",
+                       fake_mems[i].attribute, fake_mems[i].range.start,
+                       fake_mems[i].range.end);
+
+       return *p == '\0' ? 0 : -EINVAL;
+}
+
+early_param("efi_fake_mem", setup_fake_mem);
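A self-contained sketch of the descriptor-counting rule used in efi_fake_memmap() above, with invented region values (userspace C, not part of the patch): a fake range that falls strictly inside an existing descriptor splits it into three pieces, i.e. two extra map entries.

	#include <stdio.h>

	int main(void)
	{
		unsigned long long start = 0x1000, end = 0x4fff;     /* existing region */
		unsigned long long m_start = 0x2000, m_end = 0x2fff; /* fake range */
		int extra = 0;

		if (m_start <= start && start < m_end && m_end < end)
			extra++;		/* head overlap: split into 2 parts */
		if (start < m_start && m_start < end) {
			if (m_end < end)
				extra += 2;	/* strictly inside: split into 3 parts */
			else
				extra++;	/* tail overlap: split into 2 parts */
		}
		printf("extra descriptors: %d\n", extra);	/* prints 2 here */
		return 0;
	}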
index e29560e6b40b0e5f28a141e7c88ceba1bdfa22ff..950c87f5d279335210088e4154eda135b24304d5 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/efi.h>
+#include <linux/sort.h>
 #include <asm/efi.h>
 
 #include "efistub.h"
@@ -305,6 +306,44 @@ fail:
  */
 #define EFI_RT_VIRTUAL_BASE    0x40000000
 
+static int cmp_mem_desc(const void *l, const void *r)
+{
+       const efi_memory_desc_t *left = l, *right = r;
+
+       return (left->phys_addr > right->phys_addr) ? 1 : -1;
+}
+
+/*
+ * Returns whether region @left ends exactly where region @right starts,
+ * or false if either argument is NULL.
+ */
+static bool regions_are_adjacent(efi_memory_desc_t *left,
+                                efi_memory_desc_t *right)
+{
+       u64 left_end;
+
+       if (left == NULL || right == NULL)
+               return false;
+
+       left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
+
+       return left_end == right->phys_addr;
+}
+
+/*
+ * Returns whether region @left and region @right have compatible memory type
+ * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
+ */
+static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
+                                                     efi_memory_desc_t *right)
+{
+       static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
+                                        EFI_MEMORY_WC | EFI_MEMORY_UC |
+                                        EFI_MEMORY_RUNTIME;
+
+       return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
+}
+
 /*
  * efi_get_virtmap() - create a virtual mapping for the EFI memory map
  *
@@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
                     int *count)
 {
        u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
-       efi_memory_desc_t *out = runtime_map;
+       efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
        int l;
 
-       for (l = 0; l < map_size; l += desc_size) {
-               efi_memory_desc_t *in = (void *)memory_map + l;
+       /*
+        * To work around potential issues with the Properties Table feature
+        * introduced in UEFI 2.5, which may split PE/COFF executable images
+        * in memory into several RuntimeServicesCode and RuntimeServicesData
+        * regions, we need to preserve the relative offsets between adjacent
+        * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
+        * The easiest way to find adjacent regions is to sort the memory map
+        * before traversing it.
+        */
+       sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
+
+       for (l = 0; l < map_size; l += desc_size, prev = in) {
                u64 paddr, size;
 
+               in = (void *)memory_map + l;
                if (!(in->attribute & EFI_MEMORY_RUNTIME))
                        continue;
 
+               paddr = in->phys_addr;
+               size = in->num_pages * EFI_PAGE_SIZE;
+
                /*
                 * Make the mapping compatible with 64k pages: this allows
                 * a 4k page size kernel to kexec a 64k page size kernel and
                 * vice versa.
                 */
-               paddr = round_down(in->phys_addr, SZ_64K);
-               size = round_up(in->num_pages * EFI_PAGE_SIZE +
-                               in->phys_addr - paddr, SZ_64K);
-
-               /*
-                * Avoid wasting memory on PTEs by choosing a virtual base that
-                * is compatible with section mappings if this region has the
-                * appropriate size and physical alignment. (Sections are 2 MB
-                * on 4k granule kernels)
-                */
-               if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
-                       efi_virt_base = round_up(efi_virt_base, SZ_2M);
+               if (!regions_are_adjacent(prev, in) ||
+                   !regions_have_compatible_memory_type_attrs(prev, in)) {
+
+                       paddr = round_down(in->phys_addr, SZ_64K);
+                       size += in->phys_addr - paddr;
+
+                       /*
+                        * Avoid wasting memory on PTEs by choosing a virtual
+                        * base that is compatible with section mappings if this
+                        * region has the appropriate size and physical
+                        * alignment. (Sections are 2 MB on 4k granule kernels)
+                        */
+                       if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+                               efi_virt_base = round_up(efi_virt_base, SZ_2M);
+                       else
+                               efi_virt_base = round_up(efi_virt_base, SZ_64K);
+               }
 
                in->virt_addr = efi_virt_base + in->phys_addr - paddr;
                efi_virt_base += size;
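A hedged worked example of the rounding above, with invented values: a non-adjacent EFI_MEMORY_RUNTIME region at phys_addr 0x43211000 spanning 5 EFI pages gives

	paddr = round_down(0x43211000, SZ_64K)           = 0x43200000
	size  = 5 * EFI_PAGE_SIZE + (0x43211000 - paddr) = 0x16000
	in->virt_addr = efi_virt_base + 0x43211000 - paddr

so the region keeps its offset inside the 64k frame and efi_virt_base advances by 0x16000 before the next region is mapped.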
index 8949b3f6f74d207f0bc935af11dd76cf956b49ba..b18bea08ff253398db70846445dca4ee3ea0eafc 100644 (file)
@@ -119,6 +119,13 @@ config GPIO_ALTERA
 
          If driver is built as a module it will be called gpio-altera.
 
+config GPIO_AMDPT
+       tristate "AMD Promontory GPIO support"
+       depends on ACPI
+       help
+         Driver for GPIO functionality on the Promontory IOHub.
+         Requires ACPI ASL code to enumerate as a platform device.
+
 config GPIO_BCM_KONA
        bool "Broadcom Kona GPIO"
        depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
@@ -176,16 +183,6 @@ config GPIO_ETRAXFS
        help
          Say yes here to support the GPIO controller on Axis ETRAX FS SoCs.
 
-config GPIO_F7188X
-       tristate "F71869, F71869A, F71882FG and F71889F GPIO support"
-       depends on X86
-       help
-         This option enables support for GPIOs found on Fintek Super-I/O
-         chips F71869, F71869A, F71882FG and F71889F.
-
-         To compile this driver as a module, choose M here: the module will
-         be called f7188x-gpio.
-
 config GPIO_GE_FPGA
        bool "GE FPGA based GPIO"
        depends on GE_FPGA
@@ -235,12 +232,6 @@ config GPIO_IOP
 
          If unsure, say N.
 
-config GPIO_IT8761E
-       tristate "IT8761E GPIO support"
-       depends on X86  # unconditional access to IO space.
-       help
-         Say yes here to support GPIO functionality of IT8761E super I/O chip.
-
 config GPIO_LOONGSON
        bool "Loongson-2/3 GPIO support"
        depends on CPU_LOONGSON2 || CPU_LOONGSON3
@@ -297,14 +288,6 @@ config GPIO_MPC8XXX
          Say Y here if you're going to use hardware that connects to the
          MPC512x/831x/834x/837x/8572/8610 GPIOs.
 
-config GPIO_MSM_V2
-       tristate "Qualcomm MSM GPIO v2"
-       depends on GPIOLIB && OF && ARCH_QCOM
-       help
-         Say yes here to support the GPIO interface on ARM v7 based
-         Qualcomm MSM chips.  Most of the pins on the MSM can be
-         selected for GPIO, and are controlled by this driver.
-
 config GPIO_MVEBU
        def_bool y
        depends on PLAT_ORION
@@ -368,42 +351,6 @@ config GPIO_SAMSUNG
          Legacy GPIO support. Use only for platforms without support for
          pinctrl.
 
-config GPIO_SCH
-       tristate "Intel SCH/TunnelCreek/Centerton/Quark X1000 GPIO"
-       depends on PCI && X86
-       select MFD_CORE
-       select LPC_SCH
-       help
-         Say yes here to support GPIO interface on Intel Poulsbo SCH,
-         Intel Tunnel Creek processor, Intel Centerton processor or
-         Intel Quark X1000 SoC.
-
-         The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
-         powered by the core power rail and are turned off during sleep
-         modes (S3 and higher). The remaining four GPIOs are powered by
-         the Intel SCH suspend power supply. These GPIOs remain
-         active during S3. The suspend powered GPIOs can be used to wake the
-         system from the Suspend-to-RAM state.
-
-         The Intel Tunnel Creek processor has 5 GPIOs powered by the
-         core power rail and 9 from suspend power supply.
-
-         The Intel Centerton processor has a total of 30 GPIO pins.
-         Twenty-one are powered by the core power rail and 9 from the
-         suspend power supply.
-
-         The Intel Quark X1000 SoC has 2 GPIOs powered by the core
-         power well and 6 from the suspend power well.
-
-config GPIO_SCH311X
-       tristate "SMSC SCH311x SuperI/O GPIO"
-       help
-         Driver to enable the GPIOs found on SMSC SMSC SCH3112, SCH3114 and
-         SCH3116 "Super I/O" chipsets.
-
-         To compile this driver as a module, choose M here: the module will
-         be called gpio-sch311x.
-
 config GPIO_SPEAR_SPICS
        bool "ST SPEAr13xx SPI Chip Select as GPIO support"
        depends on PLAT_SPEAR
@@ -440,15 +387,6 @@ config GPIO_TB10X
        select GENERIC_IRQ_CHIP
        select OF_GPIO
 
-config GPIO_TS5500
-       tristate "TS-5500 DIO blocks and compatibles"
-       depends on TS5500 || COMPILE_TEST
-       help
-         This driver supports Digital I/O exposed by pin blocks found on some
-         Technologic Systems platforms. It includes, but is not limited to, 3
-         blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
-         LCD port.
-
 config GPIO_TZ1090
        bool "Toumaz Xenif TZ1090 GPIO support"
        depends on SOC_TZ1090
@@ -508,13 +446,13 @@ config GPIO_XGENE_SB
 
 config GPIO_XILINX
        tristate "Xilinx GPIO support"
-       depends on OF_GPIO && (PPC || MICROBLAZE || ARCH_ZYNQ || X86)
+       depends on OF_GPIO
        help
          Say yes here to support the Xilinx FPGA GPIO device
 
 config GPIO_XLP
        tristate "Netlogic XLP GPIO support"
-       depends on CPU_XLP
+       depends on CPU_XLP && OF_GPIO
        select GPIOLIB_IRQCHIP
        help
          This driver provides support for GPIO interface on Netlogic XLP MIPS64
@@ -545,6 +483,87 @@ config GPIO_ZYNQ
        help
          Say yes here to support Xilinx Zynq GPIO controller.
 
+config GPIO_ZX
+       bool "ZTE ZX GPIO support"
+       select GPIOLIB_IRQCHIP
+       help
+         Say yes here to support the GPIO device on ZTE ZX SoCs.
+
+endmenu
+
+menu "Port-mapped I/O GPIO drivers"
+       depends on X86 # Unconditional I/O space access
+
+config GPIO_104_IDIO_16
+       tristate "ACCES 104-IDIO-16 GPIO support"
+       help
+         Enables GPIO support for the ACCES 104-IDIO-16 family.
+
+config GPIO_F7188X
+       tristate "F71869, F71869A, F71882FG and F71889F GPIO support"
+       help
+         This option enables support for GPIOs found on Fintek Super-I/O
+         chips F71869, F71869A, F71882FG and F71889F.
+
+         To compile this driver as a module, choose M here: the module will
+         be called f7188x-gpio.
+
+config GPIO_IT87
+       tristate "IT87xx GPIO support"
+       help
+         Say yes here to support GPIO functionality of IT87xx Super I/O chips.
+
+         This driver is tested with ITE IT8728 and IT8732 Super I/O chips, and
+         supports the IT8761E Super I/O chip as well.
+
+         To compile this driver as a module, choose M here: the module will
+         be called gpio_it87.
+
+config GPIO_SCH
+       tristate "Intel SCH/TunnelCreek/Centerton/Quark X1000 GPIO"
+       depends on PCI
+       select MFD_CORE
+       select LPC_SCH
+       help
+         Say yes here to support GPIO interface on Intel Poulsbo SCH,
+         Intel Tunnel Creek processor, Intel Centerton processor or
+         Intel Quark X1000 SoC.
+
+         The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
+         powered by the core power rail and are turned off during sleep
+         modes (S3 and higher). The remaining four GPIOs are powered by
+         the Intel SCH suspend power supply. These GPIOs remain
+         active during S3. The suspend powered GPIOs can be used to wake the
+         system from the Suspend-to-RAM state.
+
+         The Intel Tunnel Creek processor has 5 GPIOs powered by the
+         core power rail and 9 from suspend power supply.
+
+         The Intel Centerton processor has a total of 30 GPIO pins.
+         Twenty-one are powered by the core power rail and 9 from the
+         suspend power supply.
+
+         The Intel Quark X1000 SoC has 2 GPIOs powered by the core
+         power well and 6 from the suspend power well.
+
+config GPIO_SCH311X
+       tristate "SMSC SCH311x SuperI/O GPIO"
+       help
+         Driver to enable the GPIOs found on SMSC SCH3112, SCH3114 and
+         SCH3116 "Super I/O" chipsets.
+
+         To compile this driver as a module, choose M here: the module will
+         be called gpio-sch311x.
+
+config GPIO_TS5500
+       tristate "TS-5500 DIO blocks and compatibles"
+       depends on TS5500 || COMPILE_TEST
+       help
+         This driver supports Digital I/O exposed by pin blocks found on some
+         Technologic Systems platforms. It includes, but is not limited to, 3
+         blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
+         LCD port.
+
 endmenu
 
 menu "I2C GPIO expanders"
@@ -552,7 +571,6 @@ menu "I2C GPIO expanders"
 
 config GPIO_ADP5588
        tristate "ADP5588 I2C GPIO expander"
-       depends on I2C
        help
          This option enables support for 18 GPIOs found
          on Analog Devices ADP5588 GPIO Expanders.
@@ -566,7 +584,7 @@ config GPIO_ADP5588_IRQ
 
 config GPIO_ADNP
        tristate "Avionic Design N-bit GPIO expander"
-       depends on I2C && OF_GPIO
+       depends on OF_GPIO
        select GPIOLIB_IRQCHIP
        help
          This option enables support for N GPIOs found on Avionic Design
@@ -578,14 +596,12 @@ config GPIO_ADNP
 
 config GPIO_MAX7300
        tristate "Maxim MAX7300 GPIO expander"
-       depends on I2C
        select GPIO_MAX730X
        help
          GPIO driver for Maxim MAX7300 I2C-based GPIO expander.
 
 config GPIO_MAX732X
        tristate "MAX7319, MAX7320-7327 I2C Port Expanders"
-       depends on I2C
        help
          Say yes here to support the MAX7319, MAX7320-7327 series of I2C
          Port Expanders. Each IO port on these chips has a fixed role of
@@ -618,7 +634,6 @@ config GPIO_MC9S08DZ60
 
 config GPIO_PCA953X
        tristate "PCA95[357]x, PCA9698, TCA64xx, and MAX7310 I/O ports"
-       depends on I2C
        help
          Say yes here to provide access to several register-oriented
          SMBus I/O expanders, made mostly by NXP or TI.  Compatible
@@ -646,7 +661,6 @@ config GPIO_PCA953X_IRQ
 
 config GPIO_PCF857X
        tristate "PCF857x, PCA{85,96}7x, and MAX732[89] I2C GPIO expanders"
-       depends on I2C
        select GPIOLIB_IRQCHIP
        select IRQ_DOMAIN
        help
@@ -976,7 +990,7 @@ menu "SPI GPIO expanders"
 
 config GPIO_74X164
        tristate "74x164 serial-in/parallel-out 8-bits shift register"
-       depends on SPI_MASTER && OF
+       depends on OF
        help
          Driver for 74x164 compatible serial-in/parallel-out 8-outputs
          shift registers. This driver can be used to provide access
@@ -984,32 +998,28 @@ config GPIO_74X164
 
 config GPIO_MAX7301
        tristate "Maxim MAX7301 GPIO expander"
-       depends on SPI_MASTER
        select GPIO_MAX730X
        help
          GPIO driver for Maxim MAX7301 SPI-based GPIO expander.
 
-config GPIO_MCP23S08
-       tristate "Microchip MCP23xxx I/O expander"
-       depends on (SPI_MASTER && !I2C) || I2C
-       help
-         SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
-         I/O expanders.
-         This provides a GPIO interface supporting inputs and outputs.
-         The I2C versions of the chips can be used as interrupt-controller.
-
 config GPIO_MC33880
        tristate "Freescale MC33880 high-side/low-side switch"
-       depends on SPI_MASTER
        help
          SPI driver for Freescale MC33880 high-side/low-side switch.
          This provides GPIO interface supporting inputs and outputs.
 
-config GPIO_ZX
-       bool "ZTE ZX GPIO support"
-       select GPIOLIB_IRQCHIP
+endmenu
+
+menu "SPI or I2C GPIO expanders"
+       depends on (SPI_MASTER && !I2C) || I2C
+
+config GPIO_MCP23S08
+       tristate "Microchip MCP23xxx I/O expander"
        help
-         Say yes here to support the GPIO device on ZTE ZX SoCs.
+         SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
+         I/O expanders.
+         This provides a GPIO interface supporting inputs and outputs.
+         The I2C versions of the chips can be used as interrupt-controller.
 
 endmenu
 
index f79a7c482a993c5cc534072398fa0ff2226798c4..986dbd838ceaceb03c42ffc15a704bf5f180b09d 100644 (file)
@@ -12,6 +12,7 @@ obj-$(CONFIG_GPIO_ACPI)               += gpiolib-acpi.o
 # Device drivers. Generally keep list sorted alphabetically
 obj-$(CONFIG_GPIO_GENERIC)     += gpio-generic.o
 
+obj-$(CONFIG_GPIO_104_IDIO_16) += gpio-104-idio-16.o
 obj-$(CONFIG_GPIO_74X164)      += gpio-74x164.o
 obj-$(CONFIG_GPIO_74XX_MMIO)   += gpio-74xx-mmio.o
 obj-$(CONFIG_GPIO_ADNP)                += gpio-adnp.o
@@ -19,6 +20,7 @@ obj-$(CONFIG_GPIO_ADP5520)    += gpio-adp5520.o
 obj-$(CONFIG_GPIO_ADP5588)     += gpio-adp5588.o
 obj-$(CONFIG_GPIO_ALTERA)      += gpio-altera.o
 obj-$(CONFIG_GPIO_AMD8111)     += gpio-amd8111.o
+obj-$(CONFIG_GPIO_AMDPT)       += gpio-amdpt.o
 obj-$(CONFIG_GPIO_ARIZONA)     += gpio-arizona.o
 obj-$(CONFIG_ATH79)            += gpio-ath79.o
 obj-$(CONFIG_GPIO_BCM_KONA)    += gpio-bcm-kona.o
@@ -40,7 +42,7 @@ obj-$(CONFIG_GPIO_GE_FPGA)    += gpio-ge.o
 obj-$(CONFIG_GPIO_GRGPIO)      += gpio-grgpio.o
 obj-$(CONFIG_GPIO_ICH)         += gpio-ich.o
 obj-$(CONFIG_GPIO_IOP)         += gpio-iop.o
-obj-$(CONFIG_GPIO_IT8761E)     += gpio-it8761e.o
+obj-$(CONFIG_GPIO_IT87)                += gpio-it87.o
 obj-$(CONFIG_GPIO_JANZ_TTL)    += gpio-janz-ttl.o
 obj-$(CONFIG_GPIO_KEMPLD)      += gpio-kempld.o
 obj-$(CONFIG_ARCH_KS8695)      += gpio-ks8695.o
@@ -64,7 +66,6 @@ obj-$(CONFIG_GPIO_MOXART)     += gpio-moxart.o
 obj-$(CONFIG_GPIO_MPC5200)     += gpio-mpc5200.o
 obj-$(CONFIG_GPIO_MPC8XXX)     += gpio-mpc8xxx.o
 obj-$(CONFIG_GPIO_MSIC)                += gpio-msic.o
-obj-$(CONFIG_GPIO_MSM_V2)      += gpio-msm-v2.o
 obj-$(CONFIG_GPIO_MVEBU)        += gpio-mvebu.o
 obj-$(CONFIG_GPIO_MXC)         += gpio-mxc.o
 obj-$(CONFIG_GPIO_MXS)         += gpio-mxs.o
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
new file mode 100644 (file)
index 0000000..5400d7d
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+ * GPIO driver for the ACCES 104-IDIO-16 family
+ * Copyright (C) 2015 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+static unsigned idio_16_base;
+module_param(idio_16_base, uint, 0);
+MODULE_PARM_DESC(idio_16_base, "ACCES 104-IDIO-16 base address");
+
+/**
+ * struct idio_16_gpio - GPIO device private data structure
+ * @chip:      instance of the gpio_chip
+ * @lock:      synchronization lock to prevent gpio_set race conditions
+ * @base:      base port address of the GPIO device
+ * @extent:    extent of port address region of the GPIO device
+ * @out_state: output bits state
+ */
+struct idio_16_gpio {
+       struct gpio_chip chip;
+       spinlock_t lock;
+       unsigned base;
+       unsigned extent;
+       unsigned out_state;
+};
+
+static int idio_16_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+       if (offset > 15)
+               return 1;
+
+       return 0;
+}
+
+static int idio_16_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       return 0;
+}
+
+static int idio_16_gpio_direction_output(struct gpio_chip *chip,
+       unsigned offset, int value)
+{
+       chip->set(chip, offset, value);
+       return 0;
+}
+
+static struct idio_16_gpio *to_idio16gpio(struct gpio_chip *gc)
+{
+       return container_of(gc, struct idio_16_gpio, chip);
+}
+
+static int idio_16_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+       struct idio_16_gpio *const idio16gpio = to_idio16gpio(chip);
+       const unsigned BIT_MASK = 1U << (offset-16);
+
+       if (offset < 16)
+               return -EINVAL;
+
+       if (offset < 24)
+               return !!(inb(idio16gpio->base + 1) & BIT_MASK);
+
+       return !!(inb(idio16gpio->base + 5) & (BIT_MASK>>8));
+}
+
+static void idio_16_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+       struct idio_16_gpio *const idio16gpio = to_idio16gpio(chip);
+       const unsigned BIT_MASK = 1U << offset;
+       unsigned long flags;
+
+       if (offset > 15)
+               return;
+
+       spin_lock_irqsave(&idio16gpio->lock, flags);
+
+       if (value)
+               idio16gpio->out_state |= BIT_MASK;
+       else
+               idio16gpio->out_state &= ~BIT_MASK;
+
+       if (offset > 7)
+               outb(idio16gpio->out_state >> 8, idio16gpio->base + 4);
+       else
+               outb(idio16gpio->out_state, idio16gpio->base);
+
+       spin_unlock_irqrestore(&idio16gpio->lock, flags);
+}
+
+static int __init idio_16_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct idio_16_gpio *idio16gpio;
+       int err;
+
+       const unsigned BASE = idio_16_base;
+       const unsigned EXTENT = 8;
+       const char *const NAME = dev_name(dev);
+
+       idio16gpio = devm_kzalloc(dev, sizeof(*idio16gpio), GFP_KERNEL);
+       if (!idio16gpio)
+               return -ENOMEM;
+
+       if (!request_region(BASE, EXTENT, NAME)) {
+               dev_err(dev, "Unable to lock %s port addresses (0x%X-0x%X)\n",
+                       NAME, BASE, BASE + EXTENT);
+               err = -EBUSY;
+               goto err_lock_io_port;
+       }
+
+       idio16gpio->chip.label = NAME;
+       idio16gpio->chip.dev = dev;
+       idio16gpio->chip.owner = THIS_MODULE;
+       idio16gpio->chip.base = -1;
+       idio16gpio->chip.ngpio = 32;
+       idio16gpio->chip.get_direction = idio_16_gpio_get_direction;
+       idio16gpio->chip.direction_input = idio_16_gpio_direction_input;
+       idio16gpio->chip.direction_output = idio_16_gpio_direction_output;
+       idio16gpio->chip.get = idio_16_gpio_get;
+       idio16gpio->chip.set = idio_16_gpio_set;
+       idio16gpio->base = BASE;
+       idio16gpio->extent = EXTENT;
+       idio16gpio->out_state = 0xFFFF;
+
+       spin_lock_init(&idio16gpio->lock);
+
+       dev_set_drvdata(dev, idio16gpio);
+
+       err = gpiochip_add(&idio16gpio->chip);
+       if (err) {
+               dev_err(dev, "GPIO registering failed (%d)\n", err);
+               goto err_gpio_register;
+       }
+
+       return 0;
+
+err_gpio_register:
+       release_region(BASE, EXTENT);
+err_lock_io_port:
+       return err;
+}
+
+static int idio_16_remove(struct platform_device *pdev)
+{
+       struct idio_16_gpio *const idio16gpio = platform_get_drvdata(pdev);
+
+       gpiochip_remove(&idio16gpio->chip);
+       release_region(idio16gpio->base, idio16gpio->extent);
+
+       return 0;
+}
+
+static struct platform_device *idio_16_device;
+
+static struct platform_driver idio_16_driver = {
+       .driver = {
+               .name = "104-idio-16"
+       },
+       .remove = idio_16_remove
+};
+
+static void __exit idio_16_exit(void)
+{
+       platform_device_unregister(idio_16_device);
+       platform_driver_unregister(&idio_16_driver);
+}
+
+static int __init idio_16_init(void)
+{
+       int err;
+
+       idio_16_device = platform_device_alloc(idio_16_driver.driver.name, -1);
+       if (!idio_16_device)
+               return -ENOMEM;
+
+       err = platform_device_add(idio_16_device);
+       if (err)
+               goto err_platform_device;
+
+       err = platform_driver_probe(&idio_16_driver, idio_16_probe);
+       if (err)
+               goto err_platform_driver;
+
+       return 0;
+
+err_platform_driver:
+       platform_device_del(idio_16_device);
+err_platform_device:
+       platform_device_put(idio_16_device);
+       return err;
+}
+
+module_init(idio_16_init);
+module_exit(idio_16_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES 104-IDIO-16 GPIO driver");
+MODULE_LICENSE("GPL");
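A hedged usage note: the base port comes from the idio_16_base module parameter declared above, so loading the driver for a card decoded at I/O port 0x260 (an example address, not one mandated by the driver) might look like:

	modprobe gpio-104-idio-16 idio_16_base=0x260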
index 1b44941574fa6965d68e474b9197907e71473692..3e6661bab54aed3a7fe05c6c5748622032017b46 100644 (file)
@@ -42,6 +42,11 @@ struct altera_gpio_chip {
        int mapped_irq;
 };
 
+static struct altera_gpio_chip *to_altera(struct gpio_chip *gc)
+{
+       return container_of(gc, struct altera_gpio_chip, mmchip.gc);
+}
+
 static void altera_gpio_irq_unmask(struct irq_data *d)
 {
        struct altera_gpio_chip *altera_gc;
@@ -49,7 +54,7 @@ static void altera_gpio_irq_unmask(struct irq_data *d)
        unsigned long flags;
        u32 intmask;
 
-       altera_gc = irq_data_get_irq_chip_data(d);
+       altera_gc = to_altera(irq_data_get_irq_chip_data(d));
        mm_gc = &altera_gc->mmchip;
 
        spin_lock_irqsave(&altera_gc->gpio_lock, flags);
@@ -67,7 +72,7 @@ static void altera_gpio_irq_mask(struct irq_data *d)
        unsigned long flags;
        u32 intmask;
 
-       altera_gc = irq_data_get_irq_chip_data(d);
+       altera_gc = to_altera(irq_data_get_irq_chip_data(d));
        mm_gc = &altera_gc->mmchip;
 
        spin_lock_irqsave(&altera_gc->gpio_lock, flags);
@@ -87,7 +92,7 @@ static int altera_gpio_irq_set_type(struct irq_data *d,
 {
        struct altera_gpio_chip *altera_gc;
 
-       altera_gc = irq_data_get_irq_chip_data(d);
+       altera_gc = to_altera(irq_data_get_irq_chip_data(d));
 
        if (type == IRQ_TYPE_NONE)
                return 0;
@@ -210,7 +215,7 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
        unsigned long status;
        int i;
 
-       altera_gc = irq_desc_get_handler_data(desc);
+       altera_gc = to_altera(irq_desc_get_handler_data(desc));
        chip = irq_desc_get_chip(desc);
        mm_gc = &altera_gc->mmchip;
        irqdomain = altera_gc->mmchip.gc.irqdomain;
@@ -239,7 +244,7 @@ static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
        unsigned long status;
        int i;
 
-       altera_gc = irq_desc_get_handler_data(desc);
+       altera_gc = to_altera(irq_desc_get_handler_data(desc));
        chip = irq_desc_get_chip(desc);
        mm_gc = &altera_gc->mmchip;
        irqdomain = altera_gc->mmchip.gc.irqdomain;
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
new file mode 100644 (file)
index 0000000..cbbb966
--- /dev/null
@@ -0,0 +1,261 @@
+/*
+ * AMD Promontory GPIO driver
+ *
+ * Copyright (C) 2015 ASMedia Technology Inc.
+ * Author: YD Tseng <yd_tseng@asmedia.com.tw>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio/driver.h>
+#include <linux/spinlock.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+
+#define PT_TOTAL_GPIO 8
+
+/* PCI-E MMIO register offsets */
+#define PT_DIRECTION_REG   0x00
+#define PT_INPUTDATA_REG   0x04
+#define PT_OUTPUTDATA_REG  0x08
+#define PT_CLOCKRATE_REG   0x0C
+#define PT_SYNC_REG        0x28
+
+struct pt_gpio_chip {
+       struct gpio_chip         gc;
+       void __iomem             *reg_base;
+       spinlock_t               lock;
+};
+
+#define to_pt_gpio(c)  container_of(c, struct pt_gpio_chip, gc)
+
+static int pt_gpio_request(struct gpio_chip *gc, unsigned offset)
+{
+       struct pt_gpio_chip *pt_gpio = to_pt_gpio(gc);
+       unsigned long flags;
+       u32 using_pins;
+
+       dev_dbg(gc->dev, "pt_gpio_request offset=%x\n", offset);
+
+       spin_lock_irqsave(&pt_gpio->lock, flags);
+
+       using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
+       if (using_pins & BIT(offset)) {
+               dev_warn(gc->dev, "PT GPIO pin %x reconfigured\n",
+                       offset);
+               spin_unlock_irqrestore(&pt_gpio->lock, flags);
+               return -EINVAL;
+       }
+
+       writel(using_pins | BIT(offset), pt_gpio->reg_base + PT_SYNC_REG);
+
+       spin_unlock_irqrestore(&pt_gpio->lock, flags);
+
+       return 0;
+}
+
+static void pt_gpio_free(struct gpio_chip *gc, unsigned offset)
+{
+       struct pt_gpio_chip *pt_gpio = to_pt_gpio(gc);
+       unsigned long flags;
+       u32 using_pins;
+
+       spin_lock_irqsave(&pt_gpio->lock, flags);
+
+       using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
+       using_pins &= ~BIT(offset);
+       writel(using_pins, pt_gpio->reg_base + PT_SYNC_REG);
+
+       spin_unlock_irqrestore(&pt_gpio->lock, flags);
+
+       dev_dbg(gc->dev, "pt_gpio_free offset=%x\n", offset);
+}
+
+static void pt_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value)
+{
+       struct pt_gpio_chip *pt_gpio = to_pt_gpio(gc);
+       unsigned long flags;
+       u32 data;
+
+       dev_dbg(gc->dev, "pt_gpio_set_value offset=%x, value=%x\n",
+               offset, value);
+
+       spin_lock_irqsave(&pt_gpio->lock, flags);
+
+       data = readl(pt_gpio->reg_base + PT_OUTPUTDATA_REG);
+       data &= ~BIT(offset);
+       if (value)
+               data |= BIT(offset);
+       writel(data, pt_gpio->reg_base + PT_OUTPUTDATA_REG);
+
+       spin_unlock_irqrestore(&pt_gpio->lock, flags);
+}
+
+static int pt_gpio_get_value(struct gpio_chip *gc, unsigned offset)
+{
+       struct pt_gpio_chip *pt_gpio = to_pt_gpio(gc);
+       unsigned long flags;
+       u32 data;
+
+       spin_lock_irqsave(&pt_gpio->lock, flags);
+
+       data = readl(pt_gpio->reg_base + PT_DIRECTION_REG);
+
+       /* configured as output */
+       if (data & BIT(offset))
+               data = readl(pt_gpio->reg_base + PT_OUTPUTDATA_REG);
+       else    /* configured as input */
+               data = readl(pt_gpio->reg_base + PT_INPUTDATA_REG);
+
+       spin_unlock_irqrestore(&pt_gpio->lock, flags);
+
+       data >>= offset;
+       data &= 1;
+
+       dev_dbg(gc->dev, "pt_gpio_get_value offset=%x, value=%x\n",
+               offset, data);
+
+       return data;
+}
+
+static int pt_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+       struct pt_gpio_chip *pt_gpio = to_pt_gpio(gc);
+       unsigned long flags;
+       u32 data;
+
+       dev_dbg(gc->dev, "pt_gpio_dirction_input offset=%x\n", offset);
+
+       spin_lock_irqsave(&pt_gpio->lock, flags);
+
+       data = readl(pt_gpio->reg_base + PT_DIRECTION_REG);
+       data &= ~BIT(offset);
+       writel(data, pt_gpio->reg_base + PT_DIRECTION_REG);
+
+       spin_unlock_irqrestore(&pt_gpio->lock, flags);
+
+       return 0;
+}
+
+static int pt_gpio_direction_output(struct gpio_chip *gc,
+                                       unsigned offset, int value)
+{
+       struct pt_gpio_chip *pt_gpio = to_pt_gpio(gc);
+       unsigned long flags;
+       u32 data;
+
+       dev_dbg(gc->dev, "pt_gpio_direction_output offset=%x, value=%x\n",
+               offset, value);
+
+       spin_lock_irqsave(&pt_gpio->lock, flags);
+
+       data = readl(pt_gpio->reg_base + PT_OUTPUTDATA_REG);
+       if (value)
+               data |= BIT(offset);
+       else
+               data &= ~BIT(offset);
+       writel(data, pt_gpio->reg_base + PT_OUTPUTDATA_REG);
+
+       data = readl(pt_gpio->reg_base + PT_DIRECTION_REG);
+       data |= BIT(offset);
+       writel(data, pt_gpio->reg_base + PT_DIRECTION_REG);
+
+       spin_unlock_irqrestore(&pt_gpio->lock, flags);
+
+       return 0;
+}
+
+static int pt_gpio_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct acpi_device *acpi_dev;
+       acpi_handle handle = ACPI_HANDLE(dev);
+       struct pt_gpio_chip *pt_gpio;
+       struct resource *res_mem;
+       int ret = 0;
+
+       if (acpi_bus_get_device(handle, &acpi_dev)) {
+               dev_err(dev, "PT GPIO device node not found\n");
+               return -ENODEV;
+       }
+
+       pt_gpio = devm_kzalloc(dev, sizeof(struct pt_gpio_chip), GFP_KERNEL);
+       if (!pt_gpio)
+               return -ENOMEM;
+
+       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res_mem) {
+               dev_err(&pdev->dev, "Failed to get MMIO resource for PT GPIO.\n");
+               return -EINVAL;
+       }
+       pt_gpio->reg_base = devm_ioremap_resource(dev, res_mem);
+       if (IS_ERR(pt_gpio->reg_base)) {
+               dev_err(&pdev->dev, "Failed to map MMIO resource for PT GPIO.\n");
+               return PTR_ERR(pt_gpio->reg_base);
+       }
+
+       spin_lock_init(&pt_gpio->lock);
+
+       pt_gpio->gc.label            = pdev->name;
+       pt_gpio->gc.owner            = THIS_MODULE;
+       pt_gpio->gc.dev              = dev;
+       pt_gpio->gc.request          = pt_gpio_request;
+       pt_gpio->gc.free             = pt_gpio_free;
+       pt_gpio->gc.direction_input  = pt_gpio_direction_input;
+       pt_gpio->gc.direction_output = pt_gpio_direction_output;
+       pt_gpio->gc.get              = pt_gpio_get_value;
+       pt_gpio->gc.set              = pt_gpio_set_value;
+       pt_gpio->gc.base             = -1;
+       pt_gpio->gc.ngpio            = PT_TOTAL_GPIO;
+#if defined(CONFIG_OF_GPIO)
+       pt_gpio->gc.of_node          = pdev->dev.of_node;
+#endif
+       ret = gpiochip_add(&pt_gpio->gc);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to register GPIO lib\n");
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, pt_gpio);
+
+       /* initialize register setting */
+       writel(0, pt_gpio->reg_base + PT_SYNC_REG);
+       writel(0, pt_gpio->reg_base + PT_CLOCKRATE_REG);
+
+       dev_dbg(&pdev->dev, "PT GPIO driver loaded\n");
+       return ret;
+}
+
+static int pt_gpio_remove(struct platform_device *pdev)
+{
+       struct pt_gpio_chip *pt_gpio = platform_get_drvdata(pdev);
+
+       gpiochip_remove(&pt_gpio->gc);
+
+       return 0;
+}
+
+static const struct acpi_device_id pt_gpio_acpi_match[] = {
+       { "AMDF030", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, pt_gpio_acpi_match);
+
+static struct platform_driver pt_gpio_driver = {
+       .driver = {
+               .name = "pt-gpio",
+               .acpi_match_table = ACPI_PTR(pt_gpio_acpi_match),
+       },
+       .probe = pt_gpio_probe,
+       .remove = pt_gpio_remove,
+};
+
+module_platform_driver(pt_gpio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("YD Tseng <yd_tseng@asmedia.com.tw>");
+MODULE_DESCRIPTION("AMD Promontory GPIO Driver");
index 052fbc8fdaaa69da322297e6497a13778f3dabe3..ca002739616af3969431482de5a9df6a6e96a975 100644 (file)
@@ -118,6 +118,8 @@ static int arizona_gpio_probe(struct platform_device *pdev)
        case WM5110:
        case WM8280:
        case WM8997:
+       case WM8998:
+       case WM1814:
                arizona_gpio->gpio_chip.ngpio = 5;
                break;
        default:
index 03b995304ad68292046b2d7f2d231d6c79ee727c..e5827a56ff3b5eda451202849c41b2d8b0e54f31 100644 (file)
  *  by the Free Software Foundation.
  */
 
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/platform_data/gpio-ath79.h>
 #include <linux/of_device.h>
 
 #include <asm/mach-ath79/ar71xx_regs.h>
 
-static void __iomem *ath79_gpio_base;
-static u32 ath79_gpio_count;
-static DEFINE_SPINLOCK(ath79_gpio_lock);
+struct ath79_gpio_ctrl {
+       struct gpio_chip chip;
+       void __iomem *base;
+       spinlock_t lock;
+};
+
+#define to_ath79_gpio_ctrl(c) container_of(c, struct ath79_gpio_ctrl, chip)
 
-static void __ath79_gpio_set_value(unsigned gpio, int value)
+static void ath79_gpio_set_value(struct gpio_chip *chip,
+                               unsigned gpio, int value)
 {
-       void __iomem *base = ath79_gpio_base;
+       struct ath79_gpio_ctrl *ctrl = to_ath79_gpio_ctrl(chip);
 
        if (value)
-               __raw_writel(1 << gpio, base + AR71XX_GPIO_REG_SET);
+               __raw_writel(BIT(gpio), ctrl->base + AR71XX_GPIO_REG_SET);
        else
-               __raw_writel(1 << gpio, base + AR71XX_GPIO_REG_CLEAR);
+               __raw_writel(BIT(gpio), ctrl->base + AR71XX_GPIO_REG_CLEAR);
 }
 
-static int __ath79_gpio_get_value(unsigned gpio)
+static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
 {
-       return (__raw_readl(ath79_gpio_base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
-}
+       struct ath79_gpio_ctrl *ctrl = to_ath79_gpio_ctrl(chip);
 
-static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned offset)
-{
-       return __ath79_gpio_get_value(offset);
-}
-
-static void ath79_gpio_set_value(struct gpio_chip *chip,
-                                 unsigned offset, int value)
-{
-       __ath79_gpio_set_value(offset, value);
+       return (__raw_readl(ctrl->base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
 }
 
 static int ath79_gpio_direction_input(struct gpio_chip *chip,
                                       unsigned offset)
 {
-       void __iomem *base = ath79_gpio_base;
+       struct ath79_gpio_ctrl *ctrl = to_ath79_gpio_ctrl(chip);
        unsigned long flags;
 
-       spin_lock_irqsave(&ath79_gpio_lock, flags);
+       spin_lock_irqsave(&ctrl->lock, flags);
 
-       __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
-                    base + AR71XX_GPIO_REG_OE);
+       __raw_writel(
+               __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
+               ctrl->base + AR71XX_GPIO_REG_OE);
 
-       spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+       spin_unlock_irqrestore(&ctrl->lock, flags);
 
        return 0;
 }
@@ -74,35 +64,37 @@ static int ath79_gpio_direction_input(struct gpio_chip *chip,
 static int ath79_gpio_direction_output(struct gpio_chip *chip,
                                        unsigned offset, int value)
 {
-       void __iomem *base = ath79_gpio_base;
+       struct ath79_gpio_ctrl *ctrl = to_ath79_gpio_ctrl(chip);
        unsigned long flags;
 
-       spin_lock_irqsave(&ath79_gpio_lock, flags);
+       spin_lock_irqsave(&ctrl->lock, flags);
 
        if (value)
-               __raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+               __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_SET);
        else
-               __raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+               __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);
 
-       __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
-                    base + AR71XX_GPIO_REG_OE);
+       __raw_writel(
+               __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) | BIT(offset),
+               ctrl->base + AR71XX_GPIO_REG_OE);
 
-       spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+       spin_unlock_irqrestore(&ctrl->lock, flags);
 
        return 0;
 }
 
 static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
-       void __iomem *base = ath79_gpio_base;
+       struct ath79_gpio_ctrl *ctrl = to_ath79_gpio_ctrl(chip);
        unsigned long flags;
 
-       spin_lock_irqsave(&ath79_gpio_lock, flags);
+       spin_lock_irqsave(&ctrl->lock, flags);
 
-       __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
-                    base + AR71XX_GPIO_REG_OE);
+       __raw_writel(
+               __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) | BIT(offset),
+               ctrl->base + AR71XX_GPIO_REG_OE);
 
-       spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+       spin_unlock_irqrestore(&ctrl->lock, flags);
 
        return 0;
 }
@@ -110,25 +102,26 @@ static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
                                        int value)
 {
-       void __iomem *base = ath79_gpio_base;
+       struct ath79_gpio_ctrl *ctrl = to_ath79_gpio_ctrl(chip);
        unsigned long flags;
 
-       spin_lock_irqsave(&ath79_gpio_lock, flags);
+       spin_lock_irqsave(&ctrl->lock, flags);
 
        if (value)
-               __raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+               __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_SET);
        else
-               __raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+               __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);
 
-       __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
-                    base + AR71XX_GPIO_REG_OE);
+       __raw_writel(
+               __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
+               ctrl->base + AR71XX_GPIO_REG_OE);
 
-       spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+       spin_unlock_irqrestore(&ctrl->lock, flags);
 
        return 0;
 }
 
-static struct gpio_chip ath79_gpio_chip = {
+static const struct gpio_chip ath79_gpio_chip = {
        .label                  = "ath79",
        .get                    = ath79_gpio_get_value,
        .set                    = ath79_gpio_set_value,
@@ -147,10 +140,16 @@ static int ath79_gpio_probe(struct platform_device *pdev)
 {
        struct ath79_gpio_platform_data *pdata = pdev->dev.platform_data;
        struct device_node *np = pdev->dev.of_node;
+       struct ath79_gpio_ctrl *ctrl;
        struct resource *res;
+       u32 ath79_gpio_count;
        bool oe_inverted;
        int err;
 
+       ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+       if (!ctrl)
+               return -ENOMEM;
+
        if (np) {
                err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
                if (err) {
@@ -171,19 +170,21 @@ static int ath79_gpio_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ath79_gpio_base = devm_ioremap_nocache(
+       ctrl->base = devm_ioremap_nocache(
                &pdev->dev, res->start, resource_size(res));
-       if (!ath79_gpio_base)
+       if (!ctrl->base)
                return -ENOMEM;
 
-       ath79_gpio_chip.dev = &pdev->dev;
-       ath79_gpio_chip.ngpio = ath79_gpio_count;
+       spin_lock_init(&ctrl->lock);
+       memcpy(&ctrl->chip, &ath79_gpio_chip, sizeof(ctrl->chip));
+       ctrl->chip.dev = &pdev->dev;
+       ctrl->chip.ngpio = ath79_gpio_count;
        if (oe_inverted) {
-               ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
-               ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
+               ctrl->chip.direction_input = ar934x_gpio_direction_input;
+               ctrl->chip.direction_output = ar934x_gpio_direction_output;
        }
 
-       err = gpiochip_add(&ath79_gpio_chip);
+       err = gpiochip_add(&ctrl->chip);
        if (err) {
                dev_err(&pdev->dev,
                        "cannot add AR71xx GPIO chip, error=%d", err);
index 2ffcd9fdd1f2f7d3df917aa0324a07e3f23b17c8..5c15dd12172db3cf38945dc347f9c1e71ac81971 100644 (file)
@@ -176,6 +176,11 @@ static const struct etraxfs_gpio_info etraxfs_gpio_artpec3 = {
        .rw_intr_pins   = ARTPEC3_rw_intr_pins,
 };
 
+static struct etraxfs_gpio_chip *to_etraxfs(struct gpio_chip *gc)
+{
+       return container_of(gc, struct etraxfs_gpio_chip, bgc.gc);
+}
+
 static unsigned int etraxfs_gpio_chip_to_port(struct gpio_chip *gc)
 {
        return gc->label[0] - 'A';
@@ -220,7 +225,8 @@ static unsigned int etraxfs_gpio_to_group_pin(struct etraxfs_gpio_chip *chip,
 
 static void etraxfs_gpio_irq_ack(struct irq_data *d)
 {
-       struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+       struct etraxfs_gpio_chip *chip =
+               to_etraxfs(irq_data_get_irq_chip_data(d));
        struct etraxfs_gpio_block *block = chip->block;
        unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
 
@@ -229,7 +235,8 @@ static void etraxfs_gpio_irq_ack(struct irq_data *d)
 
 static void etraxfs_gpio_irq_mask(struct irq_data *d)
 {
-       struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+       struct etraxfs_gpio_chip *chip =
+               to_etraxfs(irq_data_get_irq_chip_data(d));
        struct etraxfs_gpio_block *block = chip->block;
        unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
 
@@ -241,7 +248,8 @@ static void etraxfs_gpio_irq_mask(struct irq_data *d)
 
 static void etraxfs_gpio_irq_unmask(struct irq_data *d)
 {
-       struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+       struct etraxfs_gpio_chip *chip =
+               to_etraxfs(irq_data_get_irq_chip_data(d));
        struct etraxfs_gpio_block *block = chip->block;
        unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
 
@@ -253,7 +261,8 @@ static void etraxfs_gpio_irq_unmask(struct irq_data *d)
 
 static int etraxfs_gpio_irq_set_type(struct irq_data *d, u32 type)
 {
-       struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+       struct etraxfs_gpio_chip *chip =
+               to_etraxfs(irq_data_get_irq_chip_data(d));
        struct etraxfs_gpio_block *block = chip->block;
        unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
        u32 cfg;
@@ -289,7 +298,8 @@ static int etraxfs_gpio_irq_set_type(struct irq_data *d, u32 type)
 
 static int etraxfs_gpio_irq_request_resources(struct irq_data *d)
 {
-       struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+       struct etraxfs_gpio_chip *chip =
+               to_etraxfs(irq_data_get_irq_chip_data(d));
        struct etraxfs_gpio_block *block = chip->block;
        unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
        int ret = -EBUSY;
@@ -319,7 +329,8 @@ out:
 
 static void etraxfs_gpio_irq_release_resources(struct irq_data *d)
 {
-       struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+       struct etraxfs_gpio_chip *chip =
+               to_etraxfs(irq_data_get_irq_chip_data(d));
        struct etraxfs_gpio_block *block = chip->block;
        unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
 
index a3f07537fe6250864e7e829b97821555e79bf426..bd5193c67a9c272cb0bebf779ce3277d22aac479 100644 (file)
@@ -579,40 +579,20 @@ EXPORT_SYMBOL_GPL(bgpio_init);
 
 static void __iomem *bgpio_map(struct platform_device *pdev,
                               const char *name,
-                              resource_size_t sane_sz,
-                              int *err)
+                              resource_size_t sane_sz)
 {
-       struct device *dev = &pdev->dev;
        struct resource *r;
-       resource_size_t start;
        resource_size_t sz;
-       void __iomem *ret;
-
-       *err = 0;
 
        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        if (!r)
                return NULL;
 
        sz = resource_size(r);
-       if (sz != sane_sz) {
-               *err = -EINVAL;
-               return NULL;
-       }
-
-       start = r->start;
-       if (!devm_request_mem_region(dev, start, sz, r->name)) {
-               *err = -EBUSY;
-               return NULL;
-       }
-
-       ret = devm_ioremap(dev, start, sz);
-       if (!ret) {
-               *err = -ENOMEM;
-               return NULL;
-       }
+       if (sz != sane_sz)
+               return IOMEM_ERR_PTR(-EINVAL);
 
-       return ret;
+       return devm_ioremap_resource(&pdev->dev, r);
 }
 
 static int bgpio_pdev_probe(struct platform_device *pdev)
@@ -636,25 +616,25 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
 
        sz = resource_size(r);
 
-       dat = bgpio_map(pdev, "dat", sz, &err);
-       if (!dat)
-               return err ? err : -EINVAL;
+       dat = bgpio_map(pdev, "dat", sz);
+       if (IS_ERR(dat))
+               return PTR_ERR(dat);
 
-       set = bgpio_map(pdev, "set", sz, &err);
-       if (err)
-               return err;
+       set = bgpio_map(pdev, "set", sz);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
 
-       clr = bgpio_map(pdev, "clr", sz, &err);
-       if (err)
-               return err;
+       clr = bgpio_map(pdev, "clr", sz);
+       if (IS_ERR(clr))
+               return PTR_ERR(clr);
 
-       dirout = bgpio_map(pdev, "dirout", sz, &err);
-       if (err)
-               return err;
+       dirout = bgpio_map(pdev, "dirout", sz);
+       if (IS_ERR(dirout))
+               return PTR_ERR(dirout);
 
-       dirin = bgpio_map(pdev, "dirin", sz, &err);
-       if (err)
-               return err;
+       dirin = bgpio_map(pdev, "dirin", sz);
+       if (IS_ERR(dirin))
+               return PTR_ERR(dirin);
 
        bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL);
        if (!bgc)
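
The bgpio_map() rework above replaces the manual request_mem_region()/devm_ioremap() sequence and its error out-parameter with devm_ioremap_resource(), so callers switch to the IS_ERR()/PTR_ERR() convention; a missing named resource still yields NULL rather than an error, which keeps the "set", "clr", "dirout" and "dirin" regions optional. A hedged sketch of the resulting idiom, using a hypothetical helper name:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_map(struct platform_device *pdev, const char *name)
{
        struct resource *r;

        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        if (!r)
                return NULL;                    /* optional region absent */

        /* Requests and maps the region; returns an ERR_PTR() on failure. */
        return devm_ioremap_resource(&pdev->dev, r);
}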
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
new file mode 100644 (file)
index 0000000..21f6f7c
--- /dev/null
@@ -0,0 +1,411 @@
+/*
+ *  GPIO interface for IT87xx Super I/O chips
+ *
+ *  Author: Diego Elio Pettenò <flameeyes@flameeyes.eu>
+ *
+ *  Based on it87_wdt.c     by Oliver Schuster
+ *           gpio-it8761e.c by Denis Turischev
+ *           gpio-stmpe.c   by Rabin Vincent
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+
+/* Chip Id numbers */
+#define NO_DEV_ID      0xffff
+#define IT8728_ID      0x8728
+#define IT8732_ID      0x8732
+#define IT8761_ID      0x8761
+
+/* IO Ports */
+#define REG            0x2e
+#define VAL            0x2f
+
+/* Logical device Numbers LDN */
+#define GPIO           0x07
+
+/* Configuration Registers and Functions */
+#define LDNREG         0x07
+#define CHIPID         0x20
+#define CHIPREV                0x22
+
+/**
+ * struct it87_gpio - it87-specific GPIO chip
+ * @chip: the underlying gpio_chip structure
+ * @lock: a lock to avoid races between operations
+ * @io_base: base address for gpio ports
+ * @io_size: size of the port range starting from io_base.
+ * @output_base: Super I/O register address for Output Enable register
+ * @simple_base: Super I/O 'Simple I/O' Enable register
+ * @simple_size: Super I/O 'Simple I/O' Enable register size; this is
+ *     required because IT87xx chips might only provide Simple I/O
+ *     switches on a subset of lines, whereas the others keep the
+ *     same state at all times.
+ */
+struct it87_gpio {
+       struct gpio_chip chip;
+       spinlock_t lock;
+       u16 io_base;
+       u16 io_size;
+       u8 output_base;
+       u8 simple_base;
+       u8 simple_size;
+};
+
+static struct it87_gpio it87_gpio_chip = {
+       .lock = __SPIN_LOCK_UNLOCKED(it87_gpio_chip.lock),
+};
+
+static inline struct it87_gpio *to_it87_gpio(struct gpio_chip *chip)
+{
+       return container_of(chip, struct it87_gpio, chip);
+}
+
+/* Superio chip access functions; copied from wdt_it87 */
+
+static inline int superio_enter(void)
+{
+       /*
+        * Try to reserve REG and REG + 1 for exclusive access.
+        */
+       if (!request_muxed_region(REG, 2, KBUILD_MODNAME))
+               return -EBUSY;
+
+       outb(0x87, REG);
+       outb(0x01, REG);
+       outb(0x55, REG);
+       outb(0x55, REG);
+       return 0;
+}
+
+static inline void superio_exit(void)
+{
+       outb(0x02, REG);
+       outb(0x02, VAL);
+       release_region(REG, 2);
+}
+
+static inline void superio_select(int ldn)
+{
+       outb(LDNREG, REG);
+       outb(ldn, VAL);
+}
+
+static inline int superio_inb(int reg)
+{
+       outb(reg, REG);
+       return inb(VAL);
+}
+
+static inline void superio_outb(int val, int reg)
+{
+       outb(reg, REG);
+       outb(val, VAL);
+}
+
+static inline int superio_inw(int reg)
+{
+       int val;
+
+       outb(reg++, REG);
+       val = inb(VAL) << 8;
+       outb(reg, REG);
+       val |= inb(VAL);
+       return val;
+}
+
+static inline void superio_outw(int val, int reg)
+{
+       outb(reg++, REG);
+       outb(val >> 8, VAL);
+       outb(reg, REG);
+       outb(val, VAL);
+}
+
+static inline void superio_set_mask(int mask, int reg)
+{
+       u8 curr_val = superio_inb(reg);
+       u8 new_val = curr_val | mask;
+
+       if (curr_val != new_val)
+               superio_outb(new_val, reg);
+}
+
+static inline void superio_clear_mask(int mask, int reg)
+{
+       u8 curr_val = superio_inb(reg);
+       u8 new_val = curr_val & ~mask;
+
+       if (curr_val != new_val)
+               superio_outb(new_val, reg);
+}
+
+static int it87_gpio_request(struct gpio_chip *chip, unsigned gpio_num)
+{
+       u8 mask, group;
+       int rc = 0;
+       struct it87_gpio *it87_gpio = to_it87_gpio(chip);
+
+       mask = 1 << (gpio_num % 8);
+       group = (gpio_num / 8);
+
+       spin_lock(&it87_gpio->lock);
+
+       rc = superio_enter();
+       if (rc)
+               goto exit;
+
+       /* not all the IT87xx chips support Simple I/O and not all of
+        * them allow all the lines to be set/unset to Simple I/O.
+        */
+       if (group < it87_gpio->simple_size)
+               superio_set_mask(mask, group + it87_gpio->simple_base);
+
+       /* clear output enable, setting the pin to input, as all the
+        * newly-exported GPIO interfaces are set to input.
+        */
+       superio_clear_mask(mask, group + it87_gpio->output_base);
+
+       superio_exit();
+
+exit:
+       spin_unlock(&it87_gpio->lock);
+       return rc;
+}
+
+static int it87_gpio_get(struct gpio_chip *chip, unsigned gpio_num)
+{
+       u16 reg;
+       u8 mask;
+       struct it87_gpio *it87_gpio = to_it87_gpio(chip);
+
+       mask = 1 << (gpio_num % 8);
+       reg = (gpio_num / 8) + it87_gpio->io_base;
+
+       return !!(inb(reg) & mask);
+}
+
+static int it87_gpio_direction_in(struct gpio_chip *chip, unsigned gpio_num)
+{
+       u8 mask, group;
+       int rc = 0;
+       struct it87_gpio *it87_gpio = to_it87_gpio(chip);
+
+       mask = 1 << (gpio_num % 8);
+       group = (gpio_num / 8);
+
+       spin_lock(&it87_gpio->lock);
+
+       rc = superio_enter();
+       if (rc)
+               goto exit;
+
+       /* clear the output enable bit */
+       superio_clear_mask(mask, group + it87_gpio->output_base);
+
+       superio_exit();
+
+exit:
+       spin_unlock(&it87_gpio->lock);
+       return rc;
+}
+
+static void it87_gpio_set(struct gpio_chip *chip,
+                         unsigned gpio_num, int val)
+{
+       u8 mask, curr_vals;
+       u16 reg;
+       struct it87_gpio *it87_gpio = to_it87_gpio(chip);
+
+       mask = 1 << (gpio_num % 8);
+       reg = (gpio_num / 8) + it87_gpio->io_base;
+
+       curr_vals = inb(reg);
+       if (val)
+               outb(curr_vals | mask, reg);
+       else
+               outb(curr_vals & ~mask, reg);
+}
+
+static int it87_gpio_direction_out(struct gpio_chip *chip,
+                                  unsigned gpio_num, int val)
+{
+       u8 mask, group;
+       int rc = 0;
+       struct it87_gpio *it87_gpio = to_it87_gpio(chip);
+
+       mask = 1 << (gpio_num % 8);
+       group = (gpio_num / 8);
+
+       spin_lock(&it87_gpio->lock);
+
+       rc = superio_enter();
+       if (rc)
+               goto exit;
+
+       /* set the output enable bit */
+       superio_set_mask(mask, group + it87_gpio->output_base);
+
+       it87_gpio_set(chip, gpio_num, val);
+
+       superio_exit();
+
+exit:
+       spin_unlock(&it87_gpio->lock);
+       return rc;
+}
+
+static struct gpio_chip it87_template_chip = {
+       .label                  = KBUILD_MODNAME,
+       .owner                  = THIS_MODULE,
+       .request                = it87_gpio_request,
+       .get                    = it87_gpio_get,
+       .direction_input        = it87_gpio_direction_in,
+       .set                    = it87_gpio_set,
+       .direction_output       = it87_gpio_direction_out,
+       .base                   = -1
+};
+
+static int __init it87_gpio_init(void)
+{
+       int rc = 0, i;
+       u16 chip_type;
+       u8 chip_rev, gpio_ba_reg;
+       char *labels, **labels_table;
+
+       struct it87_gpio *it87_gpio = &it87_gpio_chip;
+
+       rc = superio_enter();
+       if (rc)
+               return rc;
+
+       chip_type = superio_inw(CHIPID);
+       chip_rev  = superio_inb(CHIPREV) & 0x0f;
+       superio_exit();
+
+       it87_gpio->chip = it87_template_chip;
+
+       switch (chip_type) {
+       case IT8728_ID:
+       case IT8732_ID:
+               gpio_ba_reg = 0x62;
+               it87_gpio->io_size = 8;
+               it87_gpio->output_base = 0xc8;
+               it87_gpio->simple_base = 0xc0;
+               it87_gpio->simple_size = 5;
+               it87_gpio->chip.ngpio = 64;
+               break;
+       case IT8761_ID:
+               gpio_ba_reg = 0x60;
+               it87_gpio->io_size = 4;
+               it87_gpio->output_base = 0xf0;
+               it87_gpio->simple_size = 0;
+               it87_gpio->chip.ngpio = 16;
+               break;
+       case NO_DEV_ID:
+               pr_err("no device\n");
+               return -ENODEV;
+       default:
+               pr_err("Unknown Chip found, Chip %04x Revision %x\n",
+                      chip_type, chip_rev);
+               return -ENODEV;
+       }
+
+       rc = superio_enter();
+       if (rc)
+               return rc;
+
+       superio_select(GPIO);
+
+       /* fetch GPIO base address */
+       it87_gpio->io_base = superio_inw(gpio_ba_reg);
+
+       superio_exit();
+
+       pr_info("Found Chip IT%04x rev %x. %u GPIO lines starting at %04xh\n",
+               chip_type, chip_rev, it87_gpio->chip.ngpio,
+               it87_gpio->io_base);
+
+       if (!request_region(it87_gpio->io_base, it87_gpio->io_size,
+                                                       KBUILD_MODNAME))
+               return -EBUSY;
+
+       /* Set up aliases for the GPIO connection.
+        *
+        * ITE documentation for recent chips such as the IT8728F
+        * refers to the GPIO lines as GPxy, with a coordinate system
+        * where x is the GPIO group (starting from 1) and y is the
+        * bit within the group.
+        *
+        * By creating these aliases, we make it easier to understand
+        * which GPIO pin we're referring to.
+        */
+       labels = kcalloc(it87_gpio->chip.ngpio, sizeof("it87_gpXY"),
+                                                               GFP_KERNEL);
+       labels_table = kcalloc(it87_gpio->chip.ngpio, sizeof(const char *),
+                                                               GFP_KERNEL);
+
+       if (!labels || !labels_table) {
+               rc = -ENOMEM;
+               goto labels_free;
+       }
+
+       for (i = 0; i < it87_gpio->chip.ngpio; i++) {
+               char *label = &labels[i * sizeof("it87_gpXY")];
+
+               sprintf(label, "it87_gp%u%u", 1+(i/8), i%8);
+               labels_table[i] = label;
+       }
+
+       it87_gpio->chip.names = (const char *const*)labels_table;
+
+       rc = gpiochip_add(&it87_gpio->chip);
+       if (rc)
+               goto labels_free;
+
+       return 0;
+
+labels_free:
+       kfree(labels_table);
+       kfree(labels);
+       release_region(it87_gpio->io_base, it87_gpio->io_size);
+       return rc;
+}
+
+static void __exit it87_gpio_exit(void)
+{
+       struct it87_gpio *it87_gpio = &it87_gpio_chip;
+
+       gpiochip_remove(&it87_gpio->chip);
+       release_region(it87_gpio->io_base, it87_gpio->io_size);
+       kfree(it87_gpio->chip.names[0]);
+       kfree(it87_gpio->chip.names);
+}
+
+module_init(it87_gpio_init);
+module_exit(it87_gpio_exit);
+
+MODULE_AUTHOR("Diego Elio Pettenò <flameeyes@flameeyes.eu>");
+MODULE_DESCRIPTION("GPIO interface for IT87xx Super I/O chips");
+MODULE_LICENSE("GPL");
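
The naming comment in it87_gpio_init() maps a flat line index onto ITE's GPxy scheme; the same arithmetic also selects the data port and bit used by it87_gpio_get()/it87_gpio_set(). A small illustrative helper (not part of the driver) spelling out that mapping:

#include <linux/bitops.h>
#include <linux/kernel.h>

/* For line index i: group 1 + i / 8, bit i % 8, so i = 10 is "it87_gp22". */
static void example_line_coords(unsigned int i, unsigned int io_base)
{
        unsigned int group = 1 + i / 8;         /* 1-based group used in the label */
        unsigned int bit   = i % 8;             /* bit within the group */
        unsigned int reg   = io_base + i / 8;   /* I/O port holding this line */
        unsigned int mask  = BIT(bit);          /* mask within that port */

        pr_info("it87_gp%u%u: port %#x mask %#x\n", group, bit, reg, mask);
}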
diff --git a/drivers/gpio/gpio-it8761e.c b/drivers/gpio/gpio-it8761e.c
deleted file mode 100644 (file)
index 30a8f24..0000000
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- *  GPIO interface for IT8761E Super I/O chip
- *
- *  Author: Denis Turischev <denis@compulab.co.il>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License 2 as published
- *  by the Free Software Foundation.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; see the file COPYING.  If not, write to
- *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-
-#include <linux/gpio.h>
-
-#define SIO_CHIP_ID            0x8761
-#define CHIP_ID_HIGH_BYTE      0x20
-#define CHIP_ID_LOW_BYTE       0x21
-
-static u8 ports[2] = { 0x2e, 0x4e };
-static u8 port;
-
-static DEFINE_SPINLOCK(sio_lock);
-
-#define GPIO_NAME              "it8761-gpio"
-#define GPIO_BA_HIGH_BYTE      0x60
-#define GPIO_BA_LOW_BYTE       0x61
-#define GPIO_IOSIZE            4
-#define GPIO1X_IO              0xf0
-#define GPIO2X_IO              0xf1
-
-static u16 gpio_ba;
-
-static u8 read_reg(u8 addr, u8 port)
-{
-       outb(addr, port);
-       return inb(port + 1);
-}
-
-static void write_reg(u8 data, u8 addr, u8 port)
-{
-       outb(addr, port);
-       outb(data, port + 1);
-}
-
-static void enter_conf_mode(u8 port)
-{
-       outb(0x87, port);
-       outb(0x61, port);
-       outb(0x55, port);
-       outb((port == 0x2e) ? 0x55 : 0xaa, port);
-}
-
-static void exit_conf_mode(u8 port)
-{
-       outb(0x2, port);
-       outb(0x2, port + 1);
-}
-
-static void enter_gpio_mode(u8 port)
-{
-       write_reg(0x2, 0x7, port);
-}
-
-static int it8761e_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
-{
-       u16 reg;
-       u8 bit;
-
-       bit = gpio_num % 8;
-       reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba;
-
-       return !!(inb(reg) & (1 << bit));
-}
-
-static int it8761e_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
-{
-       u8 curr_dirs;
-       u8 io_reg, bit;
-
-       bit = gpio_num % 8;
-       io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO;
-
-       spin_lock(&sio_lock);
-
-       enter_conf_mode(port);
-       enter_gpio_mode(port);
-
-       curr_dirs = read_reg(io_reg, port);
-
-       if (curr_dirs & (1 << bit))
-               write_reg(curr_dirs & ~(1 << bit), io_reg, port);
-
-       exit_conf_mode(port);
-
-       spin_unlock(&sio_lock);
-       return 0;
-}
-
-static void it8761e_gpio_set(struct gpio_chip *gc,
-                               unsigned gpio_num, int val)
-{
-       u8 curr_vals, bit;
-       u16 reg;
-
-       bit = gpio_num % 8;
-       reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba;
-
-       spin_lock(&sio_lock);
-
-       curr_vals = inb(reg);
-       if (val)
-               outb(curr_vals | (1 << bit), reg);
-       else
-               outb(curr_vals & ~(1 << bit), reg);
-
-       spin_unlock(&sio_lock);
-}
-
-static int it8761e_gpio_direction_out(struct gpio_chip *gc,
-                                       unsigned gpio_num, int val)
-{
-       u8 curr_dirs, io_reg, bit;
-
-       bit = gpio_num % 8;
-       io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO;
-
-       it8761e_gpio_set(gc, gpio_num, val);
-
-       spin_lock(&sio_lock);
-
-       enter_conf_mode(port);
-       enter_gpio_mode(port);
-
-       curr_dirs = read_reg(io_reg, port);
-
-       if (!(curr_dirs & (1 << bit)))
-               write_reg(curr_dirs | (1 << bit), io_reg, port);
-
-       exit_conf_mode(port);
-
-       spin_unlock(&sio_lock);
-       return 0;
-}
-
-static struct gpio_chip it8761e_gpio_chip = {
-       .label                  = GPIO_NAME,
-       .owner                  = THIS_MODULE,
-       .get                    = it8761e_gpio_get,
-       .direction_input        = it8761e_gpio_direction_in,
-       .set                    = it8761e_gpio_set,
-       .direction_output       = it8761e_gpio_direction_out,
-};
-
-static int __init it8761e_gpio_init(void)
-{
-       int i, id, err;
-
-       /* chip and port detection */
-       for (i = 0; i < ARRAY_SIZE(ports); i++) {
-               spin_lock(&sio_lock);
-               enter_conf_mode(ports[i]);
-
-               id = (read_reg(CHIP_ID_HIGH_BYTE, ports[i]) << 8) +
-                               read_reg(CHIP_ID_LOW_BYTE, ports[i]);
-
-               exit_conf_mode(ports[i]);
-               spin_unlock(&sio_lock);
-
-               if (id == SIO_CHIP_ID) {
-                       port = ports[i];
-                       break;
-               }
-       }
-
-       if (!port)
-               return -ENODEV;
-
-       /* fetch GPIO base address */
-       enter_conf_mode(port);
-       enter_gpio_mode(port);
-       gpio_ba = (read_reg(GPIO_BA_HIGH_BYTE, port) << 8) +
-                               read_reg(GPIO_BA_LOW_BYTE, port);
-       exit_conf_mode(port);
-
-       if (!request_region(gpio_ba, GPIO_IOSIZE, GPIO_NAME))
-               return -EBUSY;
-
-       it8761e_gpio_chip.base = -1;
-       it8761e_gpio_chip.ngpio = 16;
-
-       err = gpiochip_add(&it8761e_gpio_chip);
-       if (err < 0)
-               goto gpiochip_add_err;
-
-       return 0;
-
-gpiochip_add_err:
-       release_region(gpio_ba, GPIO_IOSIZE);
-       gpio_ba = 0;
-       return err;
-}
-
-static void __exit it8761e_gpio_exit(void)
-{
-       if (gpio_ba) {
-               gpiochip_remove(&it8761e_gpio_chip);
-               release_region(gpio_ba, GPIO_IOSIZE);
-               gpio_ba = 0;
-       }
-}
-module_init(it8761e_gpio_init);
-module_exit(it8761e_gpio_exit);
-
-MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
-MODULE_DESCRIPTION("GPIO interface for IT8761E Super I/O chip");
-MODULE_LICENSE("GPL");
index eb68603136b0e78e76d6a87b2739efac997f2538..e39dcb0af8ae1b918a56672314f63d9913d3e238 100644 (file)
@@ -36,16 +36,6 @@ static inline struct lpc18xx_gpio_chip *to_lpc18xx_gpio(struct gpio_chip *chip)
        return container_of(chip, struct lpc18xx_gpio_chip, gpio);
 }
 
-static int lpc18xx_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(offset);
-}
-
-static void lpc18xx_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(offset);
-}
-
 static void lpc18xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
        struct lpc18xx_gpio_chip *gc = to_lpc18xx_gpio(chip);
@@ -95,8 +85,8 @@ static int lpc18xx_gpio_direction_output(struct gpio_chip *chip,
 
 static struct gpio_chip lpc18xx_chip = {
        .label                  = "lpc18xx/43xx-gpio",
-       .request                = lpc18xx_gpio_request,
-       .free                   = lpc18xx_gpio_free,
+       .request                = gpiochip_generic_request,
+       .free                   = gpiochip_generic_free,
        .direction_input        = lpc18xx_gpio_direction_input,
        .direction_output       = lpc18xx_gpio_direction_output,
        .set                    = lpc18xx_gpio_set,
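
This hunk, like the moxart and mvebu hunks that follow, drops per-driver request/free wrappers in favour of the new gpiochip_generic_request()/gpiochip_generic_free() helpers, which forward to pinctrl_request_gpio()/pinctrl_free_gpio() using the chip's own base offset. A hedged sketch of a chip wired up that way, for a hypothetical driver:

#include <linux/gpio/driver.h>

static struct gpio_chip example_chip = {
        .label   = "example",
        .request = gpiochip_generic_request,    /* pinctrl-backed request */
        .free    = gpiochip_generic_free,       /* pinctrl-backed free */
        /* .direction_*, .get and .set would be filled in by a real driver */
};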
index 18ab89e208065911cd9fe4261686579360331095..0f57d2d248ec9e367ac67a9194e2b3c8e9460948 100644 (file)
@@ -236,7 +236,6 @@ int __max730x_remove(struct device *dev)
        ts->write(dev, 0x04, 0x00);
        gpiochip_remove(&ts->chip);
        mutex_destroy(&ts->lock);
-       kfree(ts);
        return 0;
 }
 EXPORT_SYMBOL_GPL(__max730x_remove);
index abd8676ce2b69d5cdb26449c1c6d5b2fcd67d1b5..d3355a6dc9b1f126db770166de33f65fe1adbdbf 100644 (file)
 #define GPIO_DATA_IN           0x04
 #define GPIO_PIN_DIRECTION     0x08
 
-static int moxart_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(offset);
-}
-
-static void moxart_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(offset);
-}
-
 static int moxart_gpio_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -66,8 +56,8 @@ static int moxart_gpio_probe(struct platform_device *pdev)
        }
 
        bgc->gc.label = "moxart-gpio";
-       bgc->gc.request = moxart_gpio_request;
-       bgc->gc.free = moxart_gpio_free;
+       bgc->gc.request = gpiochip_generic_request;
+       bgc->gc.free = gpiochip_generic_free;
        bgc->data = bgc->read_reg(bgc->reg_set);
        bgc->gc.base = 0;
        bgc->gc.ngpio = 32;
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
deleted file mode 100644 (file)
index 4b42221..0000000
+++ /dev/null
@@ -1,453 +0,0 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-
-#define MAX_NR_GPIO 300
-
-/* Bits of interest in the GPIO_IN_OUT register.
- */
-enum {
-       GPIO_IN  = 0,
-       GPIO_OUT = 1
-};
-
-/* Bits of interest in the GPIO_INTR_STATUS register.
- */
-enum {
-       INTR_STATUS = 0,
-};
-
-/* Bits of interest in the GPIO_CFG register.
- */
-enum {
-       GPIO_OE = 9,
-};
-
-/* Bits of interest in the GPIO_INTR_CFG register.
- * When a GPIO triggers, two separate decisions are made, controlled
- * by two separate flags.
- *
- * - First, INTR_RAW_STATUS_EN controls whether or not the GPIO_INTR_STATUS
- * register for that GPIO will be updated to reflect the triggering of that
- * gpio.  If this bit is 0, this register will not be updated.
- * - Second, INTR_ENABLE controls whether an interrupt is triggered.
- *
- * If INTR_ENABLE is set and INTR_RAW_STATUS_EN is NOT set, an interrupt
- * can be triggered but the status register will not reflect it.
- */
-enum {
-       INTR_ENABLE        = 0,
-       INTR_POL_CTL       = 1,
-       INTR_DECT_CTL      = 2,
-       INTR_RAW_STATUS_EN = 3,
-};
-
-/* Codes of interest in GPIO_INTR_CFG_SU.
- */
-enum {
-       TARGET_PROC_SCORPION = 4,
-       TARGET_PROC_NONE     = 7,
-};
-
-/**
- * struct msm_gpio_dev: the MSM8660 SoC GPIO device structure
- *
- * @enabled_irqs: a bitmap used to optimize the summary-irq handler.  By
- * keeping track of which gpios are unmasked as irq sources, we avoid
- * having to do readl calls on hundreds of iomapped registers each time
- * the summary interrupt fires in order to locate the active interrupts.
- *
- * @wake_irqs: a bitmap for tracking which interrupt lines are enabled
- * as wakeup sources.  When the device is suspended, interrupts which are
- * not wakeup sources are disabled.
- *
- * @dual_edge_irqs: a bitmap used to track which irqs are configured
- * as dual-edge, as this is not supported by the hardware and requires
- * some special handling in the driver.
- */
-struct msm_gpio_dev {
-       struct gpio_chip gpio_chip;
-       DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
-       DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO);
-       DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
-       struct irq_domain *domain;
-       int summary_irq;
-       void __iomem *msm_tlmm_base;
-};
-
-static struct msm_gpio_dev msm_gpio;
-
-#define GPIO_INTR_CFG_SU(gpio)    (msm_gpio.msm_tlmm_base + 0x0400 + \
-                                                               (0x04 * (gpio)))
-#define GPIO_CONFIG(gpio)         (msm_gpio.msm_tlmm_base + 0x1000 + \
-                                                               (0x10 * (gpio)))
-#define GPIO_IN_OUT(gpio)         (msm_gpio.msm_tlmm_base + 0x1004 + \
-                                                               (0x10 * (gpio)))
-#define GPIO_INTR_CFG(gpio)       (msm_gpio.msm_tlmm_base + 0x1008 + \
-                                                               (0x10 * (gpio)))
-#define GPIO_INTR_STATUS(gpio)    (msm_gpio.msm_tlmm_base + 0x100c + \
-                                                               (0x10 * (gpio)))
-
-static DEFINE_SPINLOCK(tlmm_lock);
-
-static inline struct msm_gpio_dev *to_msm_gpio_dev(struct gpio_chip *chip)
-{
-       return container_of(chip, struct msm_gpio_dev, gpio_chip);
-}
-
-static inline void set_gpio_bits(unsigned n, void __iomem *reg)
-{
-       writel(readl(reg) | n, reg);
-}
-
-static inline void clear_gpio_bits(unsigned n, void __iomem *reg)
-{
-       writel(readl(reg) & ~n, reg);
-}
-
-static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
-       return readl(GPIO_IN_OUT(offset)) & BIT(GPIO_IN);
-}
-
-static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
-{
-       writel(val ? BIT(GPIO_OUT) : 0, GPIO_IN_OUT(offset));
-}
-
-static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       unsigned long irq_flags;
-
-       spin_lock_irqsave(&tlmm_lock, irq_flags);
-       clear_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset));
-       spin_unlock_irqrestore(&tlmm_lock, irq_flags);
-       return 0;
-}
-
-static int msm_gpio_direction_output(struct gpio_chip *chip,
-                               unsigned offset,
-                               int val)
-{
-       unsigned long irq_flags;
-
-       spin_lock_irqsave(&tlmm_lock, irq_flags);
-       msm_gpio_set(chip, offset, val);
-       set_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset));
-       spin_unlock_irqrestore(&tlmm_lock, irq_flags);
-       return 0;
-}
-
-static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return 0;
-}
-
-static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       return;
-}
-
-static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
-       struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
-       struct irq_domain *domain = g_dev->domain;
-
-       return irq_create_mapping(domain, offset);
-}
-
-/* For dual-edge interrupts in software, since the hardware has no
- * such support:
- *
- * At appropriate moments, this function may be called to flip the polarity
- * settings of both-edge irq lines to try and catch the next edge.
- *
- * The attempt is considered successful if:
- * - the status bit goes high, indicating that an edge was caught, or
- * - the input value of the gpio doesn't change during the attempt.
- * If the value changes twice during the process, that would cause the first
- * test to fail but would force the second, as two opposite
- * transitions would cause a detection no matter the polarity setting.
- *
- * The do-loop tries to sledge-hammer closed the timing hole between
- * the initial value-read and the polarity-write - if the line value changes
- * during that window, an interrupt is lost, the new polarity setting is
- * incorrect, and the first success test will fail, causing a retry.
- *
- * Algorithm comes from Google's msmgpio driver, see mach-msm/gpio.c.
- */
-static void msm_gpio_update_dual_edge_pos(unsigned gpio)
-{
-       int loop_limit = 100;
-       unsigned val, val2, intstat;
-
-       do {
-               val = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN);
-               if (val)
-                       clear_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio));
-               else
-                       set_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio));
-               val2 = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN);
-               intstat = readl(GPIO_INTR_STATUS(gpio)) & BIT(INTR_STATUS);
-               if (intstat || val == val2)
-                       return;
-       } while (loop_limit-- > 0);
-       pr_err("%s: dual-edge irq failed to stabilize, "
-              "interrupts dropped. %#08x != %#08x\n",
-              __func__, val, val2);
-}
-
-static void msm_gpio_irq_ack(struct irq_data *d)
-{
-       int gpio = d->hwirq;
-
-       writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio));
-       if (test_bit(gpio, msm_gpio.dual_edge_irqs))
-               msm_gpio_update_dual_edge_pos(gpio);
-}
-
-static void msm_gpio_irq_mask(struct irq_data *d)
-{
-       unsigned long irq_flags;
-       int gpio = d->hwirq;
-
-       spin_lock_irqsave(&tlmm_lock, irq_flags);
-       writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
-       clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
-       __clear_bit(gpio, msm_gpio.enabled_irqs);
-       spin_unlock_irqrestore(&tlmm_lock, irq_flags);
-}
-
-static void msm_gpio_irq_unmask(struct irq_data *d)
-{
-       unsigned long irq_flags;
-       int gpio = d->hwirq;
-
-       spin_lock_irqsave(&tlmm_lock, irq_flags);
-       __set_bit(gpio, msm_gpio.enabled_irqs);
-       set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
-       writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
-       spin_unlock_irqrestore(&tlmm_lock, irq_flags);
-}
-
-static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
-{
-       unsigned long irq_flags;
-       int gpio = d->hwirq;
-       uint32_t bits;
-
-       spin_lock_irqsave(&tlmm_lock, irq_flags);
-
-       bits = readl(GPIO_INTR_CFG(gpio));
-
-       if (flow_type & IRQ_TYPE_EDGE_BOTH) {
-               bits |= BIT(INTR_DECT_CTL);
-               irq_set_handler_locked(d, handle_edge_irq);
-               if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
-                       __set_bit(gpio, msm_gpio.dual_edge_irqs);
-               else
-                       __clear_bit(gpio, msm_gpio.dual_edge_irqs);
-       } else {
-               bits &= ~BIT(INTR_DECT_CTL);
-               irq_set_handler_locked(d, handle_level_irq);
-               __clear_bit(gpio, msm_gpio.dual_edge_irqs);
-       }
-
-       if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
-               bits |= BIT(INTR_POL_CTL);
-       else
-               bits &= ~BIT(INTR_POL_CTL);
-
-       writel(bits, GPIO_INTR_CFG(gpio));
-
-       if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
-               msm_gpio_update_dual_edge_pos(gpio);
-
-       spin_unlock_irqrestore(&tlmm_lock, irq_flags);
-
-       return 0;
-}
-
-/*
- * When the summary IRQ is raised, any number of GPIO lines may be high.
- * It is the job of the summary handler to find all those GPIO lines
- * which have been set as summary IRQ lines and which are triggered,
- * and to call their interrupt handlers.
- */
-static void msm_summary_irq_handler(struct irq_desc *desc)
-{
-       unsigned long i;
-       struct irq_chip *chip = irq_desc_get_chip(desc);
-
-       chained_irq_enter(chip, desc);
-
-       for_each_set_bit(i, msm_gpio.enabled_irqs, MAX_NR_GPIO) {
-               if (readl(GPIO_INTR_STATUS(i)) & BIT(INTR_STATUS))
-                       generic_handle_irq(irq_find_mapping(msm_gpio.domain,
-                                                               i));
-       }
-
-       chained_irq_exit(chip, desc);
-}
-
-static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
-{
-       int gpio = d->hwirq;
-
-       if (on) {
-               if (bitmap_empty(msm_gpio.wake_irqs, MAX_NR_GPIO))
-                       irq_set_irq_wake(msm_gpio.summary_irq, 1);
-               set_bit(gpio, msm_gpio.wake_irqs);
-       } else {
-               clear_bit(gpio, msm_gpio.wake_irqs);
-               if (bitmap_empty(msm_gpio.wake_irqs, MAX_NR_GPIO))
-                       irq_set_irq_wake(msm_gpio.summary_irq, 0);
-       }
-
-       return 0;
-}
-
-static struct irq_chip msm_gpio_irq_chip = {
-       .name           = "msmgpio",
-       .irq_mask       = msm_gpio_irq_mask,
-       .irq_unmask     = msm_gpio_irq_unmask,
-       .irq_ack        = msm_gpio_irq_ack,
-       .irq_set_type   = msm_gpio_irq_set_type,
-       .irq_set_wake   = msm_gpio_irq_set_wake,
-};
-
-static struct lock_class_key msm_gpio_lock_class;
-
-static int msm_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq,
-                                  irq_hw_number_t hwirq)
-{
-       irq_set_lockdep_class(irq, &msm_gpio_lock_class);
-       irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
-                       handle_level_irq);
-
-       return 0;
-}
-
-static const struct irq_domain_ops msm_gpio_irq_domain_ops = {
-       .xlate = irq_domain_xlate_twocell,
-       .map = msm_gpio_irq_domain_map,
-};
-
-static int msm_gpio_probe(struct platform_device *pdev)
-{
-       int ret, ngpio;
-       struct resource *res;
-
-       if (of_property_read_u32(pdev->dev.of_node, "ngpio", &ngpio)) {
-               dev_err(&pdev->dev, "%s: ngpio property missing\n", __func__);
-               return -EINVAL;
-       }
-
-       if (ngpio > MAX_NR_GPIO)
-               WARN(1, "ngpio exceeds the MAX_NR_GPIO. Increase MAX_NR_GPIO\n");
-
-       bitmap_zero(msm_gpio.enabled_irqs, MAX_NR_GPIO);
-       bitmap_zero(msm_gpio.wake_irqs, MAX_NR_GPIO);
-       bitmap_zero(msm_gpio.dual_edge_irqs, MAX_NR_GPIO);
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       msm_gpio.msm_tlmm_base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(msm_gpio.msm_tlmm_base))
-               return PTR_ERR(msm_gpio.msm_tlmm_base);
-
-       msm_gpio.gpio_chip.ngpio = ngpio;
-       msm_gpio.gpio_chip.label = pdev->name;
-       msm_gpio.gpio_chip.dev = &pdev->dev;
-       msm_gpio.gpio_chip.base = 0;
-       msm_gpio.gpio_chip.direction_input = msm_gpio_direction_input;
-       msm_gpio.gpio_chip.direction_output = msm_gpio_direction_output;
-       msm_gpio.gpio_chip.get = msm_gpio_get;
-       msm_gpio.gpio_chip.set = msm_gpio_set;
-       msm_gpio.gpio_chip.to_irq = msm_gpio_to_irq;
-       msm_gpio.gpio_chip.request = msm_gpio_request;
-       msm_gpio.gpio_chip.free = msm_gpio_free;
-
-       ret = gpiochip_add(&msm_gpio.gpio_chip);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "gpiochip_add failed with error %d\n", ret);
-               return ret;
-       }
-
-       msm_gpio.summary_irq = platform_get_irq(pdev, 0);
-       if (msm_gpio.summary_irq < 0) {
-               dev_err(&pdev->dev, "No Summary irq defined for msmgpio\n");
-               return msm_gpio.summary_irq;
-       }
-
-       msm_gpio.domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
-                                               &msm_gpio_irq_domain_ops,
-                                               &msm_gpio);
-       if (!msm_gpio.domain)
-               return -ENODEV;
-
-       irq_set_chained_handler(msm_gpio.summary_irq, msm_summary_irq_handler);
-
-       return 0;
-}
-
-static const struct of_device_id msm_gpio_of_match[] = {
-       { .compatible = "qcom,msm-gpio", },
-       { },
-};
-MODULE_DEVICE_TABLE(of, msm_gpio_of_match);
-
-static int msm_gpio_remove(struct platform_device *dev)
-{
-       gpiochip_remove(&msm_gpio.gpio_chip);
-
-       irq_set_handler(msm_gpio.summary_irq, NULL);
-
-       return 0;
-}
-
-static struct platform_driver msm_gpio_driver = {
-       .probe = msm_gpio_probe,
-       .remove = msm_gpio_remove,
-       .driver = {
-               .name = "msmgpio",
-               .of_match_table = msm_gpio_of_match,
-       },
-};
-
-module_platform_driver(msm_gpio_driver)
-
-MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
-MODULE_DESCRIPTION("Driver for Qualcomm MSM TLMMv2 SoC GPIOs");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:msmgpio");
index df418b81456dfdc5f7a876221cc6c9a2ef80c380..d428b97876c52e933ee37b42540313b777c39c73 100644 (file)
@@ -185,16 +185,6 @@ static void __iomem *mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip)
  * Functions implementing the gpio_chip methods
  */
 
-static int mvebu_gpio_request(struct gpio_chip *chip, unsigned pin)
-{
-       return pinctrl_request_gpio(chip->base + pin);
-}
-
-static void mvebu_gpio_free(struct gpio_chip *chip, unsigned pin)
-{
-       pinctrl_free_gpio(chip->base + pin);
-}
-
 static void mvebu_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
 {
        struct mvebu_gpio_chip *mvchip =
@@ -709,8 +699,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
        mvchip->soc_variant = soc_variant;
        mvchip->chip.label = dev_name(&pdev->dev);
        mvchip->chip.dev = &pdev->dev;
-       mvchip->chip.request = mvebu_gpio_request;
-       mvchip->chip.free = mvebu_gpio_free;
+       mvchip->chip.request = gpiochip_generic_request;
+       mvchip->chip.free = gpiochip_generic_free;
        mvchip->chip.direction_input = mvebu_gpio_direction_input;
        mvchip->chip.get = mvebu_gpio_get;
        mvchip->chip.direction_output = mvebu_gpio_direction_output;
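
The mvebu hunks above are the first of several in this series that drop per-driver request/free wrappers in favour of the new gpiochip_generic_request()/gpiochip_generic_free() helpers (tb10x, tz1090-pdc, vf610, pl061 and zx are converted the same way further down). A minimal sketch of the helpers, mirroring the gpiolib.c hunk later in this commitdiff; they simply translate the chip-relative offset into a global GPIO number and hand it to pinctrl:

#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>

int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
{
        /* map to the global GPIO number and request the pin from pinctrl */
        return pinctrl_request_gpio(chip->base + offset);
}

void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
{
        /* release the pin back to pinctrl */
        pinctrl_free_gpio(chip->base + offset);
}
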
index 5236db161e76047db31bf8c8e5ae7299f12fd06d..56d2d026e62e42bf6fec7db5ad2aa25aebd970f3 100644 (file)
@@ -51,7 +51,7 @@ struct gpio_regs {
 struct gpio_bank {
        struct list_head node;
        void __iomem *base;
-       u16 irq;
+       int irq;
        u32 non_wakeup_gpios;
        u32 enabled_non_wakeup_gpios;
        struct gpio_regs context;
@@ -59,6 +59,7 @@ struct gpio_bank {
        u32 level_mask;
        u32 toggle_mask;
        raw_spinlock_t lock;
+       raw_spinlock_t wa_lock;
        struct gpio_chip chip;
        struct clk *dbck;
        u32 mod_usage;
@@ -496,9 +497,6 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
                (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
                return -EINVAL;
 
-       if (!BANK_USED(bank))
-               pm_runtime_get_sync(bank->dev);
-
        raw_spin_lock_irqsave(&bank->lock, flags);
        retval = omap_set_gpio_triggering(bank, offset, type);
        if (retval) {
@@ -521,8 +519,6 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
        return 0;
 
 error:
-       if (!BANK_USED(bank))
-               pm_runtime_put(bank->dev);
        return retval;
 }
 
@@ -654,8 +650,13 @@ static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
 {
        struct gpio_bank *bank = omap_irq_data_get_bank(d);
        unsigned offset = d->hwirq;
+       int ret;
+
+       ret = omap_set_gpio_wakeup(bank, offset, enable);
+       if (!ret)
+               ret = irq_set_irq_wake(bank->irq, enable);
 
-       return omap_set_gpio_wakeup(bank, offset, enable);
+       return ret;
 }
 
 static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -709,26 +710,21 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
  * line's interrupt handler has been run, we may miss some nested
  * interrupts.
  */
-static void omap_gpio_irq_handler(struct irq_desc *desc)
+static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
 {
        void __iomem *isr_reg = NULL;
        u32 isr;
        unsigned int bit;
-       struct gpio_bank *bank;
-       int unmasked = 0;
-       struct irq_chip *irqchip = irq_desc_get_chip(desc);
-       struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+       struct gpio_bank *bank = gpiobank;
+       unsigned long wa_lock_flags;
        unsigned long lock_flags;
 
-       chained_irq_enter(irqchip, desc);
-
-       bank = container_of(chip, struct gpio_bank, chip);
        isr_reg = bank->base + bank->regs->irqstatus;
-       pm_runtime_get_sync(bank->dev);
-
        if (WARN_ON(!isr_reg))
                goto exit;
 
+       pm_runtime_get_sync(bank->dev);
+
        while (1) {
                u32 isr_saved, level_mask = 0;
                u32 enabled;
@@ -750,13 +746,6 @@ static void omap_gpio_irq_handler(struct irq_desc *desc)
 
                raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
 
-               /* if there is only edge sensitive GPIO pin interrupts
-               configured, we could unmask GPIO bank interrupt immediately */
-               if (!level_mask && !unmasked) {
-                       unmasked = 1;
-                       chained_irq_exit(irqchip, desc);
-               }
-
                if (!isr)
                        break;
 
@@ -777,18 +766,18 @@ static void omap_gpio_irq_handler(struct irq_desc *desc)
 
                        raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
 
+                       raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);
+
                        generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
                                                            bit));
+
+                       raw_spin_unlock_irqrestore(&bank->wa_lock,
+                                                  wa_lock_flags);
                }
        }
-       /* if bank has any level sensitive GPIO pin interrupt
-       configured, we must unmask the bank interrupt only after
-       handler(s) are executed in order to avoid spurious bank
-       interrupt */
 exit:
-       if (!unmasked)
-               chained_irq_exit(irqchip, desc);
        pm_runtime_put(bank->dev);
+       return IRQ_HANDLED;
 }
 
 static unsigned int omap_gpio_irq_startup(struct irq_data *d)
@@ -797,9 +786,6 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d)
        unsigned long flags;
        unsigned offset = d->hwirq;
 
-       if (!BANK_USED(bank))
-               pm_runtime_get_sync(bank->dev);
-
        raw_spin_lock_irqsave(&bank->lock, flags);
 
        if (!LINE_USED(bank->mod_usage, offset))
@@ -815,8 +801,6 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d)
        return 0;
 err:
        raw_spin_unlock_irqrestore(&bank->lock, flags);
-       if (!BANK_USED(bank))
-               pm_runtime_put(bank->dev);
        return -EINVAL;
 }
 
@@ -835,6 +819,19 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
                omap_clear_gpio_debounce(bank, offset);
        omap_disable_gpio_module(bank, offset);
        raw_spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+static void omap_gpio_irq_bus_lock(struct irq_data *data)
+{
+       struct gpio_bank *bank = omap_irq_data_get_bank(data);
+
+       if (!BANK_USED(bank))
+               pm_runtime_get_sync(bank->dev);
+}
+
+static void gpio_irq_bus_sync_unlock(struct irq_data *data)
+{
+       struct gpio_bank *bank = omap_irq_data_get_bank(data);
 
        /*
         * If this is the last IRQ to be freed in the bank,
@@ -1132,7 +1129,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
        }
 
        ret = gpiochip_irqchip_add(&bank->chip, irqc,
-                                  irq_base, omap_gpio_irq_handler,
+                                  irq_base, handle_bad_irq,
                                   IRQ_TYPE_NONE);
 
        if (ret) {
@@ -1141,10 +1138,14 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
                return -ENODEV;
        }
 
-       gpiochip_set_chained_irqchip(&bank->chip, irqc,
-                                    bank->irq, omap_gpio_irq_handler);
+       gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, NULL);
 
-       return 0;
+       ret = devm_request_irq(bank->dev, bank->irq, omap_gpio_irq_handler,
+                              0, dev_name(bank->dev), bank);
+       if (ret)
+               gpiochip_remove(&bank->chip);
+
+       return ret;
 }
 
 static const struct of_device_id omap_gpio_match[];
@@ -1183,6 +1184,8 @@ static int omap_gpio_probe(struct platform_device *pdev)
        irqc->irq_unmask = omap_gpio_unmask_irq,
        irqc->irq_set_type = omap_gpio_irq_type,
        irqc->irq_set_wake = omap_gpio_wake_enable,
+       irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
+       irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
        irqc->name = dev_name(&pdev->dev);
 
        bank->irq = platform_get_irq(pdev, 0);
@@ -1224,6 +1227,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
                bank->set_dataout = omap_set_gpio_dataout_mask;
 
        raw_spin_lock_init(&bank->lock);
+       raw_spin_lock_init(&bank->wa_lock);
 
        /* Static mapping, never released */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
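
Condensed, the OMAP hunks above replace the chained flow handler for the bank's summary interrupt with an ordinary requested IRQ, and move the pm_runtime get/put out of the atomic irqchip callbacks into the new irq_bus_lock/irq_bus_sync_unlock pair. A rough sketch of the resulting wiring; the function name is invented, error unwinding is trimmed, and omap_gpio_irq_handler, struct gpio_bank and BANK_USED() are the driver's own:

static int omap_gpio_irqchip_setup(struct gpio_bank *bank,
                                   struct irq_chip *irqc)
{
        int ret;

        /* lines start out with handle_bad_irq until a per-line flow
         * handler is installed when a trigger type is configured
         * (not shown in the hunks above) */
        ret = gpiochip_irqchip_add(&bank->chip, irqc, 0,
                                   handle_bad_irq, IRQ_TYPE_NONE);
        if (ret)
                return ret;

        /* no parent_handler: the summary IRQ is not chained any more */
        gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, NULL);

        /* the bank interrupt becomes a regular requested IRQ */
        return devm_request_irq(bank->dev, bank->irq, omap_gpio_irq_handler,
                                0, dev_name(bank->dev), bank);
}
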
index 50caeb1ee3509da04b57d38dd332396e76ea25de..2d4892cc70fb0c4acc149dd6a9864439644a1673 100644 (file)
@@ -21,6 +21,7 @@
 #ifdef CONFIG_OF_GPIO
 #include <linux/of_platform.h>
 #endif
+#include <linux/acpi.h>
 
 #define PCA953X_INPUT          0
 #define PCA953X_OUTPUT         1
@@ -42,6 +43,9 @@
 #define PCA_INT                        0x0100
 #define PCA953X_TYPE           0x1000
 #define PCA957X_TYPE           0x2000
+#define PCA_TYPE_MASK          0xF000
+
+#define PCA_CHIP_TYPE(x)       ((x) & PCA_TYPE_MASK)
 
 static const struct i2c_device_id pca953x_id[] = {
        { "pca9505", 40 | PCA953X_TYPE | PCA_INT, },
@@ -67,11 +71,18 @@ static const struct i2c_device_id pca953x_id[] = {
        { "tca6408", 8  | PCA953X_TYPE | PCA_INT, },
        { "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
        { "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
+       { "tca9539", 16 | PCA953X_TYPE | PCA_INT, },
        { "xra1202", 8  | PCA953X_TYPE },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, pca953x_id);
 
+static const struct acpi_device_id pca953x_acpi_ids[] = {
+       { "INT3491", 16 | PCA953X_TYPE | PCA_INT, },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);
+
 #define MAX_BANK 5
 #define BANK_SZ 8
 
@@ -95,6 +106,7 @@ struct pca953x_chip {
        struct gpio_chip gpio_chip;
        const char *const *names;
        int     chip_type;
+       unsigned long driver_data;
 };
 
 static inline struct pca953x_chip *to_pca(struct gpio_chip *gc)
@@ -517,14 +529,13 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
 }
 
 static int pca953x_irq_setup(struct pca953x_chip *chip,
-                            const struct i2c_device_id *id,
                             int irq_base)
 {
        struct i2c_client *client = chip->client;
        int ret, i, offset = 0;
 
        if (client->irq && irq_base != -1
-                       && (id->driver_data & PCA_INT)) {
+                       && (chip->driver_data & PCA_INT)) {
 
                switch (chip->chip_type) {
                case PCA953X_TYPE:
@@ -581,12 +592,11 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
 
 #else /* CONFIG_GPIO_PCA953X_IRQ */
 static int pca953x_irq_setup(struct pca953x_chip *chip,
-                            const struct i2c_device_id *id,
                             int irq_base)
 {
        struct i2c_client *client = chip->client;
 
-       if (irq_base != -1 && (id->driver_data & PCA_INT))
+       if (irq_base != -1 && (chip->driver_data & PCA_INT))
                dev_warn(&client->dev, "interrupt support not compiled in\n");
 
        return 0;
@@ -635,11 +645,15 @@ static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
                memset(val, 0xFF, NBANK(chip));
        else
                memset(val, 0, NBANK(chip));
-       pca953x_write_regs(chip, PCA957X_INVRT, val);
+       ret = pca953x_write_regs(chip, PCA957X_INVRT, val);
+       if (ret)
+               goto out;
 
        /* To enable register 6, 7 to control pull up and pull down */
        memset(val, 0x02, NBANK(chip));
-       pca953x_write_regs(chip, PCA957X_BKEN, val);
+       ret = pca953x_write_regs(chip, PCA957X_BKEN, val);
+       if (ret)
+               goto out;
 
        return 0;
 out:
@@ -673,14 +687,26 @@ static int pca953x_probe(struct i2c_client *client,
 
        chip->client = client;
 
-       chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE);
+       if (id) {
+               chip->driver_data = id->driver_data;
+       } else {
+               const struct acpi_device_id *id;
+
+               id = acpi_match_device(pca953x_acpi_ids, &client->dev);
+               if (!id)
+                       return -ENODEV;
+
+               chip->driver_data = id->driver_data;
+       }
+
+       chip->chip_type = PCA_CHIP_TYPE(chip->driver_data);
 
        mutex_init(&chip->i2c_lock);
 
        /* initialize cached registers from their original values.
         * we can't share this chip with another i2c master.
         */
-       pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
+       pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
 
        if (chip->chip_type == PCA953X_TYPE)
                ret = device_pca953x_init(chip, invert);
@@ -693,7 +719,7 @@ static int pca953x_probe(struct i2c_client *client,
        if (ret)
                return ret;
 
-       ret = pca953x_irq_setup(chip, id, irq_base);
+       ret = pca953x_irq_setup(chip, irq_base);
        if (ret)
                return ret;
 
@@ -765,6 +791,7 @@ static struct i2c_driver pca953x_driver = {
        .driver = {
                .name   = "pca953x",
                .of_match_table = pca953x_dt_ids,
+               .acpi_match_table = ACPI_PTR(pca953x_acpi_ids),
        },
        .probe          = pca953x_probe,
        .remove         = pca953x_remove,
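
The pca953x hunks above let a single probe path serve both I2C/device-tree and ACPI enumeration: the driver data now comes either from the i2c_device_id or, when probe is called with a NULL id, from the new pca953x_acpi_ids table, and PCA_CHIP_TYPE()/PCA_GPIO_MASK then split it into chip type and line count. A sketch of that selection logic; the helper name is invented for illustration:

static int pca953x_get_driver_data(struct i2c_client *client,
                                   const struct i2c_device_id *id,
                                   unsigned long *driver_data)
{
        const struct acpi_device_id *acpi_id;

        if (id) {
                /* classic I2C or device-tree match */
                *driver_data = id->driver_data;
                return 0;
        }

        /* ACPI enumeration passes no i2c_device_id; use the ACPI table */
        acpi_id = acpi_match_device(pca953x_acpi_ids, &client->dev);
        if (!acpi_id)
                return -ENODEV;

        *driver_data = acpi_id->driver_data;
        return 0;
}
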
index 229ef653e0f8f631172e1956ad4c9f9c74c50b02..4d4b3767670220a03076301ced6b3b12cffcd265 100644 (file)
@@ -52,36 +52,12 @@ struct pl061_gpio {
 
        void __iomem            *base;
        struct gpio_chip        gc;
-       bool                    uses_pinctrl;
 
 #ifdef CONFIG_PM
        struct pl061_context_save_regs csave_regs;
 #endif
 };
 
-static int pl061_gpio_request(struct gpio_chip *gc, unsigned offset)
-{
-       /*
-        * Map back to global GPIO space and request muxing, the direction
-        * parameter does not matter for this controller.
-        */
-       struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
-       int gpio = gc->base + offset;
-
-       if (chip->uses_pinctrl)
-               return pinctrl_request_gpio(gpio);
-       return 0;
-}
-
-static void pl061_gpio_free(struct gpio_chip *gc, unsigned offset)
-{
-       struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
-       int gpio = gc->base + offset;
-
-       if (chip->uses_pinctrl)
-               pinctrl_free_gpio(gpio);
-}
-
 static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
 {
        struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
@@ -152,6 +128,17 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
        if (offset < 0 || offset >= PL061_GPIO_NR)
                return -EINVAL;
 
+       if ((trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) &&
+           (trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)))
+       {
+               dev_err(gc->dev,
+                       "trying to configure line %d for both level and edge "
+                       "detection, choose one!\n",
+                       offset);
+               return -EINVAL;
+       }
+
+
        spin_lock_irqsave(&chip->lock, flags);
 
        gpioiev = readb(chip->base + GPIOIEV);
@@ -159,23 +146,53 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
        gpioibe = readb(chip->base + GPIOIBE);
 
        if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
+               bool polarity = trigger & IRQ_TYPE_LEVEL_HIGH;
+
+               /* Disable edge detection */
+               gpioibe &= ~bit;
+               /* Enable level detection */
                gpiois |= bit;
-               if (trigger & IRQ_TYPE_LEVEL_HIGH)
+               /* Select polarity */
+               if (polarity)
                        gpioiev |= bit;
                else
                        gpioiev &= ~bit;
-       } else
+               irq_set_handler_locked(d, handle_level_irq);
+               dev_dbg(gc->dev, "line %d: IRQ on %s level\n",
+                       offset,
+                       polarity ? "HIGH" : "LOW");
+       } else if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+               /* Disable level detection */
                gpiois &= ~bit;
-
-       if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
-               /* Setting this makes GPIOEV be ignored */
+               /* Select both edges, setting this makes GPIOEV be ignored */
                gpioibe |= bit;
-       else {
+               irq_set_handler_locked(d, handle_edge_irq);
+               dev_dbg(gc->dev, "line %d: IRQ on both edges\n", offset);
+       } else if ((trigger & IRQ_TYPE_EDGE_RISING) ||
+                  (trigger & IRQ_TYPE_EDGE_FALLING)) {
+               bool rising = trigger & IRQ_TYPE_EDGE_RISING;
+
+               /* Disable level detection */
+               gpiois &= ~bit;
+               /* Clear detection on both edges */
                gpioibe &= ~bit;
-               if (trigger & IRQ_TYPE_EDGE_RISING)
+               /* Select edge */
+               if (rising)
                        gpioiev |= bit;
-               else if (trigger & IRQ_TYPE_EDGE_FALLING)
+               else
                        gpioiev &= ~bit;
+               irq_set_handler_locked(d, handle_edge_irq);
+               dev_dbg(gc->dev, "line %d: IRQ on %s edge\n",
+                       offset,
+                       rising ? "RISING" : "FALLING");
+       } else {
+               /* No trigger: disable everything */
+               gpiois &= ~bit;
+               gpioibe &= ~bit;
+               gpioiev &= ~bit;
+               irq_set_handler_locked(d, handle_bad_irq);
+               dev_warn(gc->dev, "no trigger selected for line %d\n",
+                        offset);
        }
 
        writeb(gpiois, chip->base + GPIOIS);
@@ -198,7 +215,6 @@ static void pl061_irq_handler(struct irq_desc *desc)
        chained_irq_enter(irqchip, desc);
 
        pending = readb(chip->base + GPIOMIS);
-       writeb(pending, chip->base + GPIOIC);
        if (pending) {
                for_each_set_bit(offset, &pending, PL061_GPIO_NR)
                        generic_handle_irq(irq_find_mapping(gc->irqdomain,
@@ -234,8 +250,28 @@ static void pl061_irq_unmask(struct irq_data *d)
        spin_unlock(&chip->lock);
 }
 
+/**
+ * pl061_irq_ack() - ACK an edge IRQ
+ * @d: IRQ data for this IRQ
+ *
+ * This gets called from the edge IRQ handler to ACK the edge IRQ
+ * in the GPIOIC (interrupt-clear) register. For level IRQs this is
+ * not needed: these go away when the level signal goes away.
+ */
+static void pl061_irq_ack(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
+       u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
+
+       spin_lock(&chip->lock);
+       writeb(mask, chip->base + GPIOIC);
+       spin_unlock(&chip->lock);
+}
+
 static struct irq_chip pl061_irqchip = {
        .name           = "pl061",
+       .irq_ack        = pl061_irq_ack,
        .irq_mask       = pl061_irq_mask,
        .irq_unmask     = pl061_irq_unmask,
        .irq_set_type   = pl061_irq_type,
@@ -269,11 +305,11 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
                return PTR_ERR(chip->base);
 
        spin_lock_init(&chip->lock);
-       if (of_property_read_bool(dev->of_node, "gpio-ranges"))
-               chip->uses_pinctrl = true;
+       if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
+               chip->gc.request = gpiochip_generic_request;
+               chip->gc.free = gpiochip_generic_free;
+       }
 
-       chip->gc.request = pl061_gpio_request;
-       chip->gc.free = pl061_gpio_free;
        chip->gc.direction_input = pl061_direction_input;
        chip->gc.direction_output = pl061_direction_output;
        chip->gc.get = pl061_get_value;
@@ -298,7 +334,7 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
        }
 
        ret = gpiochip_irqchip_add(&chip->gc, &pl061_irqchip,
-                                  irq_base, handle_simple_irq,
+                                  irq_base, handle_bad_irq,
                                   IRQ_TYPE_NONE);
        if (ret) {
                dev_info(&adev->dev, "could not add irqchip\n");
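
For reference, the PL061 trigger registers touched above carry one bit per line; the rework selects exactly one detection mode, installs a matching flow handler, and leaves acknowledging edge interrupts to the new pl061_irq_ack() instead of clearing GPIOIC wholesale in the chained handler. Roughly, per the hunks (register macros are the driver's existing ones, locking is omitted, and the helper itself is hypothetical):

/*
 * GPIOIS  - 1 = level sensitive, 0 = edge sensitive
 * GPIOIBE - 1 = trigger on both edges (GPIOIEV is then ignored)
 * GPIOIEV - polarity select: high level / rising edge when set
 * GPIOIC  - write 1 to acknowledge a latched edge interrupt
 */
static void pl061_select_rising_edge(struct pl061_gpio *chip, unsigned offset)
{
        u8 bit = BIT(offset % PL061_GPIO_NR);

        writeb(readb(chip->base + GPIOIS) & ~bit, chip->base + GPIOIS);   /* edge mode */
        writeb(readb(chip->base + GPIOIBE) & ~bit, chip->base + GPIOIBE); /* single edge */
        writeb(readb(chip->base + GPIOIEV) | bit, chip->base + GPIOIEV);  /* rising */
}
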
index 65bc9f47a68ed98c89889431a5ece6cf1f8a01b7..34b02b42ab9e5bbfd2bc2f7bd16ba31c704718ec 100644 (file)
@@ -102,7 +102,7 @@ static int sdv_xlate(struct irq_domain *h, struct device_node *node,
 {
        u32 line, type;
 
-       if (node != h->of_node)
+       if (node != irq_domain_get_of_node(h))
                return -EINVAL;
 
        if (intsize < 2)
index 9c6b96707c9f286a9bbf13c756beab7d4cedbace..76f920173a2f847cd7e370149b5d417d625ed750 100644 (file)
@@ -160,6 +160,11 @@ static const struct of_device_id sx150x_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, sx150x_of_match);
 
+struct sx150x_chip *to_sx150x(struct gpio_chip *gc)
+{
+       return container_of(gc, struct sx150x_chip, gpio_chip);
+}
+
 static s32 sx150x_i2c_write(struct i2c_client *client, u8 reg, u8 val)
 {
        s32 err = i2c_smbus_write_byte_data(client, reg, val);
@@ -296,11 +301,9 @@ static int sx150x_io_output(struct sx150x_chip *chip, unsigned offset, int val)
 
 static int sx150x_gpio_get(struct gpio_chip *gc, unsigned offset)
 {
-       struct sx150x_chip *chip;
+       struct sx150x_chip *chip = to_sx150x(gc);
        int status = -EINVAL;
 
-       chip = container_of(gc, struct sx150x_chip, gpio_chip);
-
        if (!offset_is_oscio(chip, offset)) {
                mutex_lock(&chip->lock);
                status = sx150x_get_io(chip, offset);
@@ -312,9 +315,7 @@ static int sx150x_gpio_get(struct gpio_chip *gc, unsigned offset)
 
 static void sx150x_gpio_set(struct gpio_chip *gc, unsigned offset, int val)
 {
-       struct sx150x_chip *chip;
-
-       chip = container_of(gc, struct sx150x_chip, gpio_chip);
+       struct sx150x_chip *chip = to_sx150x(gc);
 
        mutex_lock(&chip->lock);
        if (offset_is_oscio(chip, offset))
@@ -326,11 +327,9 @@ static void sx150x_gpio_set(struct gpio_chip *gc, unsigned offset, int val)
 
 static int sx150x_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
 {
-       struct sx150x_chip *chip;
+       struct sx150x_chip *chip = to_sx150x(gc);
        int status = -EINVAL;
 
-       chip = container_of(gc, struct sx150x_chip, gpio_chip);
-
        if (!offset_is_oscio(chip, offset)) {
                mutex_lock(&chip->lock);
                status = sx150x_io_input(chip, offset);
@@ -343,11 +342,9 @@ static int sx150x_gpio_direction_output(struct gpio_chip *gc,
                                        unsigned offset,
                                        int val)
 {
-       struct sx150x_chip *chip;
+       struct sx150x_chip *chip = to_sx150x(gc);
        int status = 0;
 
-       chip = container_of(gc, struct sx150x_chip, gpio_chip);
-
        if (!offset_is_oscio(chip, offset)) {
                mutex_lock(&chip->lock);
                status = sx150x_io_output(chip, offset, val);
@@ -358,7 +355,7 @@ static int sx150x_gpio_direction_output(struct gpio_chip *gc,
 
 static void sx150x_irq_mask(struct irq_data *d)
 {
-       struct sx150x_chip *chip = irq_data_get_irq_chip_data(d);
+       struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d));
        unsigned n = d->hwirq;
 
        chip->irq_masked |= (1 << n);
@@ -367,7 +364,7 @@ static void sx150x_irq_mask(struct irq_data *d)
 
 static void sx150x_irq_unmask(struct irq_data *d)
 {
-       struct sx150x_chip *chip = irq_data_get_irq_chip_data(d);
+       struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d));
        unsigned n = d->hwirq;
 
        chip->irq_masked &= ~(1 << n);
@@ -376,7 +373,7 @@ static void sx150x_irq_unmask(struct irq_data *d)
 
 static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-       struct sx150x_chip *chip = irq_data_get_irq_chip_data(d);
+       struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d));
        unsigned n, val = 0;
 
        if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
@@ -431,14 +428,14 @@ static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id)
 
 static void sx150x_irq_bus_lock(struct irq_data *d)
 {
-       struct sx150x_chip *chip = irq_data_get_irq_chip_data(d);
+       struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d));
 
        mutex_lock(&chip->lock);
 }
 
 static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
 {
-       struct sx150x_chip *chip = irq_data_get_irq_chip_data(d);
+       struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d));
        unsigned n;
 
        if (chip->irq_update == NO_UPDATE_PENDING)
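
The sx150x conversions above (and the vf610 and zynq ones further down) all follow from how gpiolib's irqchip helpers register interrupts: the per-IRQ chip data is the struct gpio_chip itself, so driver callbacks must convert it back to their private structure with container_of() rather than casting the pointer directly. The idiom, sketched for a made-up driver:

struct foo_gpio {
        u32 irq_masked;                 /* driver-private state */
        struct gpio_chip gpio_chip;
};

static struct foo_gpio *to_foo(struct gpio_chip *gc)
{
        return container_of(gc, struct foo_gpio, gpio_chip);
}

static void foo_irq_mask(struct irq_data *d)
{
        /* gpiolib stored the struct gpio_chip pointer as irq chip data */
        struct foo_gpio *chip = to_foo(irq_data_get_irq_chip_data(d));

        chip->irq_masked |= BIT(d->hwirq);
}
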
index 12c99d969b983638ca16838919f34b0db76c7148..4356e6c20fc59a3792c8575417db5dfa389b3b66 100644 (file)
@@ -138,16 +138,6 @@ static int tb10x_gpio_direction_out(struct gpio_chip *chip,
        return 0;
 }
 
-static int tb10x_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void tb10x_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
        struct tb10x_gpio *tb10x_gpio = to_tb10x_gpio(chip);
@@ -213,8 +203,8 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
        tb10x_gpio->gc.get              = tb10x_gpio_get;
        tb10x_gpio->gc.direction_output = tb10x_gpio_direction_out;
        tb10x_gpio->gc.set              = tb10x_gpio_set;
-       tb10x_gpio->gc.request          = tb10x_gpio_request;
-       tb10x_gpio->gc.free             = tb10x_gpio_free;
+       tb10x_gpio->gc.request          = gpiochip_generic_request;
+       tb10x_gpio->gc.free             = gpiochip_generic_free;
        tb10x_gpio->gc.base             = -1;
        tb10x_gpio->gc.ngpio            = ngpio;
        tb10x_gpio->gc.can_sleep        = false;
index ede7e403ffdee8565530de696e77af69f083e7e7..3623d009d8087fc6d1fc1538c732b260e36709de 100644 (file)
@@ -137,16 +137,6 @@ static void tz1090_pdc_gpio_set(struct gpio_chip *chip, unsigned int offset,
        __global_unlock2(lstat);
 }
 
-static int tz1090_pdc_gpio_request(struct gpio_chip *chip, unsigned int offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void tz1090_pdc_gpio_free(struct gpio_chip *chip, unsigned int offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static int tz1090_pdc_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
 {
        struct tz1090_pdc_gpio *priv = to_pdc(chip);
@@ -203,8 +193,8 @@ static int tz1090_pdc_gpio_probe(struct platform_device *pdev)
        priv->chip.direction_output     = tz1090_pdc_gpio_direction_output;
        priv->chip.get                  = tz1090_pdc_gpio_get;
        priv->chip.set                  = tz1090_pdc_gpio_set;
-       priv->chip.free                 = tz1090_pdc_gpio_free;
-       priv->chip.request              = tz1090_pdc_gpio_request;
+       priv->chip.free                 = gpiochip_generic_free;
+       priv->chip.request              = gpiochip_generic_request;
        priv->chip.to_irq               = tz1090_pdc_gpio_to_irq;
        priv->chip.of_node              = np;
 
index 069f9e4b7daae0b5afe7bef0543b822191de22f3..87b950cec6ec929689a3d52b428f9224acebc623 100644 (file)
@@ -62,6 +62,11 @@ struct vf610_gpio_port {
 
 static struct irq_chip vf610_gpio_irq_chip;
 
+static struct vf610_gpio_port *to_vf610_gp(struct gpio_chip *gc)
+{
+       return container_of(gc, struct vf610_gpio_port, gc);
+}
+
 static const struct of_device_id vf610_gpio_dt_ids[] = {
        { .compatible = "fsl,vf610-gpio" },
        { /* sentinel */ }
@@ -77,28 +82,16 @@ static inline u32 vf610_gpio_readl(void __iomem *reg)
        return readl_relaxed(reg);
 }
 
-static int vf610_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void vf610_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
 {
-       struct vf610_gpio_port *port =
-               container_of(gc, struct vf610_gpio_port, gc);
+       struct vf610_gpio_port *port = to_vf610_gp(gc);
 
        return !!(vf610_gpio_readl(port->gpio_base + GPIO_PDIR) & BIT(gpio));
 }
 
 static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 {
-       struct vf610_gpio_port *port =
-               container_of(gc, struct vf610_gpio_port, gc);
+       struct vf610_gpio_port *port = to_vf610_gp(gc);
        unsigned long mask = BIT(gpio);
 
        if (val)
@@ -122,7 +115,8 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
 
 static void vf610_gpio_irq_handler(struct irq_desc *desc)
 {
-       struct vf610_gpio_port *port = irq_desc_get_handler_data(desc);
+       struct vf610_gpio_port *port =
+               to_vf610_gp(irq_desc_get_handler_data(desc));
        struct irq_chip *chip = irq_desc_get_chip(desc);
        int pin;
        unsigned long irq_isfr;
@@ -142,7 +136,8 @@ static void vf610_gpio_irq_handler(struct irq_desc *desc)
 
 static void vf610_gpio_irq_ack(struct irq_data *d)
 {
-       struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d);
+       struct vf610_gpio_port *port =
+               to_vf610_gp(irq_data_get_irq_chip_data(d));
        int gpio = d->hwirq;
 
        vf610_gpio_writel(BIT(gpio), port->base + PORT_ISFR);
@@ -150,7 +145,8 @@ static void vf610_gpio_irq_ack(struct irq_data *d)
 
 static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
 {
-       struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d);
+       struct vf610_gpio_port *port =
+               to_vf610_gp(irq_data_get_irq_chip_data(d));
        u8 irqc;
 
        switch (type) {
@@ -185,7 +181,8 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
 
 static void vf610_gpio_irq_mask(struct irq_data *d)
 {
-       struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d);
+       struct vf610_gpio_port *port =
+               to_vf610_gp(irq_data_get_irq_chip_data(d));
        void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
 
        vf610_gpio_writel(0, pcr_base);
@@ -193,7 +190,8 @@ static void vf610_gpio_irq_mask(struct irq_data *d)
 
 static void vf610_gpio_irq_unmask(struct irq_data *d)
 {
-       struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d);
+       struct vf610_gpio_port *port =
+               to_vf610_gp(irq_data_get_irq_chip_data(d));
        void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
 
        vf610_gpio_writel(port->irqc[d->hwirq] << PORT_PCR_IRQC_OFFSET,
@@ -202,7 +200,8 @@ static void vf610_gpio_irq_unmask(struct irq_data *d)
 
 static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
 {
-       struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d);
+       struct vf610_gpio_port *port =
+               to_vf610_gp(irq_data_get_irq_chip_data(d));
 
        if (enable)
                enable_irq_wake(port->irq);
@@ -255,8 +254,8 @@ static int vf610_gpio_probe(struct platform_device *pdev)
        gc->ngpio = VF610_GPIO_PER_PORT;
        gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT;
 
-       gc->request = vf610_gpio_request;
-       gc->free = vf610_gpio_free;
+       gc->request = gpiochip_generic_request;
+       gc->free = gpiochip_generic_free;
        gc->direction_input = vf610_gpio_direction_input;
        gc->get = vf610_gpio_get;
        gc->direction_output = vf610_gpio_direction_output;
index e02499a15e72263846e813ee1f990916113923b3..bc06a2cd2c1d6663add40cec337953feebcb443c 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
 
 /*
  * XLP GPIO has multiple 32 bit registers for each feature where each register
@@ -208,25 +209,28 @@ static struct irq_chip xlp_gpio_irq_chip = {
        .flags          = IRQCHIP_ONESHOT_SAFE,
 };
 
-static irqreturn_t xlp_gpio_generic_handler(int irq, void *data)
+static void xlp_gpio_generic_handler(struct irq_desc *desc)
 {
-       struct xlp_gpio_priv *priv = data;
+       struct xlp_gpio_priv *priv = irq_desc_get_handler_data(desc);
+       struct irq_chip *irqchip = irq_desc_get_chip(desc);
        int gpio, regoff;
        u32 gpio_stat;
 
        regoff = -1;
        gpio_stat = 0;
+
+       chained_irq_enter(irqchip, desc);
        for_each_set_bit(gpio, priv->gpio_enabled_mask, XLP_MAX_NR_GPIO) {
                if (regoff != gpio / XLP_GPIO_REGSZ) {
                        regoff = gpio / XLP_GPIO_REGSZ;
                        gpio_stat = readl(priv->gpio_intr_stat + regoff * 4);
                }
+
                if (gpio_stat & BIT(gpio % XLP_GPIO_REGSZ))
                        generic_handle_irq(irq_find_mapping(
                                                priv->chip.irqdomain, gpio));
        }
-
-       return IRQ_HANDLED;
+       chained_irq_exit(irqchip, desc);
 }
 
 static int xlp_gpio_dir_output(struct gpio_chip *gc, unsigned gpio, int state)
@@ -378,12 +382,6 @@ static int xlp_gpio_probe(struct platform_device *pdev)
        gc->get = xlp_gpio_get;
 
        spin_lock_init(&priv->lock);
-
-       err = devm_request_irq(&pdev->dev, irq, xlp_gpio_generic_handler,
-                       IRQ_TYPE_NONE, pdev->name, priv);
-       if (err)
-               return err;
-
        irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
        if (irq_base < 0) {
                dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
@@ -401,6 +399,9 @@ static int xlp_gpio_probe(struct platform_device *pdev)
                goto out_gpio_remove;
        }
 
+       gpiochip_set_chained_irqchip(gc, &xlp_gpio_irq_chip, irq,
+                       xlp_gpio_generic_handler);
+
        dev_info(&pdev->dev, "registered %d GPIOs\n", gc->ngpio);
 
        return 0;
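
The XLP change above goes the opposite way from the OMAP one: the summary interrupt is no longer requested with devm_request_irq() but installed as a chained flow handler via gpiochip_set_chained_irqchip(), with the body bracketed by chained_irq_enter()/chained_irq_exit(). The general shape, reusing the made-up foo_gpio structure from the earlier sketch; gpiochip_set_chained_irqchip() stores the gpio_chip as the handler data, which is also why the zynq handler below uses a container_of() helper:

static void foo_gpio_chained_handler(struct irq_desc *desc)
{
        struct irq_chip *irqchip = irq_desc_get_chip(desc);
        struct gpio_chip *gc = irq_desc_get_handler_data(desc);
        struct foo_gpio *priv = container_of(gc, struct foo_gpio, gpio_chip);
        unsigned long pending;
        int bit;

        chained_irq_enter(irqchip, desc);               /* mask/ack the parent */

        pending = foo_read_pending_status(priv);        /* hypothetical MMIO read */
        for_each_set_bit(bit, &pending, gc->ngpio)
                generic_handle_irq(irq_find_mapping(gc->irqdomain, bit));

        chained_irq_exit(irqchip, desc);                /* eoi/unmask the parent */
}
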
index 4b8a2691070537cde2affdce5ff7624d032ce193..1dcf7a66dd36ed62eda1cf2bb4b4a867a29b89c5 100644 (file)
@@ -41,7 +41,6 @@ struct zx_gpio {
 
        void __iomem            *base;
        struct gpio_chip        gc;
-       bool                    uses_pinctrl;
 };
 
 static inline struct zx_gpio *to_zx(struct gpio_chip *gc)
@@ -49,25 +48,6 @@ static inline struct zx_gpio *to_zx(struct gpio_chip *gc)
        return container_of(gc, struct zx_gpio, gc);
 }
 
-static int zx_gpio_request(struct gpio_chip *gc, unsigned offset)
-{
-       struct zx_gpio *chip = to_zx(gc);
-       int gpio = gc->base + offset;
-
-       if (chip->uses_pinctrl)
-               return pinctrl_request_gpio(gpio);
-       return 0;
-}
-
-static void zx_gpio_free(struct gpio_chip *gc, unsigned offset)
-{
-       struct zx_gpio *chip = to_zx(gc);
-       int gpio = gc->base + offset;
-
-       if (chip->uses_pinctrl)
-               pinctrl_free_gpio(gpio);
-}
-
 static int zx_direction_input(struct gpio_chip *gc, unsigned offset)
 {
        struct zx_gpio *chip = to_zx(gc);
@@ -252,12 +232,12 @@ static int zx_gpio_probe(struct platform_device *pdev)
                return PTR_ERR(chip->base);
 
        spin_lock_init(&chip->lock);
-       if (of_property_read_bool(dev->of_node, "gpio-ranges"))
-               chip->uses_pinctrl = true;
+       if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
+               chip->gc.request = gpiochip_generic_request;
+               chip->gc.free = gpiochip_generic_free;
+       }
 
        id = of_alias_get_id(dev->of_node, "gpio");
-       chip->gc.request = zx_gpio_request;
-       chip->gc.free = zx_gpio_free;
        chip->gc.direction_input = zx_direction_input;
        chip->gc.direction_output = zx_direction_output;
        chip->gc.get = zx_get_value;
index 1d1a5865ede90449d8e2b4abba88f14f6c225b82..8abeacac588579c16eaf7e537f7da96b44b2d490 100644 (file)
@@ -130,6 +130,12 @@ struct zynq_platform_data {
 
 static struct irq_chip zynq_gpio_level_irqchip;
 static struct irq_chip zynq_gpio_edge_irqchip;
+
+static struct zynq_gpio *to_zynq_gpio(struct gpio_chip *gc)
+{
+       return container_of(gc, struct zynq_gpio, chip);
+}
+
 /**
  * zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank
  * for a given pin in the GPIO device
@@ -177,7 +183,7 @@ static int zynq_gpio_get_value(struct gpio_chip *chip, unsigned int pin)
 {
        u32 data;
        unsigned int bank_num, bank_pin_num;
-       struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip);
+       struct zynq_gpio *gpio = to_zynq_gpio(chip);
 
        zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
 
@@ -201,7 +207,7 @@ static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
                                int state)
 {
        unsigned int reg_offset, bank_num, bank_pin_num;
-       struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip);
+       struct zynq_gpio *gpio = to_zynq_gpio(chip);
 
        zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
 
@@ -238,7 +244,7 @@ static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
 {
        u32 reg;
        unsigned int bank_num, bank_pin_num;
-       struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip);
+       struct zynq_gpio *gpio = to_zynq_gpio(chip);
 
        zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
 
@@ -271,7 +277,7 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
 {
        u32 reg;
        unsigned int bank_num, bank_pin_num;
-       struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip);
+       struct zynq_gpio *gpio = to_zynq_gpio(chip);
 
        zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
 
@@ -301,7 +307,8 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
 static void zynq_gpio_irq_mask(struct irq_data *irq_data)
 {
        unsigned int device_pin_num, bank_num, bank_pin_num;
-       struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data);
+       struct zynq_gpio *gpio =
+               to_zynq_gpio(irq_data_get_irq_chip_data(irq_data));
 
        device_pin_num = irq_data->hwirq;
        zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio);
@@ -321,7 +328,8 @@ static void zynq_gpio_irq_mask(struct irq_data *irq_data)
 static void zynq_gpio_irq_unmask(struct irq_data *irq_data)
 {
        unsigned int device_pin_num, bank_num, bank_pin_num;
-       struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data);
+       struct zynq_gpio *gpio =
+               to_zynq_gpio(irq_data_get_irq_chip_data(irq_data));
 
        device_pin_num = irq_data->hwirq;
        zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio);
@@ -340,7 +348,8 @@ static void zynq_gpio_irq_unmask(struct irq_data *irq_data)
 static void zynq_gpio_irq_ack(struct irq_data *irq_data)
 {
        unsigned int device_pin_num, bank_num, bank_pin_num;
-       struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data);
+       struct zynq_gpio *gpio =
+               to_zynq_gpio(irq_data_get_irq_chip_data(irq_data));
 
        device_pin_num = irq_data->hwirq;
        zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio);
@@ -390,7 +399,8 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
 {
        u32 int_type, int_pol, int_any;
        unsigned int device_pin_num, bank_num, bank_pin_num;
-       struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data);
+       struct zynq_gpio *gpio =
+               to_zynq_gpio(irq_data_get_irq_chip_data(irq_data));
 
        device_pin_num = irq_data->hwirq;
        zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio);
@@ -453,7 +463,8 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
 
 static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on)
 {
-       struct zynq_gpio *gpio = irq_data_get_irq_chip_data(data);
+       struct zynq_gpio *gpio =
+               to_zynq_gpio(irq_data_get_irq_chip_data(data));
 
        irq_set_irq_wake(gpio->irq, on);
 
@@ -518,7 +529,8 @@ static void zynq_gpio_irqhandler(struct irq_desc *desc)
 {
        u32 int_sts, int_enb;
        unsigned int bank_num;
-       struct zynq_gpio *gpio = irq_desc_get_handler_data(desc);
+       struct zynq_gpio *gpio =
+               to_zynq_gpio(irq_desc_get_handler_data(desc));
        struct irq_chip *irqchip = irq_desc_get_chip(desc);
 
        chained_irq_enter(irqchip, desc);
index 143a9bdbaa53e1ba056a02549e521ccd6efc2220..bbcac3af2a7ab84804fa862fe6a31869ca35aca3 100644 (file)
@@ -304,7 +304,6 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
        if (ACPI_FAILURE(status))
                return;
 
-       INIT_LIST_HEAD(&acpi_gpio->events);
        acpi_walk_resources(handle, "_AEI",
                            acpi_gpiochip_request_interrupt, acpi_gpio);
 }
@@ -603,6 +602,25 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
                                break;
                        }
                }
+
+               /*
+                * The same GPIO can be shared between operation region and
+                * event but only if the access here is ACPI_READ. In that
+                * case we "borrow" the event GPIO instead.
+                */
+               if (!found && agpio->sharable == ACPI_SHARED &&
+                    function == ACPI_READ) {
+                       struct acpi_gpio_event *event;
+
+                       list_for_each_entry(event, &achip->events, node) {
+                               if (event->pin == pin) {
+                                       desc = event->desc;
+                                       found = true;
+                                       break;
+                               }
+                       }
+               }
+
                if (!found) {
                        desc = gpiochip_request_own_desc(chip, pin,
                                                         "ACPI:OpRegion");
@@ -719,6 +737,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
        }
 
        acpi_gpio->chip = chip;
+       INIT_LIST_HEAD(&acpi_gpio->events);
 
        status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
        if (ACPI_FAILURE(status)) {
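
Two related fixes in the gpiolib-acpi hunks above: an ACPI operation-region read may now "borrow" a GPIO that is already held as an _AEI event pin instead of failing to request it a second time, and because that lookup walks achip->events, the list initialisation moves from acpi_gpiochip_request_interrupts() to acpi_gpiochip_add() so the list is valid before either user touches it. The borrowed-descriptor lookup, as added in the address-space handler:

        /* only a shared GPIO, and only for reads, may reuse the event pin */
        if (!found && agpio->sharable == ACPI_SHARED && function == ACPI_READ) {
                struct acpi_gpio_event *event;

                list_for_each_entry(event, &achip->events, node) {
                        if (event->pin == pin) {
                                desc = event->desc;
                                found = true;
                                break;
                        }
                }
        }
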
index 8b830996fe0212d3ae0153ae5b708a84fa53976e..3a5c7011ad3b3e832d7cce6e55ed62562bba94be 100644 (file)
@@ -28,10 +28,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
        if (!desc && gpio_is_valid(gpio))
                return -EPROBE_DEFER;
 
-       err = gpiod_request(desc, label);
-       if (err)
-               return err;
-
        if (flags & GPIOF_OPEN_DRAIN)
                set_bit(FLAG_OPEN_DRAIN, &desc->flags);
 
@@ -41,6 +37,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
        if (flags & GPIOF_ACTIVE_LOW)
                set_bit(FLAG_ACTIVE_LOW, &desc->flags);
 
+       err = gpiod_request(desc, label);
+       if (err)
+               return err;
+
        if (flags & GPIOF_DIR_IN)
                err = gpiod_direction_input(desc);
        else
index fa6e3c8823d614dba89167e9eca1ad2dd74577b8..5fe34a9df3e6b8538ddc47a8ec6008654c89d89c 100644 (file)
@@ -119,20 +119,20 @@ int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
 EXPORT_SYMBOL(of_get_named_gpio_flags);
 
 /**
- * of_get_gpio_hog() - Get a GPIO hog descriptor, names and flags for GPIO API
+ * of_parse_own_gpio() - Get a GPIO hog descriptor, names and flags for GPIO API
  * @np:                device node to get GPIO from
  * @name:      GPIO line name
  * @lflags:    gpio_lookup_flags - returned from of_find_gpio() or
- *             of_get_gpio_hog()
+ *             of_parse_own_gpio()
  * @dflags:    gpiod_flags - optional GPIO initialization flags
  *
  * Returns GPIO descriptor to use with Linux GPIO API, or one of the errno
  * value on the error condition.
  */
-static struct gpio_desc *of_get_gpio_hog(struct device_node *np,
-                                 const char **name,
-                                 enum gpio_lookup_flags *lflags,
-                                 enum gpiod_flags *dflags)
+static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
+                                          const char **name,
+                                          enum gpio_lookup_flags *lflags,
+                                          enum gpiod_flags *dflags)
 {
        struct device_node *chip_np;
        enum of_gpio_flags xlate_flags;
@@ -196,13 +196,13 @@ static struct gpio_desc *of_get_gpio_hog(struct device_node *np,
 }
 
 /**
- * of_gpiochip_scan_hogs - Scan gpio-controller and apply GPIO hog as requested
+ * of_gpiochip_scan_gpios - Scan gpio-controller for gpio definitions
  * @chip:      gpio chip to act on
  *
  * This is only used by of_gpiochip_add to request/set GPIO initial
  * configuration.
  */
-static void of_gpiochip_scan_hogs(struct gpio_chip *chip)
+static void of_gpiochip_scan_gpios(struct gpio_chip *chip)
 {
        struct gpio_desc *desc = NULL;
        struct device_node *np;
@@ -214,7 +214,7 @@ static void of_gpiochip_scan_hogs(struct gpio_chip *chip)
                if (!of_property_read_bool(np, "gpio-hog"))
                        continue;
 
-               desc = of_get_gpio_hog(np, &name, &lflags, &dflags);
+               desc = of_parse_own_gpio(np, &name, &lflags, &dflags);
                if (IS_ERR(desc))
                        continue;
 
@@ -440,7 +440,7 @@ int of_gpiochip_add(struct gpio_chip *chip)
 
        of_node_get(chip->of_node);
 
-       of_gpiochip_scan_hogs(chip);
+       of_gpiochip_scan_gpios(chip);
 
        return 0;
 }
index 5db3445552b176d2c11ca8b225ef88b4b80d22eb..6798355c61c6f763c6d30cc5ec623d0afd873827 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/acpi.h>
 #include <linux/gpio/driver.h>
 #include <linux/gpio/machine.h>
+#include <linux/pinctrl/consumer.h>
 
 #include "gpiolib.h"
 
@@ -47,8 +48,6 @@
  */
 DEFINE_SPINLOCK(gpio_lock);
 
-#define GPIO_OFFSET_VALID(chip, offset) (offset >= 0 && offset < chip->ngpio)
-
 static DEFINE_MUTEX(gpio_lookup_lock);
 static LIST_HEAD(gpio_lookup_list);
 LIST_HEAD(gpio_chips);
@@ -218,6 +217,68 @@ static int gpiochip_add_to_list(struct gpio_chip *chip)
        return err;
 }
 
+/**
+ * Convert a GPIO name to its descriptor
+ */
+static struct gpio_desc *gpio_name_to_desc(const char * const name)
+{
+       struct gpio_chip *chip;
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
+       list_for_each_entry(chip, &gpio_chips, list) {
+               int i;
+
+               for (i = 0; i != chip->ngpio; ++i) {
+                       struct gpio_desc *gpio = &chip->desc[i];
+
+                       if (!gpio->name)
+                               continue;
+
+                       if (!strcmp(gpio->name, name)) {
+                               spin_unlock_irqrestore(&gpio_lock, flags);
+                               return gpio;
+                       }
+               }
+       }
+
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
+       return NULL;
+}
+
+/*
+ * Takes the names from gc->names and checks if they are all unique. If they
+ * are, they are assigned to their gpio descriptors.
+ *
+ * Returns -EEXIST if one of the names is already used for a different GPIO.
+ */
+static int gpiochip_set_desc_names(struct gpio_chip *gc)
+{
+       int i;
+
+       if (!gc->names)
+               return 0;
+
+       /* First check all names if they are unique */
+       for (i = 0; i != gc->ngpio; ++i) {
+               struct gpio_desc *gpio;
+
+               gpio = gpio_name_to_desc(gc->names[i]);
+               if (gpio)
+                       dev_warn(gc->dev, "Detected name collision for "
+                                "GPIO name '%s'\n",
+                                gc->names[i]);
+       }
+
+       /* Then add all names to the GPIO descriptors */
+       for (i = 0; i != gc->ngpio; ++i)
+               gc->desc[i].name = gc->names[i];
+
+       return 0;
+}
+
 /**
  * gpiochip_add() - register a gpio_chip
  * @chip: the chip to register, with chip->base initialized
@@ -290,6 +351,10 @@ int gpiochip_add(struct gpio_chip *chip)
        if (!chip->owner && chip->dev && chip->dev->driver)
                chip->owner = chip->dev->driver->owner;
 
+       status = gpiochip_set_desc_names(chip);
+       if (status)
+               goto err_remove_from_list;
+
        status = of_gpiochip_add(chip);
        if (status)
                goto err_remove_chip;
@@ -310,6 +375,7 @@ err_remove_chip:
        acpi_gpiochip_remove(chip);
        gpiochip_free_hogs(chip);
        of_gpiochip_remove(chip);
+err_remove_from_list:
        spin_lock_irqsave(&gpio_lock, flags);
        list_del(&chip->list);
        spin_unlock_irqrestore(&gpio_lock, flags);
@@ -680,6 +746,28 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
 
 #endif /* CONFIG_GPIOLIB_IRQCHIP */
 
+/**
+ * gpiochip_generic_request() - request the gpio function for a pin
+ * @chip: the gpiochip owning the GPIO
+ * @offset: the offset of the GPIO to request for GPIO function
+ */
+int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
+{
+       return pinctrl_request_gpio(chip->base + offset);
+}
+EXPORT_SYMBOL_GPL(gpiochip_generic_request);
+
+/**
+ * gpiochip_generic_free() - free the gpio function from a pin
+ * @chip: the gpiochip to request the gpio function for
+ * @offset: the offset of the GPIO to free from GPIO function
+ */
+void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
+{
+       pinctrl_free_gpio(chip->base + offset);
+}
+EXPORT_SYMBOL_GPL(gpiochip_generic_free);
+
 #ifdef CONFIG_PINCTRL
 
 /**
@@ -839,6 +927,14 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
                spin_lock_irqsave(&gpio_lock, flags);
        }
 done:
+       if (status < 0) {
+               /* Clear flags that might have been set by the caller before
+                * requesting the GPIO.
+                */
+               clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
+               clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
+               clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
+       }
        spin_unlock_irqrestore(&gpio_lock, flags);
        return status;
 }
@@ -928,7 +1024,7 @@ const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
 {
        struct gpio_desc *desc;
 
-       if (!GPIO_OFFSET_VALID(chip, offset))
+       if (offset >= chip->ngpio)
                return NULL;
 
        desc = &chip->desc[offset];
@@ -1735,6 +1831,13 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
        if (of_flags & OF_GPIO_ACTIVE_LOW)
                *flags |= GPIO_ACTIVE_LOW;
 
+       if (of_flags & OF_GPIO_SINGLE_ENDED) {
+               if (of_flags & OF_GPIO_ACTIVE_LOW)
+                       *flags |= GPIO_OPEN_DRAIN;
+               else
+                       *flags |= GPIO_OPEN_SOURCE;
+       }
+
        return desc;
 }
 
@@ -1953,13 +2056,28 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(gpiod_get_optional);
 
+/**
+ * gpiod_parse_flags - helper function to parse GPIO lookup flags
+ * @desc:      gpio to be setup
+ * @lflags:    gpio_lookup_flags - returned from of_find_gpio() or
+ *             of_get_gpio_hog()
+ *
+ * Set the GPIO descriptor flags based on the given GPIO lookup flags.
+ */
+static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
+{
+       if (lflags & GPIO_ACTIVE_LOW)
+               set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+       if (lflags & GPIO_OPEN_DRAIN)
+               set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+       if (lflags & GPIO_OPEN_SOURCE)
+               set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+}
 
 /**
  * gpiod_configure_flags - helper function to configure a given GPIO
  * @desc:      gpio whose value will be assigned
  * @con_id:    function within the GPIO consumer
- * @lflags:    gpio_lookup_flags - returned from of_find_gpio() or
- *             of_get_gpio_hog()
  * @dflags:    gpiod_flags - optional GPIO initialization flags
  *
  * Return 0 on success, -ENOENT if no GPIO has been assigned to the
@@ -1967,17 +2085,10 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
  * occurred while trying to acquire the GPIO.
  */
 static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
-               unsigned long lflags, enum gpiod_flags dflags)
+                                enum gpiod_flags dflags)
 {
        int status;
 
-       if (lflags & GPIO_ACTIVE_LOW)
-               set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-       if (lflags & GPIO_OPEN_DRAIN)
-               set_bit(FLAG_OPEN_DRAIN, &desc->flags);
-       if (lflags & GPIO_OPEN_SOURCE)
-               set_bit(FLAG_OPEN_SOURCE, &desc->flags);
-
        /* No particular flag request, return here... */
        if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
                pr_debug("no flags found for %s\n", con_id);
@@ -2044,11 +2155,13 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
                return desc;
        }
 
+       gpiod_parse_flags(desc, lookupflags);
+
        status = gpiod_request(desc, con_id);
        if (status < 0)
                return ERR_PTR(status);
 
-       status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
+       status = gpiod_configure_flags(desc, con_id, flags);
        if (status < 0) {
                dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
                gpiod_put(desc);
@@ -2078,6 +2191,7 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
 {
        struct gpio_desc *desc = ERR_PTR(-ENODEV);
        bool active_low = false;
+       bool single_ended = false;
        int ret;
 
        if (!fwnode)
@@ -2088,8 +2202,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
 
                desc = of_get_named_gpiod_flags(to_of_node(fwnode), propname, 0,
                                                &flags);
-               if (!IS_ERR(desc))
+               if (!IS_ERR(desc)) {
                        active_low = flags & OF_GPIO_ACTIVE_LOW;
+                       single_ended = flags & OF_GPIO_SINGLE_ENDED;
+               }
        } else if (is_acpi_node(fwnode)) {
                struct acpi_gpio_info info;
 
@@ -2102,14 +2218,20 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
        if (IS_ERR(desc))
                return desc;
 
+       if (active_low)
+               set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+
+       if (single_ended) {
+               if (active_low)
+                       set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+               else
+                       set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+       }
+
        ret = gpiod_request(desc, NULL);
        if (ret)
                return ERR_PTR(ret);
 
-       /* Only value flag can be set from both DT and ACPI is active_low */
-       if (active_low)
-               set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-
        return desc;
 }
 EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@@ -2162,6 +2284,8 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
        chip = gpiod_to_chip(desc);
        hwnum = gpio_chip_hwgpio(desc);
 
+       gpiod_parse_flags(desc, lflags);
+
        local_desc = gpiochip_request_own_desc(chip, hwnum, name);
        if (IS_ERR(local_desc)) {
                pr_err("requesting hog GPIO %s (chip %s, offset %d) failed\n",
@@ -2169,7 +2293,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
                return PTR_ERR(local_desc);
        }
 
-       status = gpiod_configure_flags(desc, name, lflags, dflags);
+       status = gpiod_configure_flags(desc, name, dflags);
        if (status < 0) {
                pr_err("setup of hog GPIO %s (chip %s, offset %d) failed\n",
                       name, chip->label, hwnum);
@@ -2309,14 +2433,19 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
        int                     is_irq;
 
        for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) {
-               if (!test_bit(FLAG_REQUESTED, &gdesc->flags))
+               if (!test_bit(FLAG_REQUESTED, &gdesc->flags)) {
+                       if (gdesc->name) {
+                               seq_printf(s, " gpio-%-3d (%-20.20s)\n",
+                                          gpio, gdesc->name);
+                       }
                        continue;
+               }
 
                gpiod_get_direction(gdesc);
                is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
                is_irq = test_bit(FLAG_USED_AS_IRQ, &gdesc->flags);
-               seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s",
-                       gpio, gdesc->label,
+               seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s",
+                       gpio, gdesc->name ? gdesc->name : "", gdesc->label,
                        is_out ? "out" : "in ",
                        chip->get
                                ? (chip->get(chip, i) ? "hi" : "lo")
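The gpiolib hunks above move the lookup-flag parsing out of gpiod_configure_flags() and teach fwnode_get_named_gpiod() to derive open-drain/open-source from a device-tree single-ended flag combined with the line's polarity. A minimal standalone sketch of that mapping, using simplified stand-in flag constants rather than the kernel's desc->flags bit helpers:

    /* Illustrative only; the FLAG_* values and plain bitmask are stand-ins. */
    #include <stdbool.h>

    enum {
            FLAG_ACTIVE_LOW  = 1 << 0,
            FLAG_OPEN_DRAIN  = 1 << 1,
            FLAG_OPEN_SOURCE = 1 << 2,
    };

    static unsigned int parse_fw_gpio_flags(bool active_low, bool single_ended)
    {
            unsigned int flags = 0;

            if (active_low)
                    flags |= FLAG_ACTIVE_LOW;

            /* A single-ended line that is active low can only drive low
             * (open drain); an active-high one can only drive high
             * (open source). */
            if (single_ended)
                    flags |= active_low ? FLAG_OPEN_DRAIN : FLAG_OPEN_SOURCE;

            return flags;
    }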
index bf343004b0085c4bcc21243a63195cb875ca636a..78e634d1c719b10846e4399de1c766f644d2f31c 100644 (file)
@@ -89,7 +89,10 @@ struct gpio_desc {
 #define FLAG_USED_AS_IRQ 9     /* GPIO is connected to an IRQ */
 #define FLAG_IS_HOGGED 11      /* GPIO is hogged */
 
+       /* Connection label */
        const char              *label;
+       /* Name of the GPIO */
+       const char              *name;
 };
 
 int gpiod_request(struct gpio_desc *desc, const char *label);
index 6647fb26ef25ce21dba9bffb87cb31e5abe73d80..0d13e6368b96de2ace3b4373a7aebd20f5cd3826 100644 (file)
@@ -1654,6 +1654,7 @@ struct amdgpu_pm {
        u8                      fan_max_rpm;
        /* dpm */
        bool                    dpm_enabled;
+       bool                    sysfs_initialized;
        struct amdgpu_dpm       dpm;
        const struct firmware   *fw;    /* SMC firmware */
        uint32_t                fw_version;
index 77f1d7c6ea3af627324b147e63b21b6cbdd16302..9416e0f5c1db2bf8c5601ddee999b1ade5efabc0 100644 (file)
@@ -672,8 +672,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
                /* disp clock */
                adev->clock.default_dispclk =
                        le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
-               if (adev->clock.default_dispclk == 0)
-                       adev->clock.default_dispclk = 54000; /* 540 Mhz */
+               /* set a reasonable default for DP */
+               if (adev->clock.default_dispclk < 53900) {
+                       DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+                                adev->clock.default_dispclk / 100);
+                       adev->clock.default_dispclk = 60000;
+               }
                adev->clock.dp_extclk =
                        le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
                adev->clock.current_dispclk = adev->clock.default_dispclk;
index 1c3fc99c5465bd10489ac1b31e17484426b7adb9..8e995148f56e263ecde7e5e7a390645b585f2c52 100644 (file)
@@ -208,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
        return ret;
 }
 
-static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
-                                    cgs_handle_t *handle)
-{
-       CGS_FUNC_ADEV;
-       int r;
-       uint32_t dma_handle;
-       struct drm_gem_object *obj;
-       struct amdgpu_bo *bo;
-       struct drm_device *dev = adev->ddev;
-       struct drm_file *file_priv = NULL, *priv;
-
-       mutex_lock(&dev->struct_mutex);
-       list_for_each_entry(priv, &dev->filelist, lhead) {
-               rcu_read_lock();
-               if (priv->pid == get_pid(task_pid(current)))
-                       file_priv = priv;
-               rcu_read_unlock();
-               if (file_priv)
-                       break;
-       }
-       mutex_unlock(&dev->struct_mutex);
-       r = dev->driver->prime_fd_to_handle(dev,
-                                           file_priv, dmabuf_fd,
-                                           &dma_handle);
-       spin_lock(&file_priv->table_lock);
-
-       /* Check if we currently have a reference on the object */
-       obj = idr_find(&file_priv->object_idr, dma_handle);
-       if (obj == NULL) {
-               spin_unlock(&file_priv->table_lock);
-               return -EINVAL;
-       }
-       spin_unlock(&file_priv->table_lock);
-       bo = gem_to_amdgpu_bo(obj);
-       *handle = (cgs_handle_t)bo;
-       return 0;
-}
-
 static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
 {
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -810,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
-       amdgpu_cgs_import_gpu_mem,
        amdgpu_cgs_add_irq_source,
        amdgpu_cgs_irq_get,
        amdgpu_cgs_irq_put
index 749420f1ea6fbf2bc1417cfd5ea0210cf3c6243d..fd16652aa277c75d8ed5ca28e9088c153699addd 100644 (file)
@@ -156,7 +156,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
        uint64_t *chunk_array_user;
        uint64_t *chunk_array;
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-       unsigned size, i;
+       unsigned size;
+       int i;
        int ret;
 
        if (cs->in.num_chunks == 0)
@@ -176,7 +177,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
        /* get chunks */
        INIT_LIST_HEAD(&p->validated);
-       chunk_array_user = (uint64_t __user *)(cs->in.chunks);
+       chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
                           sizeof(uint64_t)*cs->in.num_chunks)) {
                ret = -EFAULT;
@@ -196,7 +197,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                struct drm_amdgpu_cs_chunk user_chunk;
                uint32_t __user *cdata;
 
-               chunk_ptr = (void __user *)chunk_array[i];
+               chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
                if (copy_from_user(&user_chunk, chunk_ptr,
                                       sizeof(struct drm_amdgpu_cs_chunk))) {
                        ret = -EFAULT;
@@ -207,7 +208,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                p->chunks[i].length_dw = user_chunk.length_dw;
 
                size = p->chunks[i].length_dw;
-               cdata = (void __user *)user_chunk.chunk_data;
+               cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
                p->chunks[i].user_ptr = cdata;
 
                p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
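In amdgpu_cs_parser_init() the chunk array pointer and each chunk_data field arrive from user space as plain 64-bit integers. Casting a u64 straight to a pointer warns on 32-bit builds because the sizes differ, so the hunks above route the cast through unsigned long, which is pointer-sized on every kernel target. A small userspace demo of the same idiom; the variable names here are hypothetical:

    /* Illustrative only: turning a u64 carried over an ABI back into a
     * pointer via an explicit pointer-sized intermediate cast. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t raw = 0x1000;                      /* value received as a u64 */
            void *ptr = (void *)(unsigned long)raw;     /* explicit narrowing on 32-bit,
                                                           a no-op on 64-bit */
            printf("%p\n", ptr);
            return 0;
    }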
index e3d70772b53104f1f6a48020088d8391d10985b3..6c9e0902a41438920ddfcbdde9408b3792b41b0e 100644 (file)
@@ -85,8 +85,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
        /* We borrow the event spin lock for protecting flip_status */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-       /* set the proper interrupt */
-       amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id);
        /* do the flip (mmio) */
        adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
        /* set the flip status */
@@ -186,10 +184,6 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
                goto cleanup;
        }
 
-       fence_get(work->excl);
-       for (i = 0; i < work->shared_count; ++i)
-               fence_get(work->shared[i]);
-
        amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
        amdgpu_bo_unreserve(new_rbo);
 
index adb48353f2e1a10f169df7c2cd4fc6d6f8e2c23a..b190c2a83680260dba3cfccca1fa6fad6ee6feae 100644 (file)
@@ -242,11 +242,11 @@ static struct pci_device_id pciidlist[] = {
        {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
        /* topaz */
-       {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-       {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-       {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-       {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-       {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
        /* tonga */
        {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
        {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
index 8a122b1b77861028c123301726b8bb440537ad55..96290d9cddcab6ad8f0e9e8927a71ff97a093c80 100644 (file)
@@ -402,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
                return true;
        return false;
 }
+
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
+{
+       struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+       struct drm_fb_helper *fb_helper;
+       int ret;
+
+       if (!afbdev)
+               return;
+
+       fb_helper = &afbdev->helper;
+
+       ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+       if (ret)
+               DRM_DEBUG("failed to restore crtc mode\n");
+}
index 8c735f544b6608b0f814dfe2396650ddf9c8a34b..5d11e798230ce759af5d13d5c318aa77cfe755d2 100644 (file)
@@ -485,7 +485,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
 /**
- * amdgpu_driver_firstopen_kms - drm callback for last close
+ * amdgpu_driver_lastclose_kms - drm callback for last close
  *
  * @dev: drm dev pointer
  *
@@ -493,6 +493,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {
+       struct amdgpu_device *adev = dev->dev_private;
+
+       amdgpu_fbdev_restore_mode(adev);
        vga_switcheroo_process_delayed_switch();
 }
 
index 64efe5b52e6500f840f0ad7edbdc6a9f64db6f66..7bd470d9ac30556825260575671c24a3229f28d6 100644 (file)
@@ -567,6 +567,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev);
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
 bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
 
 void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
 
index efed11509f4a2326cb5c22b68d20cac77c499705..22a8c7d3a3ab03e9dc3fb294c1f0eb818a6a4880 100644 (file)
@@ -294,10 +294,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        umode_t effective_mode = attr->mode;
 
-       /* Skip limit attributes if DPM is not enabled */
+       /* Skip attributes if DPM is not enabled */
        if (!adev->pm.dpm_enabled &&
            (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
-            attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+            attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
+            attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+            attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+            attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+            attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
                return 0;
 
        /* Skip fan attributes if fan is not present */
@@ -691,6 +695,9 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 {
        int ret;
 
+       if (adev->pm.sysfs_initialized)
+               return 0;
+
        if (adev->pm.funcs->get_temperature == NULL)
                return 0;
        adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
@@ -719,6 +726,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
                return ret;
        }
 
+       adev->pm.sysfs_initialized = true;
+
        return 0;
 }
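The sysfs_initialized flag added above makes amdgpu_pm_sysfs_init() safe to call more than once, which matters now that each DPM block invokes it from its late_init hook rather than sw_init. A tiny sketch of the init-once guard, with invented names:

    /* Illustrative only; struct and helper names are placeholders. */
    #include <stdbool.h>

    struct pm_state {
            bool sysfs_initialized;
    };

    static int register_hwmon(struct pm_state *pm) { (void)pm; return 0; }

    int pm_sysfs_init(struct pm_state *pm)
    {
            int ret;

            if (pm->sysfs_initialized)          /* later callers become no-ops */
                    return 0;

            ret = register_hwmon(pm);
            if (ret)
                    return ret;

            pm->sysfs_initialized = true;       /* only marked done on success */
            return 0;
    }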
 
index 1e14531353e05ec7aadd69ea9d6e019310a25682..53d551f2d8395ccc24dc799887160e0977419ef2 100644 (file)
@@ -455,8 +455,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                return -ENOMEM;
 
        r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
-       if (r)
+       if (r) {
+               kfree(ib);
                return r;
+       }
        ib->length_dw = 0;
 
        /* walk over the address space and update the page directory */
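The hunk above closes a leak in amdgpu_vm_update_page_directory(): the indirect buffer was allocated a few lines earlier and was never freed when amdgpu_ib_get() failed. A generic sketch of the free-on-error shape, with placeholder names:

    /* Illustrative only: release what this function allocated before
     * returning an error.  ib_get() here is a pretend step that fails. */
    #include <errno.h>
    #include <stdlib.h>

    struct ib { unsigned int length_dw; };

    static int ib_get(struct ib *ib) { (void)ib; return -EINVAL; }

    int update_page_directory(void)
    {
            struct ib *ib = calloc(1, sizeof(*ib));
            int r;

            if (!ib)
                    return -ENOMEM;

            r = ib_get(ib);
            if (r) {
                    free(ib);               /* the fix: don't leak the allocation */
                    return r;
            }

            /* ... fill and submit the buffer ... */
            free(ib);
            return 0;
    }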
index cd6edc40c9cd0c3dc2f09e96a2207f35e8b53d28..1e0bba29e16796f97c8eee38fc9d3100c405f6bc 100644 (file)
@@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
                        amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
                }
                if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-                       amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
-                                                              ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+                       amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
                if (ext_encoder)
                        amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
        } else {
index 82e8d073051759f7b0307b7675282a8dfea280e8..a1a35a5df8e71357eea132019d3500a35a89fce4 100644 (file)
@@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle)
        if (!amdgpu_dpm)
                return 0;
 
+       /* init the sysfs and debugfs files late */
+       ret = amdgpu_pm_sysfs_init(adev);
+       if (ret)
+               return ret;
+
        ret = ci_set_temperature_range(adev);
        if (ret)
                return ret;
@@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle)
        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
-       ret = amdgpu_pm_sysfs_init(adev);
-       if (ret)
-               goto dpm_failed;
        mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");
 
index 4b6ce74753cded5179b17eaf698ddd16766760b1..484710cfdf8243d563afe908c2b9c9884879f971 100644 (file)
@@ -1567,6 +1567,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
        int ret, i;
        u16 tmp16;
 
+       if (pci_is_root_bus(adev->pdev->bus))
+               return;
+
        if (amdgpu_pcie_gen2 == 0)
                return;
 
index 44fa96ad47099b765ac81e5c439766a8f9849392..2e3373ed4c942d9fc753851d50adb9a3034ebff8 100644 (file)
@@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (amdgpu_dpm) {
+               int ret;
+               /* init the sysfs and debugfs files late */
+               ret = amdgpu_pm_sysfs_init(adev);
+               if (ret)
+                       return ret;
+
                /* powerdown unused blocks for now */
                cz_dpm_powergate_uvd(adev, true);
                cz_dpm_powergate_vce(adev, true);
@@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle)
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
 
-       ret = amdgpu_pm_sysfs_init(adev);
-       if (ret)
-               goto dpm_init_failed;
-
        mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");
 
index e4d101b1252a47eaf7a2c7e35c2d8d83f737d762..d4c82b6257273475d15c3274cda7a30404fca231 100644 (file)
@@ -255,6 +255,24 @@ static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
                return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Enable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Disable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v10_0_page_flip - pageflip callback.
  *
@@ -2663,9 +2681,10 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v10_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v10_0_vga_enable(crtc, false);
-               /* Make sure VBLANK interrupt is still enabled */
+               /* Make sure VBLANK and PFLIP interrupts are still enabled */
                type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                amdgpu_irq_update(adev, &adev->crtc_irq, type);
+               amdgpu_irq_update(adev, &adev->pageflip_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v10_0_crtc_load_lut(crtc);
                break;
@@ -3025,6 +3044,8 @@ static int dce_v10_0_hw_init(void *handle)
                dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }
 
+       dce_v10_0_pageflip_interrupt_init(adev);
+
        return 0;
 }
 
@@ -3039,6 +3060,8 @@ static int dce_v10_0_hw_fini(void *handle)
                dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }
 
+       dce_v10_0_pageflip_interrupt_fini(adev);
+
        return 0;
 }
 
@@ -3050,6 +3073,8 @@ static int dce_v10_0_suspend(void *handle)
 
        dce_v10_0_hpd_fini(adev);
 
+       dce_v10_0_pageflip_interrupt_fini(adev);
+
        return 0;
 }
 
@@ -3075,6 +3100,8 @@ static int dce_v10_0_resume(void *handle)
        /* initialize hpd */
        dce_v10_0_hpd_init(adev);
 
+       dce_v10_0_pageflip_interrupt_init(adev);
+
        return 0;
 }
 
@@ -3369,7 +3396,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
        drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-       amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
        queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
        return 0;
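The dce_v10_0 hunks above (and the matching dce_v11_0 and dce_v8_0 hunks further down) stop taking a pageflip interrupt reference for each individual flip; instead one reference per CRTC is held from hw_init/resume until hw_fini/suspend, keeping the irq_get/irq_put accounting balanced. A rough sketch of that bracketing with hypothetical helpers:

    /* Illustrative only; irq_get()/irq_put() stand in for the driver's
     * reference-counted interrupt enable/disable calls. */
    #define NUM_CRTC 3

    static int pflip_refcount[NUM_CRTC];

    static void irq_get(int crtc) { pflip_refcount[crtc]++; }
    static void irq_put(int crtc) { pflip_refcount[crtc]--; }

    static void pageflip_interrupt_init(void)
    {
            for (int i = 0; i < NUM_CRTC; i++)
                    irq_get(i);             /* held while the hardware is up */
    }

    static void pageflip_interrupt_fini(void)
    {
            for (int i = 0; i < NUM_CRTC; i++)
                    irq_put(i);             /* mirrors init, counts return to zero */
    }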
index 6411e824467164831eef8af634051f95b8faba69..7e1cf5e4eebf468dfd7c6fc0d97aeccb805e0fbe 100644 (file)
@@ -233,6 +233,24 @@ static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
                return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Enable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Disable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v11_0_page_flip - pageflip callback.
  *
@@ -2640,9 +2658,10 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v11_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v11_0_vga_enable(crtc, false);
-               /* Make sure VBLANK interrupt is still enabled */
+               /* Make sure VBLANK and PFLIP interrupts are still enabled */
                type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                amdgpu_irq_update(adev, &adev->crtc_irq, type);
+               amdgpu_irq_update(adev, &adev->pageflip_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v11_0_crtc_load_lut(crtc);
                break;
@@ -2888,7 +2907,7 @@ static int dce_v11_0_early_init(void *handle)
 
        switch (adev->asic_type) {
        case CHIP_CARRIZO:
-               adev->mode_info.num_crtc = 4;
+               adev->mode_info.num_crtc = 3;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
@@ -3000,6 +3019,8 @@ static int dce_v11_0_hw_init(void *handle)
                dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }
 
+       dce_v11_0_pageflip_interrupt_init(adev);
+
        return 0;
 }
 
@@ -3014,6 +3035,8 @@ static int dce_v11_0_hw_fini(void *handle)
                dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }
 
+       dce_v11_0_pageflip_interrupt_fini(adev);
+
        return 0;
 }
 
@@ -3025,6 +3048,8 @@ static int dce_v11_0_suspend(void *handle)
 
        dce_v11_0_hpd_fini(adev);
 
+       dce_v11_0_pageflip_interrupt_fini(adev);
+
        return 0;
 }
 
@@ -3051,6 +3076,8 @@ static int dce_v11_0_resume(void *handle)
        /* initialize hpd */
        dce_v11_0_hpd_init(adev);
 
+       dce_v11_0_pageflip_interrupt_init(adev);
+
        return 0;
 }
 
@@ -3345,7 +3372,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
        drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-       amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
        queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
        return 0;
index c86911c2ea2a896f414473f798d782e9c08518cf..34b9c2a9d8d489c7958af39e0fdbd4e484a572c6 100644 (file)
@@ -204,6 +204,24 @@ static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
                return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Enable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Disable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v8_0_page_flip - pageflip callback.
  *
@@ -2575,9 +2593,10 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v8_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v8_0_vga_enable(crtc, false);
-               /* Make sure VBLANK interrupt is still enabled */
+               /* Make sure VBLANK and PFLIP interrupts are still enabled */
                type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                amdgpu_irq_update(adev, &adev->crtc_irq, type);
+               amdgpu_irq_update(adev, &adev->pageflip_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v8_0_crtc_load_lut(crtc);
                break;
@@ -2933,6 +2952,8 @@ static int dce_v8_0_hw_init(void *handle)
                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }
 
+       dce_v8_0_pageflip_interrupt_init(adev);
+
        return 0;
 }
 
@@ -2947,6 +2968,8 @@ static int dce_v8_0_hw_fini(void *handle)
                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }
 
+       dce_v8_0_pageflip_interrupt_fini(adev);
+
        return 0;
 }
 
@@ -2958,6 +2981,8 @@ static int dce_v8_0_suspend(void *handle)
 
        dce_v8_0_hpd_fini(adev);
 
+       dce_v8_0_pageflip_interrupt_fini(adev);
+
        return 0;
 }
 
@@ -2981,6 +3006,8 @@ static int dce_v8_0_resume(void *handle)
        /* initialize hpd */
        dce_v8_0_hpd_init(adev);
 
+       dce_v8_0_pageflip_interrupt_init(adev);
+
        return 0;
 }
 
@@ -3376,7 +3403,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
        drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-       amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
        queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
        return 0;
index 774528ab8704387f00525618b1c48578387b70df..fab5471d25d7e3dc3a3605d22c52fd669e919f7b 100644 (file)
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
        addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
        status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
        mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+       /* reset addr and status */
+       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+       if (!addr && !status)
+               return 0;
+
        dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                entry->src_id, entry->src_data);
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                status);
        gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
-       /* reset addr and status */
-       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
 
        return 0;
 }
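The gmc_v7_0 hunk above (gmc_v8_0 receives the same change below) reorders the VM fault handler: the latched fault registers are acknowledged first, and an all-zero address/status pair, i.e. a fault that was already consumed, now returns early instead of printing an empty report. A compressed sketch of the flow with invented register helpers:

    /* Illustrative only; rreg()/wreg() and the register indices are stand-ins
     * for the driver's MMIO accessors. */
    #include <stdint.h>
    #include <stdio.h>

    enum { REG_FAULT_ADDR, REG_FAULT_STATUS, REG_CNTL2 };

    static uint32_t rreg(int reg) { (void)reg; return 0; }
    static void wreg(int reg, uint32_t val) { (void)reg; (void)val; }

    int process_vm_fault(void)
    {
            uint32_t addr   = rreg(REG_FAULT_ADDR);
            uint32_t status = rreg(REG_FAULT_STATUS);

            wreg(REG_CNTL2, 1);             /* ack first so a new fault can latch */

            if (!addr && !status)           /* nothing latched: spurious, stay quiet */
                    return 0;

            fprintf(stderr, "GPU fault: addr=0x%08x status=0x%08x\n",
                    (unsigned int)addr, (unsigned int)status);
            return 0;
    }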
index 9a07742620d0361ad054930ff3b07c75c9bbcf2c..7bc9e9fcf3d26cbbaa6d7aa76fbef0349964ec6f 100644 (file)
@@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
        addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
        status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
        mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+       /* reset addr and status */
+       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+       if (!addr && !status)
+               return 0;
+
        dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                entry->src_id, entry->src_data);
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                status);
        gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
-       /* reset addr and status */
-       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
 
        return 0;
 }
index 94ec04a9c4d5c975eeb329dc770d3dd055b74ae5..7e9154c7f1dbbb7f9d3eee6791c0e4fcc835ea8d 100644 (file)
@@ -2995,6 +2995,15 @@ static int kv_dpm_late_init(void *handle)
 {
        /* powerdown unused blocks for now */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int ret;
+
+       if (!amdgpu_dpm)
+               return 0;
+
+       /* init the sysfs and debugfs files late */
+       ret = amdgpu_pm_sysfs_init(adev);
+       if (ret)
+               return ret;
 
        kv_dpm_powergate_acp(adev, true);
        kv_dpm_powergate_samu(adev, true);
@@ -3038,9 +3047,6 @@ static int kv_dpm_sw_init(void *handle)
        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
-       ret = amdgpu_pm_sysfs_init(adev);
-       if (ret)
-               goto dpm_failed;
        mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");
 
index b55ceb14fdcd91e92f7a5924a232490a6419a80a..0bac8702e9348c2ee9c86ed52d6aaf490ef5032f 100644 (file)
@@ -1005,6 +1005,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
        u32 mask;
        int ret;
 
+       if (pci_is_root_bus(adev->pdev->bus))
+               return;
+
        if (amdgpu_pcie_gen2 == 0)
                return;
 
index 488642f08267902c628c9db541030c6a27cb2def..3b47ae313e36bc7a0385365c082a519207fd7470 100644 (file)
 
 #include "cgs_common.h"
 
-/**
- * cgs_import_gpu_mem() - Import dmabuf handle
- * @cgs_device:  opaque device handle
- * @dmabuf_fd:   DMABuf file descriptor
- * @handle:      memory handle (output)
- *
- * Must be called in the process context that dmabuf_fd belongs to.
- *
- * Return:  0 on success, -errno otherwise
- */
-typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
-                                   cgs_handle_t *handle);
-
 /**
  * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
  * @private_data:  private data provided to cgs_add_irq_source
@@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
 typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
 
 struct cgs_os_ops {
-       cgs_import_gpu_mem_t import_gpu_mem;
-
        /* IRQ handling */
        cgs_add_irq_source_t add_irq_source;
        cgs_irq_get_t irq_get;
        cgs_irq_put_t irq_put;
 };
 
-#define cgs_import_gpu_mem(dev,dmabuf_fd,handle)               \
-       CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
 #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
        CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler,    \
                    private_data)
index 33d877c65ced6a3c138af9ab47c824038410be0b..8328e7059205d266a456710613628c64b097f787 100644 (file)
@@ -4105,7 +4105,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
        struct drm_property_blob *blob;
        int ret;
 
-       if (!length)
+       if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
                return ERR_PTR(-EINVAL);
 
        blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
@@ -4454,7 +4454,7 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
         * not associated with any file_priv. */
        mutex_lock(&dev->mode_config.blob_lock);
        out_resp->blob_id = blob->base.id;
-       list_add_tail(&file_priv->blobs, &blob->head_file);
+       list_add_tail(&blob->head_file, &file_priv->blobs);
        mutex_unlock(&dev->mode_config.blob_lock);
 
        return 0;
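Two fixes in drm_crtc.c above: drm_property_create_blob() now rejects lengths that would overflow the sizeof(struct drm_property_blob) + length allocation, and drm_mode_createblob_ioctl() passes list_add_tail() its new-entry and list-head arguments in the right order. A small standalone demonstration of the overflow the first hunk guards against (SIZE_MAX plays the role of ULONG_MAX here):

    /* Illustrative only: an unchecked "header + payload" size can wrap to a
     * tiny allocation that a later copy of `length` bytes would overrun. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct blob_header { uint32_t id; };

    int main(void)
    {
            size_t length = SIZE_MAX - 2;                        /* hostile request */
            size_t alloc  = sizeof(struct blob_header) + length; /* wraps around */

            printf("requested %zu bytes, would allocate only %zu\n", length, alloc);

            if (!length || length > SIZE_MAX - sizeof(struct blob_header))
                    printf("rejected: zero or would overflow\n"); /* the added check */
            return 0;
    }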
index e23df5fd3836b1b70169a1ee125782c1e754b875..809959d56d7826364b540204a16190aaf270f9c4 100644 (file)
@@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_dp_mst_port *port,
                                  int offset, int size, u8 *bytes);
 
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
-                                   struct drm_dp_mst_branch *mstb);
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+                                    struct drm_dp_mst_branch *mstb);
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
                                           struct drm_dp_mst_branch *mstb,
                                           struct drm_dp_mst_port *port);
@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
        struct drm_dp_mst_port *port, *tmp;
        bool wake_tx = false;
 
-       cancel_work_sync(&mstb->mgr->work);
-
        /*
         * destroy all ports - don't need lock
         * as there are no more references to the mst branch
@@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
 {
        struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+
        if (!port->input) {
                port->vcpi.num_slots = 0;
 
                kfree(port->cached_edid);
 
-               /* we can't destroy the connector here, as
-                  we might be holding the mode_config.mutex
-                  from an EDID retrieval */
+               /*
+                * The only time we don't have a connector
+                * on an output port is if the connector init
+                * fails.
+                */
                if (port->connector) {
+                       /* we can't destroy the connector here, as
+                        * we might be holding the mode_config.mutex
+                        * from an EDID retrieval */
+
                        mutex_lock(&mgr->destroy_connector_lock);
                        list_add(&port->next, &mgr->destroy_connector_list);
                        mutex_unlock(&mgr->destroy_connector_lock);
                        schedule_work(&mgr->destroy_connector_work);
                        return;
                }
+               /* no need to clean up vcpi
+                * as if we have no connector we never setup a vcpi */
                drm_dp_port_teardown_pdt(port, port->pdt);
-
-               if (!port->input && port->vcpi.vcpi > 0)
-                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
        }
        kfree(port);
-
-       (*mgr->cbs->hotplug)(mgr);
 }
 
 static void drm_dp_put_port(struct drm_dp_mst_port *port)
@@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
        }
 }
 
-static void build_mst_prop_path(struct drm_dp_mst_port *port,
-                               struct drm_dp_mst_branch *mstb,
+static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
+                               int pnum,
                                char *proppath,
                                size_t proppath_size)
 {
@@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port,
                snprintf(temp, sizeof(temp), "-%d", port_num);
                strlcat(proppath, temp, proppath_size);
        }
-       snprintf(temp, sizeof(temp), "-%d", port->port_num);
+       snprintf(temp, sizeof(temp), "-%d", pnum);
        strlcat(proppath, temp, proppath_size);
 }
 
@@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
                drm_dp_port_teardown_pdt(port, old_pdt);
 
                ret = drm_dp_port_setup_pdt(port);
-               if (ret == true) {
+               if (ret == true)
                        drm_dp_send_link_address(mstb->mgr, port->mstb);
-                       port->mstb->link_address_sent = true;
-               }
        }
 
        if (created && !port->input) {
                char proppath[255];
-               build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
-               port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
 
-               if (port->port_num >= 8) {
+               build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
+               port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+               if (!port->connector) {
+                       /* remove it from the port list */
+                       mutex_lock(&mstb->mgr->lock);
+                       list_del(&port->next);
+                       mutex_unlock(&mstb->mgr->lock);
+                       /* drop port list reference */
+                       drm_dp_put_port(port);
+                       goto out;
+               }
+               if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
                        port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+                       drm_mode_connector_set_tile_property(port->connector);
                }
+               (*mstb->mgr->cbs->register_connector)(port->connector);
        }
 
+out:
        /* put reference to this port */
        drm_dp_put_port(port);
 }
@@ -1182,17 +1194,18 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
 
                list_for_each_entry(port, &mstb->ports, next) {
                        if (port->port_num == port_num) {
-                               if (!port->mstb) {
+                               mstb = port->mstb;
+                               if (!mstb) {
                                        DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
-                                       return NULL;
+                                       goto out;
                                }
 
-                               mstb = port->mstb;
                                break;
                        }
                }
        }
        kref_get(&mstb->kref);
+out:
        mutex_unlock(&mgr->lock);
        return mstb;
 }
@@ -1202,10 +1215,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
 {
        struct drm_dp_mst_port *port;
        struct drm_dp_mst_branch *mstb_child;
-       if (!mstb->link_address_sent) {
+       if (!mstb->link_address_sent)
                drm_dp_send_link_address(mgr, mstb);
-               mstb->link_address_sent = true;
-       }
+
        list_for_each_entry(port, &mstb->ports, next) {
                if (port->input)
                        continue;
@@ -1458,8 +1470,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
        mutex_unlock(&mgr->qlock);
 }
 
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
-                                   struct drm_dp_mst_branch *mstb)
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+                                    struct drm_dp_mst_branch *mstb)
 {
        int len;
        struct drm_dp_sideband_msg_tx *txmsg;
@@ -1467,11 +1479,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
        if (!txmsg)
-               return -ENOMEM;
+               return;
 
        txmsg->dst = mstb;
        len = build_link_address(txmsg);
 
+       mstb->link_address_sent = true;
        drm_dp_queue_down_tx(mgr, txmsg);
 
        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
@@ -1499,11 +1512,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
                        }
                        (*mgr->cbs->hotplug)(mgr);
                }
-       } else
+       } else {
+               mstb->link_address_sent = false;
                DRM_DEBUG_KMS("link address failed %d\n", ret);
+       }
 
        kfree(txmsg);
-       return 0;
 }
 
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@ -1978,6 +1992,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
        drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
                           DP_MST_EN | DP_UPSTREAM_IS_SRC);
        mutex_unlock(&mgr->lock);
+       flush_work(&mgr->work);
+       flush_work(&mgr->destroy_connector_work);
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
 
@@ -2263,10 +2279,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
 
        if (port->cached_edid)
                edid = drm_edid_duplicate(port->cached_edid);
-       else
+       else {
                edid = drm_get_edid(connector, &port->aux.ddc);
-
-       drm_mode_connector_set_tile_property(connector);
+               drm_mode_connector_set_tile_property(connector);
+       }
        drm_dp_put_port(port);
        return edid;
 }
@@ -2671,7 +2687,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
        struct drm_dp_mst_port *port;
-
+       bool send_hotplug = false;
        /*
         * Not a regular list traverse as we have to drop the destroy
         * connector lock before destroying the connector, to avoid AB->BA
@@ -2694,7 +2710,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
                if (!port->input && port->vcpi.vcpi > 0)
                        drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
                kfree(port);
+               send_hotplug = true;
        }
+       if (send_hotplug)
+               (*mgr->cbs->hotplug)(mgr);
 }
 
 /**
@@ -2747,6 +2766,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
  */
 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 {
+       flush_work(&mgr->work);
        flush_work(&mgr->destroy_connector_work);
        mutex_lock(&mgr->payload_lock);
        kfree(mgr->payloads);
@@ -2782,12 +2802,13 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
        if (msgs[num - 1].flags & I2C_M_RD)
                reading = true;
 
-       if (!reading) {
+       if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
                DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
                ret = -EIO;
                goto out;
        }
 
+       memset(&msg, 0, sizeof(msg));
        msg.req_type = DP_REMOTE_I2C_READ;
        msg.u.i2c_read.num_transactions = num - 1;
        msg.u.i2c_read.port_number = port->port_num;
index 418d299f3b129b307f86a970fdf49d3a826af4c8..ca08c472311bd3f6238f7513bc4ac26737228884 100644 (file)
@@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
                struct drm_crtc *crtc = mode_set->crtc;
                int ret;
 
-               if (crtc->funcs->cursor_set) {
+               if (crtc->funcs->cursor_set2) {
+                       ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+                       if (ret)
+                               error = true;
+               } else if (crtc->funcs->cursor_set) {
                        ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
                        if (ret)
                                error = true;
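restore_fbdev_mode() above learns to clear the cursor through the newer cursor_set2 hook when a driver implements it, and only falls back to the legacy cursor_set otherwise. A sketch of that prefer-the-newer-callback pattern with an invented ops structure:

    /* Illustrative only; the ops structure and both hooks are stand-ins. */
    #include <stddef.h>

    struct crtc_funcs {
            int (*cursor_set2)(void *crtc, int hot_x, int hot_y);  /* newer hook */
            int (*cursor_set)(void *crtc);                         /* legacy hook */
    };

    static int clear_cursor(void *crtc, const struct crtc_funcs *funcs)
    {
            if (funcs->cursor_set2)
                    return funcs->cursor_set2(crtc, 0, 0);  /* preferred if present */
            if (funcs->cursor_set)
                    return funcs->cursor_set(crtc);         /* otherwise the old path */
            return 0;                                       /* no cursor support */
    }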
index d734780b31c0fdcd67d2d622333b0a78c8098e10..a18164f2f6d28290c09462dd7a755168873a42d3 100644 (file)
@@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
 }
 
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+/**
+ * drm_kms_helper_poll_enable_locked - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work without
+ * locking the mode_config mutex.
+ *
+ * This is like drm_kms_helper_poll_enable() however it is to be
+ * called from a context where the mode_config mutex is locked
+ * already.
+ */
+void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
 {
        bool poll = false;
        struct drm_connector *connector;
@@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev)
        if (poll)
                schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+
 
 static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
                                                              uint32_t maxX, uint32_t maxY, bool merge_type_bits)
@@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 
        /* Re-enable polling in case the global poll config changed. */
        if (drm_kms_helper_poll != dev->mode_config.poll_running)
-               __drm_kms_helper_poll_enable(dev);
+               drm_kms_helper_poll_enable_locked(dev);
 
        dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
        mutex_lock(&dev->mode_config.mutex);
-       __drm_kms_helper_poll_enable(dev);
+       drm_kms_helper_poll_enable_locked(dev);
        mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
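The probe-helper hunks above turn the static __drm_kms_helper_poll_enable() into the exported drm_kms_helper_poll_enable_locked(), for callers that already hold mode_config.mutex, while drm_kms_helper_poll_enable() remains the take-the-lock wrapper around it. A generic sketch of that locked/unlocked pairing, using pthreads purely for illustration:

    /* Illustrative only: the _locked variant assumes the caller holds the
     * lock; the public entry point acquires and releases it. */
    #include <pthread.h>

    static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
    static int poll_running;

    void poll_enable_locked(void)           /* caller must hold config_lock */
    {
            poll_running = 1;
    }

    void poll_enable(void)                  /* convenience wrapper */
    {
            pthread_mutex_lock(&config_lock);
            poll_enable_locked();
            pthread_mutex_unlock(&config_lock);
    }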
index 0f6cd33b531f104f5094513a0992eb99361e65b3..684bd4a138439ef254f7123c66ffde989fede279 100644 (file)
@@ -235,18 +235,12 @@ static ssize_t dpms_show(struct device *device,
                           char *buf)
 {
        struct drm_connector *connector = to_drm_connector(device);
-       struct drm_device *dev = connector->dev;
-       uint64_t dpms_status;
-       int ret;
+       int dpms;
 
-       ret = drm_object_property_get_value(&connector->base,
-                                           dev->mode_config.dpms_property,
-                                           &dpms_status);
-       if (ret)
-               return 0;
+       dpms = READ_ONCE(connector->dpms);
 
        return snprintf(buf, PAGE_SIZE, "%s\n",
-                       drm_get_dpms_name((int)dpms_status));
+                       drm_get_dpms_name(dpms));
 }
 
 static ssize_t enabled_show(struct device *device,
index cbdb78ef3baca57cd1ddf701425b953f140377f2..e6cbaca821a47b9740f4d35d7cfce00bbd0aa11a 100644 (file)
@@ -37,7 +37,6 @@
  * DECON stands for Display and Enhancement controller.
  */
 
-#define DECON_DEFAULT_FRAMERATE 60
 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128
 
 #define WINDOWS_NR     2
@@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx,
        return (clkdiv < 0x100) ? clkdiv : 0xff;
 }
 
-static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
-               const struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
-{
-       if (adjusted_mode->vrefresh == 0)
-               adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE;
-
-       return true;
-}
-
 static void decon_commit(struct exynos_drm_crtc *crtc)
 {
        struct decon_context *ctx = crtc->ctx;
@@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
 static const struct exynos_drm_crtc_ops decon_crtc_ops = {
        .enable = decon_enable,
        .disable = decon_disable,
-       .mode_fixup = decon_mode_fixup,
        .commit = decon_commit,
        .enable_vblank = decon_enable_vblank,
        .disable_vblank = decon_disable_vblank,
index d66ade0efac892b84ce7e26c4f9e132870806d33..124fb9a56f02b596b5a6c4cf6cbf3f28151f4470 100644 (file)
@@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int exynos_dp_suspend(struct device *dev)
-{
-       struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
-       exynos_dp_disable(&dp->encoder);
-       return 0;
-}
-
-static int exynos_dp_resume(struct device *dev)
-{
-       struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
-       exynos_dp_enable(&dp->encoder);
-       return 0;
-}
-#endif
-
-static const struct dev_pm_ops exynos_dp_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
-};
-
 static const struct of_device_id exynos_dp_match[] = {
        { .compatible = "samsung,exynos5-dp" },
        {},
@@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = {
        .driver         = {
                .name   = "exynos-dp",
                .owner  = THIS_MODULE,
-               .pm     = &exynos_dp_pm_ops,
                .of_match_table = exynos_dp_match,
        },
 };
index c68a6a2a9b5794558015abdeefb0e58c4958049d..7f55ba6771c6b94e5f45bee6bdec078c27c74f5b 100644 (file)
@@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
 
 int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
 {
@@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
 
 int exynos_drm_device_subdrv_probe(struct drm_device *dev)
 {
@@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
 
 int exynos_drm_device_subdrv_remove(struct drm_device *dev)
 {
@@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
 
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
 {
@@ -111,7 +107,6 @@ err:
        }
        return ret;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
 
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
 {
@@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
                        subdrv->close(dev, subdrv->dev, file);
        }
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
index 0872aa2f450f273a992bc414081e8501e11bf787..ed28823d3b35ef704a5dded0c1c55c1eff6ef3ed 100644 (file)
@@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
                exynos_crtc->ops->disable(exynos_crtc);
 }
 
-static bool
-exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
-                           const struct drm_display_mode *mode,
-                           struct drm_display_mode *adjusted_mode)
-{
-       struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
-       if (exynos_crtc->ops->mode_fixup)
-               return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
-                                                   adjusted_mode);
-
-       return true;
-}
-
 static void
 exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
@@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
        .enable         = exynos_drm_crtc_enable,
        .disable        = exynos_drm_crtc_disable,
-       .mode_fixup     = exynos_drm_crtc_mode_fixup,
        .mode_set_nofb  = exynos_drm_crtc_mode_set_nofb,
        .atomic_begin   = exynos_crtc_atomic_begin,
        .atomic_flush   = exynos_crtc_atomic_flush,
index 831d2e4cacf9d0bb951f5bbd9d7bbfab4b681c91..ae9e6b2d3758a97104ac6be69f1a970e6c0f3bb6 100644 (file)
@@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
 {
        struct drm_connector *connector;
@@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev)
 
        return 0;
 }
+#endif
 
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
index b7ba21dfb69641f36410550711f024fde541bb72..6c717ba672dbc8ad41fcc3adedfc82c32c6de5bc 100644 (file)
@@ -82,7 +82,6 @@ struct exynos_drm_plane {
  *
  * @enable: enable the device
  * @disable: disable the device
- * @mode_fixup: fix mode data before applying it
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
@@ -103,9 +102,6 @@ struct exynos_drm_crtc;
 struct exynos_drm_crtc_ops {
        void (*enable)(struct exynos_drm_crtc *crtc);
        void (*disable)(struct exynos_drm_crtc *crtc);
-       bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
-                               const struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode);
        void (*commit)(struct exynos_drm_crtc *crtc);
        int (*enable_vblank)(struct exynos_drm_crtc *crtc);
        void (*disable_vblank)(struct exynos_drm_crtc *crtc);
index 2a652359af644b51f257cde7528d70b6016897da..dd3a5e6d58c8f04c43fb8afd7c7b6243f7312761 100644 (file)
@@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = {
        .set_addr = fimc_dst_set_addr,
 };
 
-static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
-{
-       DRM_DEBUG_KMS("enable[%d]\n", enable);
-
-       if (enable) {
-               clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
-               clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
-               ctx->suspended = false;
-       } else {
-               clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
-               clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
-               ctx->suspended = true;
-       }
-
-       return 0;
-}
-
 static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
 {
        struct fimc_context *ctx = dev_id;
@@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+       DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+       if (enable) {
+               clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
+               clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
+               ctx->suspended = false;
+       } else {
+               clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
+               clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
+               ctx->suspended = true;
+       }
+
+       return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int fimc_suspend(struct device *dev)
 {
@@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM
 static int fimc_runtime_suspend(struct device *dev)
 {
        struct fimc_context *ctx = get_fimc_context(dev);
index 750a9e6b9e8d92c312e3bb685fb524f249ad0488..3d1aba67758baf4a4e25e4c383e300d5a6dd954e 100644 (file)
@@ -41,7 +41,6 @@
  * CPU Interface.
  */
 
-#define FIMD_DEFAULT_FRAMERATE 60
 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128
 
 /* position control register for hardware window 0, 2 ~ 4.*/
@@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
        return (clkdiv < 0x100) ? clkdiv : 0xff;
 }
 
-static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc,
-               const struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
-{
-       if (adjusted_mode->vrefresh == 0)
-               adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
-
-       return true;
-}
-
 static void fimd_commit(struct exynos_drm_crtc *crtc)
 {
        struct fimd_context *ctx = crtc->ctx;
@@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
                return;
 
        val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
-       writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
+       writel(val, ctx->regs + DP_MIE_CLKCON);
 }
 
 static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
        .enable = fimd_enable,
        .disable = fimd_disable,
-       .mode_fixup = fimd_mode_fixup,
        .commit = fimd_commit,
        .enable_vblank = fimd_enable_vblank,
        .disable_vblank = fimd_disable_vblank,
index 3734c34aed16a22938509454cf794e5b4069ac35..c17efdb238a6e24f6fcecac58c395b8964b12703 100644 (file)
@@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
 
 int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
                                 struct drm_file *file)
@@ -1230,7 +1229,6 @@ err:
        g2d_put_cmdlist(g2d, node);
        return ret;
 }
-EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
 
 int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
                          struct drm_file *file)
@@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
 out:
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
 
 static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 {
index f12fbc36b120065902c50253a4e91e9cc8952df5..407afedb60031a00f7f0cc5cb599cf7b3e57a9b8 100644 (file)
@@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
        nr_pages = obj->size >> PAGE_SHIFT;
 
        if (!is_drm_iommu_supported(dev)) {
-               dma_addr_t start_addr;
-               unsigned int i = 0;
-
                obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
                if (!obj->pages) {
                        DRM_ERROR("failed to allocate pages.\n");
                        return -ENOMEM;
                }
+       }
 
-               obj->cookie = dma_alloc_attrs(dev->dev,
-                                       obj->size,
-                                       &obj->dma_addr, GFP_KERNEL,
-                                       &obj->dma_attrs);
-               if (!obj->cookie) {
-                       DRM_ERROR("failed to allocate buffer.\n");
+       obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
+                                     GFP_KERNEL, &obj->dma_attrs);
+       if (!obj->cookie) {
+               DRM_ERROR("failed to allocate buffer.\n");
+               if (obj->pages)
                        drm_free_large(obj->pages);
-                       return -ENOMEM;
-               }
+               return -ENOMEM;
+       }
+
+       if (obj->pages) {
+               dma_addr_t start_addr;
+               unsigned int i = 0;
 
                start_addr = obj->dma_addr;
                while (i < nr_pages) {
-                       obj->pages[i] = phys_to_page(start_addr);
+                       obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
+                                                              start_addr));
                        start_addr += PAGE_SIZE;
                        i++;
                }
        } else {
-               obj->pages = dma_alloc_attrs(dev->dev, obj->size,
-                                       &obj->dma_addr, GFP_KERNEL,
-                                       &obj->dma_attrs);
-               if (!obj->pages) {
-                       DRM_ERROR("failed to allocate buffer.\n");
-                       return -ENOMEM;
-               }
+               obj->pages = obj->cookie;
        }
 
        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
@@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)obj->dma_addr, obj->size);
 
-       if (!is_drm_iommu_supported(dev)) {
-               dma_free_attrs(dev->dev, obj->size, obj->cookie,
-                               (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
-               drm_free_large(obj->pages);
-       } else
-               dma_free_attrs(dev->dev, obj->size, obj->pages,
-                               (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+       dma_free_attrs(dev->dev, obj->size, obj->cookie,
+                       (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
 
-       obj->dma_addr = (dma_addr_t)NULL;
+       if (!is_drm_iommu_supported(dev))
+               drm_free_large(obj->pages);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
         * once dmabuf's refcount becomes 0.
         */
        if (obj->import_attach)
-               goto out;
-
-       exynos_drm_free_buf(exynos_gem_obj);
-
-out:
-       drm_gem_free_mmap_offset(obj);
+               drm_prime_gem_destroy(obj, exynos_gem_obj->sgt);
+       else
+               exynos_drm_free_buf(exynos_gem_obj);
 
        /* release file pointer to gem object. */
        drm_gem_object_release(obj);
 
        kfree(exynos_gem_obj);
-       exynos_gem_obj = NULL;
 }
 
 unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
@@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
        return exynos_gem_obj->size;
 }
 
-
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                      unsigned long size)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                return ERR_PTR(ret);
        }
 
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret < 0) {
+               drm_gem_object_release(obj);
+               kfree(exynos_gem_obj);
+               return ERR_PTR(ret);
+       }
+
        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
 
        return exynos_gem_obj;
@@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
        drm_gem_object_unreference_unlocked(obj);
 }
 
-int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
+static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
                                      struct vm_area_struct *vma)
 {
        struct drm_device *drm_dev = exynos_gem_obj->base.dev;
@@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
 
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
-{      struct exynos_drm_gem_obj *exynos_gem_obj;
+{
+       struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;
 
@@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_mode_create_dumb *args)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
+       unsigned int flags;
        int ret;
 
        /*
@@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;
 
-       if (is_drm_iommu_supported(dev)) {
-               exynos_gem_obj = exynos_drm_gem_create(dev,
-                       EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
-                       args->size);
-       } else {
-               exynos_gem_obj = exynos_drm_gem_create(dev,
-                       EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
-                       args->size);
-       }
+       if (is_drm_iommu_supported(dev))
+               flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
+       else
+               flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
 
+       exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size);
        if (IS_ERR(exynos_gem_obj)) {
                dev_warn(dev->dev, "FB allocation failed.\n");
                return PTR_ERR(exynos_gem_obj);
@@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                goto unlock;
        }
 
-       ret = drm_gem_create_mmap_offset(obj);
-       if (ret)
-               goto out;
-
        *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
-out:
        drm_gem_object_unreference(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
@@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
 err_close_vm:
        drm_gem_vm_close(vma);
-       drm_gem_free_mmap_offset(obj);
 
        return ret;
 }
@@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
        if (ret < 0)
                goto err_free_large;
 
+       exynos_gem_obj->sgt = sgt;
+
        if (sgt->nents == 1) {
                /* always physically continuous memory if sgt->nents is 1. */
                exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
index cd62f8410d1e5d86f6dd221aebf924be1fe71db9..b62d1007c0e05f88e4cfc38970bb7baa9a87fe7a 100644 (file)
@@ -39,6 +39,7 @@
  *     - this address could be physical address without IOMMU and
  *     device address with IOMMU.
  * @pages: Array of backing pages.
+ * @sgt: Imported sg_table.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so
  *     user can access the buffer through kms_bo.handle.
@@ -52,6 +53,7 @@ struct exynos_drm_gem_obj {
        dma_addr_t              dma_addr;
        struct dma_attrs        dma_attrs;
        struct page             **pages;
+       struct sg_table         *sgt;
 };
 
 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 /* destroy a buffer with gem object */
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
 
-/* create a private gem object and initialize it. */
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
-                                                     unsigned long size);
-
 /* create a new buffer with gem object */
 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
index 425e7062538812c0613c055b9b4fdeea709c4be0..2f5c118f4c8ef5ea27a68532756ca77549e325f4 100644 (file)
@@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
 static int rotator_clk_crtl(struct rot_context *rot, bool enable)
 {
        if (enable) {
@@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM
 static int rotator_runtime_suspend(struct device *dev)
 {
        struct rot_context *rot = dev_get_drvdata(dev);
index f6ecbda2c60475297b36b1da6cff0766fb0fe833..674341708033b0aa2900ee0776db9a78424b2028 100644 (file)
@@ -143,7 +143,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 }
 
 /**
- * i915_gem_shrink - Shrink buffer object caches completely
+ * i915_gem_shrink_all - Shrink buffer object caches completely
  * @dev_priv: i915 device
  *
  * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
index 8fd431bcdfd3a33ffb6afda7a1584b44e33d8296..a96b9006a51e5a893eea071d1d638ef3c2cef6fb 100644 (file)
@@ -804,7 +804,10 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
  * Also note, that the object created here is not currently a "first class"
  * object, in that several ioctls are banned. These are the CPU access
  * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
- * direct access via your pointer rather than use those ioctls.
+ * direct access via your pointer rather than use those ioctls. Another
+ * restriction is that we do not allow userptr surfaces to be pinned to the
+ * hardware and so we reject any attempt to create a framebuffer out of a
+ * userptr.
  *
  * If you think this is a good interface to use to pass GPU memory between
  * drivers, please use dma-buf instead. In fact, wherever possible use
index cf418be7d30a52d0e25ac42201b61b0e42f16dbe..b2270d576979bd2acf42b6bec10d48eed947ab57 100644 (file)
@@ -1724,6 +1724,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
                           I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
        }
 
+       /*
+        * Apparently we need to have VGA mode enabled prior to changing
+        * the P1/P2 dividers. Otherwise the DPLL will keep using the old
+        * dividers, even though the register value does change.
+        */
+       I915_WRITE(reg, 0);
+
+       I915_WRITE(reg, dpll);
+
        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
        udelay(150);
@@ -14107,6 +14116,11 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
 
+       if (obj->userptr.mm) {
+               DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
+               return -EINVAL;
+       }
+
        return drm_gem_handle_create(file, &obj->base, handle);
 }
 
@@ -14897,9 +14911,19 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
        /* restore vblank interrupts to correct state */
        drm_crtc_vblank_reset(&crtc->base);
        if (crtc->active) {
+               struct intel_plane *plane;
+
                drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
                update_scanline_offset(crtc);
                drm_crtc_vblank_on(&crtc->base);
+
+               /* Disable everything but the primary plane */
+               for_each_intel_plane_on_crtc(dev, crtc, plane) {
+                       if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+                               continue;
+
+                       plane->disable_plane(&plane->base, &crtc->base);
+               }
        }
 
        /* We need to sanitize the plane -> pipe mapping first because this will
@@ -15067,38 +15091,25 @@ void i915_redisable_vga(struct drm_device *dev)
        i915_redisable_vga_power_on(dev);
 }
 
-static bool primary_get_hw_state(struct intel_crtc *crtc)
+static bool primary_get_hw_state(struct intel_plane *plane)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 
-       return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE);
+       return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
 }
 
-static void readout_plane_state(struct intel_crtc *crtc,
-                               struct intel_crtc_state *crtc_state)
+/* FIXME read out full plane state for all planes */
+static void readout_plane_state(struct intel_crtc *crtc)
 {
-       struct intel_plane *p;
-       struct intel_plane_state *plane_state;
-       bool active = crtc_state->base.active;
-
-       for_each_intel_plane(crtc->base.dev, p) {
-               if (crtc->pipe != p->pipe)
-                       continue;
-
-               plane_state = to_intel_plane_state(p->base.state);
+       struct drm_plane *primary = crtc->base.primary;
+       struct intel_plane_state *plane_state =
+               to_intel_plane_state(primary->state);
 
-               if (p->base.type == DRM_PLANE_TYPE_PRIMARY) {
-                       plane_state->visible = primary_get_hw_state(crtc);
-                       if (plane_state->visible)
-                               crtc->base.state->plane_mask |=
-                                       1 << drm_plane_index(&p->base);
-               } else {
-                       if (active)
-                               p->disable_plane(&p->base, &crtc->base);
+       plane_state->visible =
+               primary_get_hw_state(to_intel_plane(primary));
 
-                       plane_state->visible = false;
-               }
-       }
+       if (plane_state->visible)
+               crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
 }
 
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15121,34 +15132,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                crtc->base.state->active = crtc->active;
                crtc->base.enabled = crtc->active;
 
-               memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
-               if (crtc->base.state->active) {
-                       intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
-                       intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
-                       WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
-
-                       /*
-                        * The initial mode needs to be set in order to keep
-                        * the atomic core happy. It wants a valid mode if the
-                        * crtc's enabled, so we do the above call.
-                        *
-                        * At this point some state updated by the connectors
-                        * in their ->detect() callback has not run yet, so
-                        * no recalculation can be done yet.
-                        *
-                        * Even if we could do a recalculation and modeset
-                        * right now it would cause a double modeset if
-                        * fbdev or userspace chooses a different initial mode.
-                        *
-                        * If that happens, someone indicated they wanted a
-                        * mode change, which means it's safe to do a full
-                        * recalculation.
-                        */
-                       crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
-               }
-
-               crtc->base.hwmode = crtc->config->base.adjusted_mode;
-               readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state));
+               readout_plane_state(crtc);
 
                DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
                              crtc->base.base.id,
@@ -15207,6 +15191,36 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                              connector->base.name,
                              connector->base.encoder ? "enabled" : "disabled");
        }
+
+       for_each_intel_crtc(dev, crtc) {
+               crtc->base.hwmode = crtc->config->base.adjusted_mode;
+
+               memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
+               if (crtc->base.state->active) {
+                       intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
+                       intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+                       WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+
+                       /*
+                        * The initial mode needs to be set in order to keep
+                        * the atomic core happy. It wants a valid mode if the
+                        * crtc's enabled, so we do the above call.
+                        *
+                        * At this point some state updated by the connectors
+                        * in their ->detect() callback has not run yet, so
+                        * no recalculation can be done yet.
+                        *
+                        * Even if we could do a recalculation and modeset
+                        * right now it would cause a double modeset if
+                        * fbdev or userspace chooses a different initial mode.
+                        *
+                        * If that happens, someone indicated they wanted a
+                        * mode change, which means it's safe to do a full
+                        * recalculation.
+                        */
+                       crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+               }
+       }
 }
 
 /* Scan out the current hw modeset state,
index 3e4be5a3becdddf9fd2a23e6be26f02da90a28f2..6ade068884328680ffe024dd91eabb9ffe6d9013 100644 (file)
@@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
        drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
        drm_mode_connector_set_path_property(connector, pathprop);
+       return connector;
+}
+
+static void intel_dp_register_mst_connector(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct drm_device *dev = connector->dev;
        drm_modeset_lock_all(dev);
        intel_connector_add_to_fbdev(intel_connector);
        drm_modeset_unlock_all(dev);
        drm_connector_register(&intel_connector->base);
-       return connector;
 }
 
 static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 
 static struct drm_dp_mst_topology_cbs mst_cbs = {
        .add_connector = intel_dp_add_mst_connector,
+       .register_connector = intel_dp_register_mst_connector,
        .destroy_connector = intel_dp_destroy_mst_connector,
        .hotplug = intel_dp_mst_hotplug,
 };
index 53c0173a39fe182d5d2e50ac2ffc6637f77526fd..b17785719598c9ca867340dc77aaed858f0c72db 100644 (file)
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 
        /* Enable polling and queue hotplug re-enabling. */
        if (hpd_disabled) {
-               drm_kms_helper_poll_enable(dev);
+               drm_kms_helper_poll_enable_locked(dev);
                mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
                                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
        }
index 72e0edd7bbde77d3b12812bead2a3f676589385a..29dd4488dc49856b6518ba5fce760cbd4710a1e8 100644 (file)
@@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
        status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
 
        read_pointer = ring->next_context_status_buffer;
-       write_pointer = status_pointer & 0x07;
+       write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
        if (read_pointer > write_pointer)
-               write_pointer += 6;
+               write_pointer += GEN8_CSB_ENTRIES;
 
        spin_lock(&ring->execlist_lock);
 
        while (read_pointer < write_pointer) {
                read_pointer++;
                status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-                               (read_pointer % 6) * 8);
+                               (read_pointer % GEN8_CSB_ENTRIES) * 8);
                status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-                               (read_pointer % 6) * 8 + 4);
+                               (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
 
                if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
                        continue;
@@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
        spin_unlock(&ring->execlist_lock);
 
        WARN(submit_contexts > 2, "More than two context complete events?\n");
-       ring->next_context_status_buffer = write_pointer % 6;
+       ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
        I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-                  _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
+                  _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
+                                ((u32)ring->next_context_status_buffer &
+                                 GEN8_CSB_PTR_MASK) << 8));
 }
 
 static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u8 next_context_status_buffer_hw;
 
        I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
        I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
                   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
                   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
        POSTING_READ(RING_MODE_GEN7(ring));
-       ring->next_context_status_buffer = 0;
+
+       /*
+        * Instead of resetting the Context Status Buffer (CSB) read pointer to
+        * zero, we need to read the write pointer from hardware and use its
+        * value because "this register is power context save restored".
+        * Effectively, these states have been observed:
+        *
+        *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
+        * BDW  | CSB regs not reset       | CSB regs reset       |
+        * CHT  | CSB regs not reset       | CSB regs not reset   |
+        */
+       next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
+                                                  & GEN8_CSB_PTR_MASK);
+
+       /*
+        * When the CSB registers are reset (also after power-up / gpu reset),
+        * CSB write pointer is set to all 1's, which is not valid, use '5' in
+        * this special case, so the first element read is CSB[0].
+        */
+       if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
+               next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
+
+       ring->next_context_status_buffer = next_context_status_buffer_hw;
        DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
 
        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -1634,6 +1659,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
 
        if (invalidate_domains) {
index 64f89f9982a20f745cb41ef5950cd637437000ca..3c63bb32ad81c657e418b1b7143ed934d036c66f 100644 (file)
@@ -25,6 +25,8 @@
 #define _INTEL_LRC_H_
 
 #define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x07
 
 /* Execlists regs */
 #define RING_ELSP(ring)                        ((ring)->mmio_base+0x230)
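
Editor's note: GEN8_CSB_ENTRIES and GEN8_CSB_PTR_MASK replace the bare 6 and 0x07 used for the context status buffer in intel_lrc.c above. A minimal user-space sketch of the wrap arithmetic the IRQ handler performs over those six entries, with made-up values standing in for the register reads:

#include <stdio.h>

#define GEN8_CSB_ENTRIES  6
#define GEN8_CSB_PTR_MASK 0x07

int main(void)
{
        /* Stand-ins for ring->next_context_status_buffer and the masked
         * hardware write pointer read from RING_CONTEXT_STATUS_PTR. */
        unsigned int read_pointer  = 4;
        unsigned int write_pointer = 0x01 & GEN8_CSB_PTR_MASK;

        /* The buffer is circular: if the write pointer has wrapped past the
         * read pointer, unwrap it so the loop below walks forward. */
        if (read_pointer > write_pointer)
                write_pointer += GEN8_CSB_ENTRIES;

        while (read_pointer < write_pointer) {
                read_pointer++;
                printf("process CSB[%u]\n", read_pointer % GEN8_CSB_ENTRIES);
        }

        printf("next read pointer: %u\n", write_pointer % GEN8_CSB_ENTRIES);
        return 0;
}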
index 6e6b8db996ef2450c615a71ef10b7ffcbbc62479..61b451fbd09e6ec9de8a42b20a1bb11b6438496f 100644 (file)
@@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
        if (invalidate_domains) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
@@ -418,6 +419,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
        if (invalidate_domains) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
index af7fdb3bd663aef062a5cd41a2cdacbb4492515d..7401cf90b0dbcd1eb335e0c6defc42d22c9bb631 100644 (file)
@@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
        }
 
        if (power_well->data == SKL_DISP_PW_1) {
-               intel_prepare_ddi(dev);
+               if (!dev_priv->power_domains.initializing)
+                       intel_prepare_ddi(dev);
                gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
        }
 }
index cc6c228e11c83566d1ac1a2c59fcefa959345463..e905c00acf1a37baef92d66a6f38b888372b9834 100644 (file)
@@ -469,9 +469,13 @@ nouveau_display_create(struct drm_device *dev)
        if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                dev->mode_config.max_width = 4096;
                dev->mode_config.max_height = 4096;
-       } else {
+       } else
+       if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) {
                dev->mode_config.max_width = 8192;
                dev->mode_config.max_height = 8192;
+       } else {
+               dev->mode_config.max_width = 16384;
+               dev->mode_config.max_height = 16384;
        }
 
        dev->mode_config.preferred_depth = 24;
index 2791701685dc82bf4e2655ce3ea8ea6c3b278e49..59f27e774acb5e9c98c9854bd72195efdbec3a48 100644 (file)
@@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info)
        return 0;
 }
 
+static int
+nouveau_fbcon_open(struct fb_info *info, int user)
+{
+       struct nouveau_fbdev *fbcon = info->par;
+       struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+       int ret = pm_runtime_get_sync(drm->dev->dev);
+       if (ret < 0 && ret != -EACCES)
+               return ret;
+       return 0;
+}
+
+static int
+nouveau_fbcon_release(struct fb_info *info, int user)
+{
+       struct nouveau_fbdev *fbcon = info->par;
+       struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+       pm_runtime_put(drm->dev->dev);
+       return 0;
+}
+
 static struct fb_ops nouveau_fbcon_ops = {
        .owner = THIS_MODULE,
+       .fb_open = nouveau_fbcon_open,
+       .fb_release = nouveau_fbcon_release,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = nouveau_fbcon_fillrect,
@@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = {
 
 static struct fb_ops nouveau_fbcon_sw_ops = {
        .owner = THIS_MODULE,
+       .fb_open = nouveau_fbcon_open,
+       .fb_release = nouveau_fbcon_release,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
index 2c9981512d27b7702f6f196f64b54276acd8b86e..41be584147b936a921d0c6bdd4e5fe0b4251c70c 100644 (file)
@@ -227,11 +227,12 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nvkm_vma *vma;
 
-       if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+       if (is_power_of_2(nvbo->valid_domains))
+               rep->domain = nvbo->valid_domains;
+       else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
        rep->offset = nvbo->bo.offset;
        if (cli->vm) {
                vma = nouveau_bo_vma_find(nvbo, cli->vm);
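
Editor's note: is_power_of_2() is used above to detect a valid_domains mask with exactly one bit set, in which case that single domain is reported verbatim. A tiny stand-alone sketch of the bit test; the in-kernel helper is essentially this expression:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* A mask with exactly one bit set means the object can only ever live in
 * that one domain. */
static bool is_power_of_2(uint32_t n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
        printf("%d %d %d\n",
               is_power_of_2(0x1),      /* single domain: 1 */
               is_power_of_2(0x3),      /* two domains:   0 */
               is_power_of_2(0x4));     /* single domain: 1 */
        return 0;
}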
index 65af31441e9c29496647084d927c11f393974653..a7d69ce7abc1ad33b5fd283a5f4b5321feb89f18 100644 (file)
@@ -267,6 +267,12 @@ init_i2c(struct nvbios_init *init, int index)
                index = NVKM_I2C_BUS_PRI;
                if (init->outp && init->outp->i2c_upper_default)
                        index = NVKM_I2C_BUS_SEC;
+       } else
+       if (index == 0x80) {
+               index = NVKM_I2C_BUS_PRI;
+       } else
+       if (index == 0x81) {
+               index = NVKM_I2C_BUS_SEC;
        }
 
        bus = nvkm_i2c_bus_find(i2c, index);
index e0ec2a6b7b795c964e119eae2dfed644d24e4ae2..212800ecdce99e4eb1a3a23ebdab9c207cd860da 100644 (file)
@@ -8,7 +8,10 @@ struct nvbios_source {
        void *(*init)(struct nvkm_bios *, const char *);
        void  (*fini)(void *);
        u32   (*read)(void *, u32 offset, u32 length, struct nvkm_bios *);
+       u32   (*size)(void *);
        bool rw;
+       bool ignore_checksum;
+       bool no_pcir;
 };
 
 int nvbios_extend(struct nvkm_bios *, u32 length);
index 792f017525f689bb1d38b86c0bf2e746f9495c8d..b2557e87afdd6d0e95910b3b4b91e37ce9a3e269 100644 (file)
@@ -45,7 +45,7 @@ shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto)
                u32 read = mthd->func->read(data, start, limit - start, bios);
                bios->size = start + read;
        }
-       return bios->size >= limit;
+       return bios->size >= upto;
 }
 
 static int
@@ -55,14 +55,22 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
        struct nvbios_image image;
        int score = 1;
 
-       if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
-               nvkm_debug(subdev, "%08x: header fetch failed\n", offset);
-               return 0;
-       }
+       if (mthd->func->no_pcir) {
+               image.base = 0;
+               image.type = 0;
+               image.size = mthd->func->size(mthd->data);
+               image.last = 1;
+       } else {
+               if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
+                       nvkm_debug(subdev, "%08x: header fetch failed\n",
+                                  offset);
+                       return 0;
+               }
 
-       if (!nvbios_image(bios, idx, &image)) {
-               nvkm_debug(subdev, "image %d invalid\n", idx);
-               return 0;
+               if (!nvbios_image(bios, idx, &image)) {
+                       nvkm_debug(subdev, "image %d invalid\n", idx);
+                       return 0;
+               }
        }
        nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
                   image.base, image.type, image.size);
@@ -74,7 +82,8 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
 
        switch (image.type) {
        case 0x00:
-               if (nvbios_checksum(&bios->data[image.base], image.size)) {
+               if (!mthd->func->ignore_checksum &&
+                   nvbios_checksum(&bios->data[image.base], image.size)) {
                        nvkm_debug(subdev, "%08x: checksum failed\n",
                                   image.base);
                        if (mthd->func->rw)
index bd60d7dd09f51a45b70f120597ca38adaf8c102b..4bf486b57101367708bba2b6fe4bdd1d985f1d19 100644 (file)
@@ -21,6 +21,7 @@
  *
  */
 #include "priv.h"
+
 #include <core/pci.h>
 
 #if defined(__powerpc__)
@@ -33,17 +34,26 @@ static u32
 of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 {
        struct priv *priv = data;
-       if (offset + length <= priv->size) {
+       if (offset < priv->size) {
+               length = min_t(u32, length, priv->size - offset);
                memcpy_fromio(bios->data + offset, priv->data + offset, length);
                return length;
        }
        return 0;
 }
 
+static u32
+of_size(void *data)
+{
+       struct priv *priv = data;
+       return priv->size;
+}
+
 static void *
 of_init(struct nvkm_bios *bios, const char *name)
 {
-       struct pci_dev *pdev = bios->subdev.device->func->pci(bios->subdev.device)->pdev;
+       struct nvkm_device *device = bios->subdev.device;
+       struct pci_dev *pdev = device->func->pci(device)->pdev;
        struct device_node *dn;
        struct priv *priv;
        if (!(dn = pci_device_to_OF_node(pdev)))
@@ -62,7 +72,10 @@ nvbios_of = {
        .init = of_init,
        .fini = (void(*)(void *))kfree,
        .read = of_read,
+       .size = of_size,
        .rw = false,
+       .ignore_checksum = true,
+       .no_pcir = true,
 };
 #else
 const struct nvbios_source
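
Editor's note: of_read() now clamps a partially out-of-range request instead of failing it outright. A stand-alone sketch of the same clamp-and-copy pattern on a plain byte buffer:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Copy up to @length bytes starting at @offset out of a @size-byte image,
 * clamping the request rather than rejecting it. Returns the number of
 * bytes actually copied. */
static uint32_t image_read(const uint8_t *image, uint32_t size,
                           uint8_t *dst, uint32_t offset, uint32_t length)
{
        if (offset >= size)
                return 0;
        if (length > size - offset)
                length = size - offset;
        memcpy(dst, image + offset, length);
        return length;
}

int main(void)
{
        uint8_t image[16] = { 0 }, buf[32];

        /* Request 8 bytes starting at offset 12 of a 16-byte image: only
         * the last 4 bytes are available, so 4 is returned. */
        printf("read %u bytes\n", image_read(image, sizeof(image), buf, 12, 8));
        return 0;
}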
index 814cb51cc87372bd4c18225b16b1401d10285b60..385a90f91ed6a14e394ba1e8b4743d9c38c06412 100644 (file)
@@ -35,6 +35,8 @@ static const struct nvkm_device_agp_quirk
 nvkm_device_agp_quirks[] = {
        /* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */
        { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
+       /* SiS 761 does not support AGP cards, use PCI mode */
+       { PCI_VENDOR_ID_SI, 0x0761, PCI_ANY_ID, PCI_ANY_ID, 0 },
        {},
 };
 
@@ -137,8 +139,10 @@ nvkm_agp_ctor(struct nvkm_pci *pci)
        while (quirk->hostbridge_vendor) {
                if (info.device->vendor == quirk->hostbridge_vendor &&
                    info.device->device == quirk->hostbridge_device &&
-                   pci->pdev->vendor == quirk->chip_vendor &&
-                   pci->pdev->device == quirk->chip_device) {
+                   (quirk->chip_vendor == (u16)PCI_ANY_ID ||
+                   pci->pdev->vendor == quirk->chip_vendor) &&
+                   (quirk->chip_device == (u16)PCI_ANY_ID ||
+                   pci->pdev->device == quirk->chip_device)) {
                        nvkm_info(subdev, "forcing default agp mode to %dX, "
                                          "use NvAGP=<mode> to override\n",
                                  quirk->mode);
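
Editor's note: the quirk match now treats (u16)PCI_ANY_ID in the chip fields as a wildcard, so the new SiS 761 entry applies regardless of which GPU is plugged in. A stand-alone sketch of the same table walk; the literal vendor/device numbers (0x1039 for SiS, 0x10de for NVIDIA) are assumed stand-ins for the PCI_* constants:

#include <stdio.h>
#include <stdint.h>

#define ANY_ID 0xffffU          /* stand-in for (u16)PCI_ANY_ID */

struct quirk {
        uint16_t hostbridge_vendor, hostbridge_device;
        uint16_t chip_vendor, chip_device;
        int mode;
};

static const struct quirk quirks[] = {
        /* SiS 761 host bridge: force PCI mode for any card. */
        { 0x1039, 0x0761, ANY_ID, ANY_ID, 0 },
        { 0 },
};

static int quirk_mode(uint16_t hb_vendor, uint16_t hb_device,
                      uint16_t chip_vendor, uint16_t chip_device)
{
        const struct quirk *q;

        for (q = quirks; q->hostbridge_vendor; q++) {
                if (q->hostbridge_vendor == hb_vendor &&
                    q->hostbridge_device == hb_device &&
                    (q->chip_vendor == ANY_ID || q->chip_vendor == chip_vendor) &&
                    (q->chip_device == ANY_ID || q->chip_device == chip_device))
                        return q->mode;
        }
        return -1;              /* no quirk: keep the probed mode */
}

int main(void)
{
        printf("mode = %d\n", quirk_mode(0x1039, 0x0761, 0x10de, 0x0322));
        return 0;
}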
index dd845f82cc24aaa5b46cf5680ffd607d3a8ebb2b..183aea1abebc4afe5ad28f4694bc92ccc788ee8d 100644 (file)
@@ -242,6 +242,10 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
        bo->is_primary = true;
 
        ret = qxl_bo_reserve(bo, false);
+       if (ret)
+               return ret;
+       ret = qxl_bo_pin(bo, bo->type, NULL);
+       qxl_bo_unreserve(bo);
        if (ret)
                return ret;
 
@@ -257,7 +261,11 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
        }
        drm_vblank_put(dev, qcrtc->index);
 
-       qxl_bo_unreserve(bo);
+       ret = qxl_bo_reserve(bo, false);
+       if (!ret) {
+               qxl_bo_unpin(bo);
+               qxl_bo_unreserve(bo);
+       }
 
        return 0;
 }
@@ -618,7 +626,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
                  adjusted_mode->hdisplay,
                  adjusted_mode->vdisplay);
 
-       if (qcrtc->index == 0)
+       if (bo->is_primary == false)
                recreate_primary = true;
 
        if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
index 41c422fee31a02dbc932964bc4686921e533fdd3..c4a552637c9353d70cab76083b7d7786dc436d29 100644 (file)
@@ -144,14 +144,17 @@ static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
 
        spin_lock_irqsave(&qfbdev->dirty.lock, flags);
 
-       if (qfbdev->dirty.y1 < y)
-               y = qfbdev->dirty.y1;
-       if (qfbdev->dirty.y2 > y2)
-               y2 = qfbdev->dirty.y2;
-       if (qfbdev->dirty.x1 < x)
-               x = qfbdev->dirty.x1;
-       if (qfbdev->dirty.x2 > x2)
-               x2 = qfbdev->dirty.x2;
+       if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) &&
+           (qfbdev->dirty.x2 - qfbdev->dirty.x1)) {
+               if (qfbdev->dirty.y1 < y)
+                       y = qfbdev->dirty.y1;
+               if (qfbdev->dirty.y2 > y2)
+                       y2 = qfbdev->dirty.y2;
+               if (qfbdev->dirty.x1 < x)
+                       x = qfbdev->dirty.x1;
+               if (qfbdev->dirty.x2 > x2)
+                       x2 = qfbdev->dirty.x2;
+       }
 
        qfbdev->dirty.x1 = x;
        qfbdev->dirty.x2 = x2;
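
Editor's note: the added guard only merges the previously recorded dirty rectangle when it has non-zero area, so a cleared (empty) rectangle no longer drags stale coordinates into the union. A stand-alone sketch of that merge:

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Merge @dirty into @add, treating a zero-area @dirty as empty so stale
 * coordinates do not inflate the union; store the result back in @dirty. */
static void dirty_merge(struct rect *dirty, const struct rect *add)
{
        struct rect r = *add;

        if ((dirty->x2 - dirty->x1) && (dirty->y2 - dirty->y1)) {
                if (dirty->x1 < r.x1) r.x1 = dirty->x1;
                if (dirty->y1 < r.y1) r.y1 = dirty->y1;
                if (dirty->x2 > r.x2) r.x2 = dirty->x2;
                if (dirty->y2 > r.y2) r.y2 = dirty->y2;
        }
        *dirty = r;
}

int main(void)
{
        struct rect dirty  = { 0, 0, 0, 0 };            /* empty */
        struct rect update = { 10, 20, 30, 40 };

        dirty_merge(&dirty, &update);
        printf("%d,%d - %d,%d\n", dirty.x1, dirty.y1, dirty.x2, dirty.y2);
        return 0;
}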
index b66ec331c17cd51f1b81022ebd29d18944258b43..4efa8e261baf59546ca24eb39920bc4159358ab7 100644 (file)
@@ -307,7 +307,7 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
                if (idr_ret < 0)
                        return idr_ret;
-               bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
+               bo = to_qxl_bo(entry->tv.bo);
 
                (*release)->release_offset = create_rel->release_offset + 64;
 
@@ -316,8 +316,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                info = qxl_release_map(qdev, *release);
                info->id = idr_ret;
                qxl_release_unmap(qdev, *release, info);
-
-               qxl_bo_unref(&bo);
                return 0;
        }
 
index c3872598b85a3856787b1bf0b7113633a468e020..bb292143997ee8ccce3e6e6898edba2a5eeb12b6 100644 (file)
@@ -237,6 +237,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
        backlight_update_status(bd);
 
        DRM_INFO("radeon atom DIG backlight initialized\n");
+       rdev->mode_info.bl_encoder = radeon_encoder;
 
        return;
 
@@ -1624,8 +1625,14 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
                } else
                        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
                if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-                       args.ucAction = ATOM_LCD_BLON;
-                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                       if (rdev->mode_info.bl_encoder) {
+                               struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+                               atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
+                       } else {
+                               args.ucAction = ATOM_LCD_BLON;
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                       }
                }
                break;
        case DRM_MODE_DPMS_STANDBY:
@@ -1705,9 +1712,13 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
                        if (ASIC_IS_DCE4(rdev))
                                atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
                }
-               if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-                       atombios_dig_transmitter_setup(encoder,
-                                                      ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+               if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+                       if (rdev->mode_info.bl_encoder)
+                               atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
+                       else
+                               atombios_dig_transmitter_setup(encoder,
+                                                              ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+               }
                if (ext_encoder)
                        atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
                break;
index f03b7eb152336d1f799ab33d1d195c1fba40b360..b6cbd816537e7e69bc920482961cdb47b6dfd68d 100644 (file)
@@ -1658,6 +1658,7 @@ struct radeon_pm {
        u8                      fan_max_rpm;
        /* dpm */
        bool                    dpm_enabled;
+       bool                    sysfs_initialized;
        struct radeon_dpm       dpm;
 };
 
index d2e9e9efc159c053b954aed21840ebe7d91f2739..6743174acdbcd22b5d357d0275f1e564dc653d81 100644 (file)
@@ -1633,18 +1633,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
        radeon_fbdev_init(rdev);
        drm_kms_helper_poll_init(rdev->ddev);
 
-       if (rdev->pm.dpm_enabled) {
-               /* do dpm late init */
-               ret = radeon_pm_late_init(rdev);
-               if (ret) {
-                       rdev->pm.dpm_enabled = false;
-                       DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
-               }
-               /* set the dpm state for PX since there won't be
-                * a modeset to call this.
-                */
-               radeon_pm_compute_clocks(rdev);
-       }
+       /* do pm late init */
+       ret = radeon_pm_late_init(rdev);
 
        return 0;
 }
index 5e09c061847f50c688650d12625a462e8c4737cd..744f5c49c66463c56187dbc2130a77539a264130 100644 (file)
@@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
 {
        struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
        struct drm_device *dev = master->base.dev;
-       struct radeon_device *rdev = dev->dev_private;
        struct radeon_connector *radeon_connector;
        struct drm_connector *connector;
 
@@ -284,14 +283,22 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
        radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
 
        drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+       drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
        drm_mode_connector_set_path_property(connector, pathprop);
 
+       return connector;
+}
+
+static void radeon_dp_register_mst_connector(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
+
        drm_modeset_lock_all(dev);
        radeon_fb_add_connector(rdev, connector);
        drm_modeset_unlock_all(dev);
 
        drm_connector_register(connector);
-       return connector;
 }
 
 static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -324,6 +331,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 
 struct drm_dp_mst_topology_cbs mst_cbs = {
        .add_connector = radeon_dp_add_mst_connector,
+       .register_connector = radeon_dp_register_mst_connector,
        .destroy_connector = radeon_dp_destroy_mst_connector,
        .hotplug = radeon_dp_mst_hotplug,
 };
index ef99917f000d96a7dcf3fc88f089bdf08e0afc28..c6ee80216cf4a71f510bf239fc4fdf6f67235f48 100644 (file)
@@ -194,7 +194,6 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
                        radeon_atom_backlight_init(radeon_encoder, connector);
                else
                        radeon_legacy_backlight_init(radeon_encoder, connector);
-               rdev->mode_info.bl_encoder = radeon_encoder;
        }
 }
 
index 7214858ffceaa8c20409206533dcc332fd663705..26da2f4d7b4f56fca3948af07bca9c061bb5ddaf 100644 (file)
@@ -48,40 +48,10 @@ struct radeon_fbdev {
        struct radeon_device *rdev;
 };
 
-/**
- * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
- *
- * @info: fbdev info
- *
- * This function hides the cursor on all CRTCs used by fbdev.
- */
-static int radeon_fb_helper_set_par(struct fb_info *info)
-{
-       int ret;
-
-       ret = drm_fb_helper_set_par(info);
-
-       /* XXX: with universal plane support fbdev will automatically disable
-        * all non-primary planes (including the cursor)
-        */
-       if (ret == 0) {
-               struct drm_fb_helper *fb_helper = info->par;
-               int i;
-
-               for (i = 0; i < fb_helper->crtc_count; i++) {
-                       struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
-                       radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
-               }
-       }
-
-       return ret;
-}
-
 static struct fb_ops radeonfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
-       .fb_set_par = radeon_fb_helper_set_par,
+       .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
        .fb_copyarea = drm_fb_helper_cfb_copyarea,
        .fb_imageblit = drm_fb_helper_cfb_imageblit,
@@ -427,3 +397,19 @@ void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector
 {
        drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
 }
+
+void radeon_fbdev_restore_mode(struct radeon_device *rdev)
+{
+       struct radeon_fbdev *rfbdev = rdev->mode_info.rfbdev;
+       struct drm_fb_helper *fb_helper;
+       int ret;
+
+       if (!rfbdev)
+               return;
+
+       fb_helper = &rfbdev->helper;
+
+       ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+       if (ret)
+               DRM_DEBUG("failed to restore crtc mode\n");
+}
index 4a119c255ba9709692b234c51a928d826cc22ec2..0e932bf932c11f95a59a57bb3c9126e01a6baf3d 100644 (file)
@@ -598,7 +598,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
 /**
- * radeon_driver_firstopen_kms - drm callback for last close
+ * radeon_driver_lastclose_kms - drm callback for last close
  *
  * @dev: drm dev pointer
  *
@@ -606,6 +606,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  */
 void radeon_driver_lastclose_kms(struct drm_device *dev)
 {
+       struct radeon_device *rdev = dev->dev_private;
+
+       radeon_fbdev_restore_mode(rdev);
        vga_switcheroo_process_delayed_switch();
 }
 
index 45715307db7177a9af7393a697d255c8ed277344..30de43366eae806d0afd45192ce9da032c08490c 100644 (file)
@@ -441,6 +441,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
        backlight_update_status(bd);
 
        DRM_INFO("radeon legacy LVDS backlight initialized\n");
+       rdev->mode_info.bl_encoder = radeon_encoder;
 
        return;
 
index aecc3e3dec0ca093441e3871df414627b51e92ec..457b026a0972782fc6d777b1069b683c1fda037f 100644 (file)
@@ -980,6 +980,7 @@ int radeon_fbdev_init(struct radeon_device *rdev);
 void radeon_fbdev_fini(struct radeon_device *rdev);
 void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
 bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+void radeon_fbdev_restore_mode(struct radeon_device *rdev);
 
 void radeon_fb_output_poll_changed(struct radeon_device *rdev);
 
index 05751f3f84449d40457b3f989f0d7ab874935bbf..5feee3b4c55741011ae3501f501f72df0e1311ac 100644 (file)
@@ -717,10 +717,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
        struct radeon_device *rdev = dev_get_drvdata(dev);
        umode_t effective_mode = attr->mode;
 
-       /* Skip limit attributes if DPM is not enabled */
+       /* Skip attributes if DPM is not enabled */
        if (rdev->pm.pm_method != PM_METHOD_DPM &&
            (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
-            attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+            attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
+            attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+            attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+            attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+            attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
                return 0;
 
        /* Skip fan attributes if fan is not present */
@@ -1326,14 +1330,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
        INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
 
        if (rdev->pm.num_power_states > 1) {
-               /* where's the best place to put these? */
-               ret = device_create_file(rdev->dev, &dev_attr_power_profile);
-               if (ret)
-                       DRM_ERROR("failed to create device file for power profile\n");
-               ret = device_create_file(rdev->dev, &dev_attr_power_method);
-               if (ret)
-                       DRM_ERROR("failed to create device file for power method\n");
-
                if (radeon_debugfs_pm_init(rdev)) {
                        DRM_ERROR("Failed to register debugfs file for PM!\n");
                }
@@ -1391,20 +1387,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
                goto dpm_failed;
        rdev->pm.dpm_enabled = true;
 
-       ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
-       if (ret)
-               DRM_ERROR("failed to create device file for dpm state\n");
-       ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
-       if (ret)
-               DRM_ERROR("failed to create device file for dpm state\n");
-       /* XXX: these are noops for dpm but are here for backwards compat */
-       ret = device_create_file(rdev->dev, &dev_attr_power_profile);
-       if (ret)
-               DRM_ERROR("failed to create device file for power profile\n");
-       ret = device_create_file(rdev->dev, &dev_attr_power_method);
-       if (ret)
-               DRM_ERROR("failed to create device file for power method\n");
-
        if (radeon_debugfs_pm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for dpm!\n");
        }
@@ -1545,9 +1527,51 @@ int radeon_pm_late_init(struct radeon_device *rdev)
        int ret = 0;
 
        if (rdev->pm.pm_method == PM_METHOD_DPM) {
-               mutex_lock(&rdev->pm.mutex);
-               ret = radeon_dpm_late_enable(rdev);
-               mutex_unlock(&rdev->pm.mutex);
+               if (rdev->pm.dpm_enabled) {
+                       if (!rdev->pm.sysfs_initialized) {
+                               ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+                               if (ret)
+                                       DRM_ERROR("failed to create device file for dpm state\n");
+                               ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+                               if (ret)
+                                       DRM_ERROR("failed to create device file for dpm state\n");
+                               /* XXX: these are noops for dpm but are here for backwards compat */
+                               ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+                               if (ret)
+                                       DRM_ERROR("failed to create device file for power profile\n");
+                               ret = device_create_file(rdev->dev, &dev_attr_power_method);
+                               if (ret)
+                                       DRM_ERROR("failed to create device file for power method\n");
+                               if (!ret)
+                                       rdev->pm.sysfs_initialized = true;
+                       }
+
+                       mutex_lock(&rdev->pm.mutex);
+                       ret = radeon_dpm_late_enable(rdev);
+                       mutex_unlock(&rdev->pm.mutex);
+                       if (ret) {
+                               rdev->pm.dpm_enabled = false;
+                               DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+                       } else {
+                               /* set the dpm state for PX since there won't be
+                                * a modeset to call this.
+                                */
+                               radeon_pm_compute_clocks(rdev);
+                       }
+               }
+       } else {
+               if ((rdev->pm.num_power_states > 1) &&
+                   (!rdev->pm.sysfs_initialized)) {
+                       /* where's the best place to put these? */
+                       ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for power profile\n");
+                       ret = device_create_file(rdev->dev, &dev_attr_power_method);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for power method\n");
+                       if (!ret)
+                               rdev->pm.sysfs_initialized = true;
+               }
        }
        return ret;
 }
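
Editor's note: the sysfs attribute creation moves into late init, guarded by the new sysfs_initialized flag so a second pass (for example after resume) does not recreate the files. A simplified stand-alone sketch of the one-time guard; the error handling is condensed compared with the driver, and the file names are illustrative:

#include <stdio.h>
#include <stdbool.h>

static bool sysfs_initialized;

/* Stand-in for device_create_file(); returns 0 on success. */
static int create_file(const char *name)
{
        printf("created %s\n", name);
        return 0;
}

/* Late init can run more than once; create the attributes only the first
 * time it succeeds. */
static int pm_late_init(void)
{
        int ret = 0;

        if (!sysfs_initialized) {
                ret = create_file("power_dpm_state");
                if (!ret)
                        ret = create_file("power_method");
                if (!ret)
                        sysfs_initialized = true;
        }
        return ret;
}

int main(void)
{
        pm_late_init();
        pm_late_init();         /* second call creates nothing */
        return 0;
}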
index e9115d3f67b0ca0a34ff68ce564b316895c81939..e72bf46042e0a42f469cbfd8ff285b1ae9abb155 100644 (file)
@@ -2928,6 +2928,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
        { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
        { 0, 0, 0, 0 },
 };
 
index db8b49101a8b620f742af39c24154933bf3d1ff7..512263919282328cb55505abf2542987c5f9f9cd 100644 (file)
@@ -34,8 +34,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
 
-       seq_printf(m, "fence %ld %lld\n",
-                  atomic64_read(&vgdev->fence_drv.last_seq),
+       seq_printf(m, "fence %llu %lld\n",
+                  (u64)atomic64_read(&vgdev->fence_drv.last_seq),
                   vgdev->fence_drv.sync_seq);
        return 0;
 }
index 1da632631dac808e8273fe3aa77a5426950f9156..67097c9ce9c143e2d6ac3534c4379d0f024d4887 100644 (file)
@@ -61,7 +61,7 @@ static void virtio_timeline_value_str(struct fence *f, char *str, int size)
 {
        struct virtio_gpu_fence *fence = to_virtio_fence(f);
 
-       snprintf(str, size, "%lu", atomic64_read(&fence->drv->last_seq));
+       snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
 }
 
 static const struct fence_ops virtio_fence_ops = {
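
Editor's note: both virtio hunks cast the atomic64_read() result to u64 and print it with %llu, which keeps the format string correct on builds where the underlying type is long rather than long long. A stand-alone illustration of the portable-printing idiom:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
        uint64_t seq = 123456789012345ULL;

        /* Casting to a known 64-bit type and using a matching conversion
         * avoids printf format warnings that differ between 32- and
         * 64-bit builds. */
        printf("fence %llu\n", (unsigned long long)seq);
        printf("fence %" PRIu64 "\n", seq);
        return 0;
}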
index 5ae8f921da2a478bef55b617c3a28f66ed1e2773..6377e8151000f5401309133b635150acf128c2ad 100644 (file)
@@ -415,16 +415,16 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
  *
  * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
  * command buffers left that are not submitted to hardware, make sure
- * IRQ handling is turned on. Otherwise, make sure it's turned off. This
- * function may return -EAGAIN to indicate it should be rerun due to
- * possibly missed IRQs if IRQs has just been turned on.
+ * IRQ handling is turned on. Otherwise, make sure it's turned off.
  */
-static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 {
-       int notempty = 0;
+       int notempty;
        struct vmw_cmdbuf_context *ctx;
        int i;
 
+retry:
+       notempty = 0;
        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_process(man, ctx, &notempty);
 
@@ -440,10 +440,8 @@ static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
                man->irq_on = true;
 
                /* Rerun in case we just missed an irq. */
-               return -EAGAIN;
+               goto retry;
        }
-
-       return 0;
 }
 
 /**
@@ -468,8 +466,7 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
        header->cb_context = cb_context;
        list_add_tail(&header->list, &man->ctx[cb_context].submitted);
 
-       if (vmw_cmdbuf_man_process(man) == -EAGAIN)
-               vmw_cmdbuf_man_process(man);
+       vmw_cmdbuf_man_process(man);
 }
 
 /**
@@ -488,8 +485,7 @@ static void vmw_cmdbuf_man_tasklet(unsigned long data)
        struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
 
        spin_lock(&man->lock);
-       if (vmw_cmdbuf_man_process(man) == -EAGAIN)
-               (void) vmw_cmdbuf_man_process(man);
+       vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);
 }
 
@@ -507,6 +503,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
        struct vmw_cmdbuf_man *man =
                container_of(work, struct vmw_cmdbuf_man, work);
        struct vmw_cmdbuf_header *entry, *next;
+       uint32_t dummy;
        bool restart = false;
 
        spin_lock_bh(&man->lock);
@@ -523,6 +520,8 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
        if (restart && vmw_cmdbuf_startstop(man, true))
                DRM_ERROR("Failed restarting command buffer context 0.\n");
 
+       /* Send a new fence in case one was removed */
+       vmw_fifo_send_fence(man->dev_priv, &dummy);
 }
 
 /**
@@ -681,6 +680,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
                                         0, 0,
                                         DRM_MM_SEARCH_DEFAULT,
                                         DRM_MM_CREATE_DEFAULT);
+       if (ret) {
+               vmw_cmdbuf_man_process(man);
+               ret = drm_mm_insert_node_generic(&man->mm, info->node,
+                                                info->page_size, 0, 0,
+                                                DRM_MM_SEARCH_DEFAULT,
+                                                DRM_MM_CREATE_DEFAULT);
+       }
+
        spin_unlock_bh(&man->lock);
        info->done = !ret;
 
@@ -1160,7 +1167,14 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
        drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
 
        man->has_pool = true;
-       man->default_size = default_size;
+
+       /*
+        * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
+        * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
+        * needs to wait for space and we block on further command
+        * submissions to be able to free up space.
+        */
+       man->default_size = VMW_CMDBUF_INLINE_SIZE;
        DRM_INFO("Using command buffers with %s pool.\n",
                 (man->using_mob) ? "MOB" : "DMA");
 
index 64b50409fa0749558844cf561aac983e36197241..03f63c749c02333f412c82184f20def8ce1d8d74 100644 (file)
@@ -657,7 +657,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
        struct vmw_resource *res = &user_srf->srf.res;
 
        *p_base = NULL;
-       ttm_base_object_unref(&user_srf->backup_base);
+       if (user_srf->backup_base)
+               ttm_base_object_unref(&user_srf->backup_base);
        vmw_resource_unreference(&res);
 }
 
index e13c902e8966977581f1959155b3f83774e94aa5..796569eeaf1d762c9d0283165b132c7a196d0e30 100644 (file)
@@ -840,6 +840,16 @@ config SENSORS_MAX6697
          This driver can also be built as a module.  If so, the module
          will be called max6697.
 
+config SENSORS_MAX31790
+       tristate "Maxim MAX31790 sensor chip"
+       depends on I2C
+       help
+         If you say yes here you get support for the Maxim MAX31790
+         6-Channel PWM-Output Fan RPM Controller.
+
+         This driver can also be built as a module.  If so, the module
+         will be called max31790.
+
 config SENSORS_HTU21
        tristate "Measurement Specialties HTU21D humidity/temperature sensors"
        depends on I2C
index 9e0f3dd2841daaa1531acd3897c9077b2d18a09d..01855ee641d1d358dd01ed035ce6b20c0cbd080f 100644 (file)
@@ -115,6 +115,7 @@ obj-$(CONFIG_SENSORS_MAX6639)       += max6639.o
 obj-$(CONFIG_SENSORS_MAX6642)  += max6642.o
 obj-$(CONFIG_SENSORS_MAX6650)  += max6650.o
 obj-$(CONFIG_SENSORS_MAX6697)  += max6697.o
+obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
 obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
 obj-$(CONFIG_SENSORS_MCP3021)  += mcp3021.o
 obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
index 6cb89c0ebab6df03f7e8b38fc81cecd3136e57de..d87cae8c635fea8367609c4cd0013017cf41ceb6 100644 (file)
@@ -377,7 +377,7 @@ static int setup_irqs(struct platform_device *pdev)
        }
 
        ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
-               abx500_temp_irq_handler, IRQF_NO_SUSPEND, "abx500-temp", pdev);
+               abx500_temp_irq_handler, 0, "abx500-temp", pdev);
        if (ret < 0)
                dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret);
 
@@ -470,6 +470,7 @@ static const struct of_device_id abx500_temp_match[] = {
        { .compatible = "stericsson,abx500-temp" },
        {},
 };
+MODULE_DEVICE_TABLE(of, abx500_temp_match);
 #endif
 
 static struct platform_driver abx500_temp_driver = {
index 3e03379e7c5d92c0191af13883c4e440dbc2cc9c..6a27eb2fed1714a01b6b41f9ab6a1411b7c6a274 100644 (file)
@@ -52,7 +52,7 @@ module_param_named(tjmax, force_tjmax, int, 0444);
 MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 
 #define BASE_SYSFS_ATTR_NO     2       /* Sysfs Base attr no for coretemp */
-#define NUM_REAL_CORES         32      /* Number of Real cores per cpu */
+#define NUM_REAL_CORES         128     /* Number of Real cores per cpu */
 #define CORETEMP_NAME_LENGTH   19      /* String Length of attrs */
 #define MAX_CORE_ATTRS         4       /* Maximum no of basic attrs */
 #define TOTAL_ATTRS            (MAX_CORE_ATTRS + 1)
index e80ee23b62d317c007d2445d69917b2e65b35559..5f7067d7b625c6578c4b135becd159d50e8b0eee 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/pci.h>
 #include <linux/bitops.h>
 #include <asm/processor.h>
+#include <asm/msr.h>
 
 MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor");
 MODULE_AUTHOR("Andreas Herrmann <herrmann.der.user@googlemail.com>");
@@ -41,12 +42,21 @@ MODULE_LICENSE("GPL");
 #define REG_TDP_RUNNING_AVERAGE                0xe0
 #define REG_TDP_LIMIT3                 0xe8
 
+#define FAM15H_MIN_NUM_ATTRS           2
+#define FAM15H_NUM_GROUPS              2
+
+#define MSR_F15H_CU_MAX_PWR_ACCUMULATOR        0xc001007b
+
 struct fam15h_power_data {
        struct pci_dev *pdev;
        unsigned int tdp_to_watts;
        unsigned int base_tdp;
        unsigned int processor_pwr_watts;
        unsigned int cpu_pwr_sample_ratio;
+       const struct attribute_group *groups[FAM15H_NUM_GROUPS];
+       struct attribute_group group;
+       /* maximum accumulated power of a compute unit */
+       u64 max_cu_acc_power;
 };
 
 static ssize_t show_power(struct device *dev,
@@ -105,29 +115,36 @@ static ssize_t show_power_crit(struct device *dev,
 }
 static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL);
 
-static umode_t fam15h_power_is_visible(struct kobject *kobj,
-                                      struct attribute *attr,
-                                      int index)
+static int fam15h_power_init_attrs(struct pci_dev *pdev,
+                                  struct fam15h_power_data *data)
 {
-       /* power1_input is only reported for Fam15h, Models 00h-0fh */
-       if (attr == &dev_attr_power1_input.attr &&
-          (boot_cpu_data.x86 != 0x15 || boot_cpu_data.x86_model > 0xf))
-               return 0;
+       int n = FAM15H_MIN_NUM_ATTRS;
+       struct attribute **fam15h_power_attrs;
+       struct cpuinfo_x86 *c = &boot_cpu_data;
 
-       return attr->mode;
-}
+       if (c->x86 == 0x15 &&
+           (c->x86_model <= 0xf ||
+            (c->x86_model >= 0x60 && c->x86_model <= 0x6f)))
+               n += 1;
 
-static struct attribute *fam15h_power_attrs[] = {
-       &dev_attr_power1_input.attr,
-       &dev_attr_power1_crit.attr,
-       NULL
-};
+       fam15h_power_attrs = devm_kcalloc(&pdev->dev, n,
+                                         sizeof(*fam15h_power_attrs),
+                                         GFP_KERNEL);
 
-static const struct attribute_group fam15h_power_group = {
-       .attrs = fam15h_power_attrs,
-       .is_visible = fam15h_power_is_visible,
-};
-__ATTRIBUTE_GROUPS(fam15h_power);
+       if (!fam15h_power_attrs)
+               return -ENOMEM;
+
+       n = 0;
+       fam15h_power_attrs[n++] = &dev_attr_power1_crit.attr;
+       if (c->x86 == 0x15 &&
+           (c->x86_model <= 0xf ||
+            (c->x86_model >= 0x60 && c->x86_model <= 0x6f)))
+               fam15h_power_attrs[n++] = &dev_attr_power1_input.attr;
+
+       data->group.attrs = fam15h_power_attrs;
+
+       return 0;
+}
 
 static bool should_load_on_this_node(struct pci_dev *f4)
 {
@@ -186,11 +203,12 @@ static int fam15h_power_resume(struct pci_dev *pdev)
 #define fam15h_power_resume NULL
 #endif
 
-static void fam15h_power_init_data(struct pci_dev *f4,
-                                            struct fam15h_power_data *data)
+static int fam15h_power_init_data(struct pci_dev *f4,
+                                 struct fam15h_power_data *data)
 {
        u32 val, eax, ebx, ecx, edx;
        u64 tmp;
+       int ret;
 
        pci_read_config_dword(f4, REG_PROCESSOR_TDP, &val);
        data->base_tdp = val >> 16;
@@ -211,11 +229,15 @@ static void fam15h_power_init_data(struct pci_dev *f4,
        /* convert to microWatt */
        data->processor_pwr_watts = (tmp * 15625) >> 10;
 
+       ret = fam15h_power_init_attrs(f4, data);
+       if (ret)
+               return ret;
+
        cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
 
        /* CPUID Fn8000_0007:EDX[12] indicates support for accumulated power */
        if (!(edx & BIT(12)))
-               return;
+               return 0;
 
        /*
         * determine the ratio of the compute unit power accumulator
@@ -223,14 +245,24 @@ static void fam15h_power_init_data(struct pci_dev *f4,
         * Fn8000_0007:ECX
         */
        data->cpu_pwr_sample_ratio = ecx;
+
+       if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
+               pr_err("Failed to read max compute unit power accumulator MSR\n");
+               return -ENODEV;
+       }
+
+       data->max_cu_acc_power = tmp;
+
+       return 0;
 }
 
 static int fam15h_power_probe(struct pci_dev *pdev,
-                                       const struct pci_device_id *id)
+                             const struct pci_device_id *id)
 {
        struct fam15h_power_data *data;
        struct device *dev = &pdev->dev;
        struct device *hwmon_dev;
+       int ret;
 
        /*
         * though we ignore every other northbridge, we still have to
@@ -246,12 +278,17 @@ static int fam15h_power_probe(struct pci_dev *pdev,
        if (!data)
                return -ENOMEM;
 
-       fam15h_power_init_data(pdev, data);
+       ret = fam15h_power_init_data(pdev, data);
+       if (ret)
+               return ret;
+
        data->pdev = pdev;
 
+       data->groups[0] = &data->group;
+
        hwmon_dev = devm_hwmon_device_register_with_groups(dev, "fam15h_power",
                                                           data,
-                                                          fam15h_power_groups);
+                                                          &data->groups[0]);
        return PTR_ERR_OR_ZERO(hwmon_dev);
 }
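The fam15h_power rework above replaces the static attribute list and its .is_visible callback with an attribute array built at probe time: FAM15H_MIN_NUM_ATTRS entries plus one more when the CPU model exposes power1_input. A standalone sketch of that count-allocate-fill pattern (the model ranges are copied from the hunk, everything else is illustrative):

#include <stdio.h>
#include <stdlib.h>

static int has_power1_input(unsigned int family, unsigned int model)
{
	/* Fam15h models 00h-0fh and 60h-6fh, as in the hunk above. */
	return family == 0x15 &&
	       (model <= 0xf || (model >= 0x60 && model <= 0x6f));
}

int main(void)
{
	unsigned int family = 0x15, model = 0x60;
	int n = 1 + has_power1_input(family, model); /* power1_crit is always there */
	const char **attrs = calloc(n + 1, sizeof(*attrs)); /* +1: NULL terminator */
	int i = 0;

	if (!attrs)
		return 1;
	attrs[i++] = "power1_crit";
	if (has_power1_input(family, model))
		attrs[i++] = "power1_input";

	for (i = 0; attrs[i]; i++)
		printf("%s\n", attrs[i]);
	free(attrs);
	return 0;
}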
 
index a3dae6d0082a0d08e4183f63e27b79ae5510b863..82de3deeb18a7ddf5e041e695b35cc8b1500abea 100644 (file)
@@ -539,6 +539,7 @@ static const struct of_device_id of_gpio_fan_match[] = {
        { .compatible = "gpio-fan", },
        {},
 };
+MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
 #endif /* CONFIG_OF_GPIO */
 
 static int gpio_fan_probe(struct platform_device *pdev)
index 4255514b2c72d917c2e2151f3d1fac405033e935..55b5a8ff1cfe2253c967e8fab4df33a44280a0fc 100644 (file)
@@ -474,11 +474,18 @@ static const struct platform_device_id opal_sensor_driver_ids[] = {
 };
 MODULE_DEVICE_TABLE(platform, opal_sensor_driver_ids);
 
+static const struct of_device_id opal_sensor_match[] = {
+       { .compatible   = "ibm,opal-sensor" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, opal_sensor_match);
+
 static struct platform_driver ibmpowernv_driver = {
        .probe          = ibmpowernv_probe,
        .id_table       = opal_sensor_driver_ids,
        .driver         = {
                .name   = DRVNAME,
+               .of_match_table = opal_sensor_match,
        },
 };
 
index 4d2815079fc209ff425267d333fa86b9c82ab97d..b24f1d3045f04386f7dc3a1c5af4b7ee7724e891 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/of.h>
 #include <linux/delay.h>
 #include <linux/util_macros.h>
+#include <linux/regmap.h>
 
 #include <linux/platform_data/ina2xx.h>
 
  */
 #define INA226_TOTAL_CONV_TIME_DEFAULT 2200
 
+static struct regmap_config ina2xx_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 16,
+};
+
 enum ina2xx_ids { ina219, ina226 };
 
 struct ina2xx_config {
@@ -97,20 +103,13 @@ struct ina2xx_config {
 };
 
 struct ina2xx_data {
-       struct i2c_client *client;
        const struct ina2xx_config *config;
 
        long rshunt;
-       u16 curr_config;
-
-       struct mutex update_lock;
-       bool valid;
-       unsigned long last_updated;
-       int update_interval; /* in jiffies */
+       struct mutex config_lock;
+       struct regmap *regmap;
 
-       int kind;
        const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS];
-       u16 regs[INA2XX_MAX_REGISTERS];
 };
 
 static const struct ina2xx_config ina2xx_config[] = {
@@ -153,7 +152,11 @@ static int ina226_reg_to_interval(u16 config)
        return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000);
 }
 
-static u16 ina226_interval_to_reg(int interval, u16 config)
+/*
+ * Return the new, shifted AVG field value of the CONFIG register,
+ * for use with regmap_update_bits().
+ */
+static u16 ina226_interval_to_reg(int interval)
 {
        int avg, avg_bits;
 
@@ -162,15 +165,7 @@ static u16 ina226_interval_to_reg(int interval, u16 config)
        avg_bits = find_closest(avg, ina226_avg_tab,
                                ARRAY_SIZE(ina226_avg_tab));
 
-       return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits);
-}
-
-static void ina226_set_update_interval(struct ina2xx_data *data)
-{
-       int ms;
-
-       ms = ina226_reg_to_interval(data->curr_config);
-       data->update_interval = msecs_to_jiffies(ms);
+       return INA226_SHIFT_AVG(avg_bits);
 }
 
 static int ina2xx_calibrate(struct ina2xx_data *data)
@@ -178,8 +173,7 @@ static int ina2xx_calibrate(struct ina2xx_data *data)
        u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
                                    data->rshunt);
 
-       return i2c_smbus_write_word_swapped(data->client,
-                                           INA2XX_CALIBRATION, val);
+       return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
 }
 
 /*
@@ -187,12 +181,8 @@ static int ina2xx_calibrate(struct ina2xx_data *data)
  */
 static int ina2xx_init(struct ina2xx_data *data)
 {
-       struct i2c_client *client = data->client;
-       int ret;
-
-       /* device configuration */
-       ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
-                                          data->curr_config);
+       int ret = regmap_write(data->regmap, INA2XX_CONFIG,
+                              data->config->config_default);
        if (ret < 0)
                return ret;
 
@@ -203,47 +193,52 @@ static int ina2xx_init(struct ina2xx_data *data)
        return ina2xx_calibrate(data);
 }
 
-static int ina2xx_do_update(struct device *dev)
+static int ina2xx_read_reg(struct device *dev, int reg, unsigned int *regval)
 {
        struct ina2xx_data *data = dev_get_drvdata(dev);
-       struct i2c_client *client = data->client;
-       int i, rv, retry;
+       int ret, retry;
 
-       dev_dbg(&client->dev, "Starting ina2xx update\n");
+       dev_dbg(dev, "Starting register %d read\n", reg);
 
        for (retry = 5; retry; retry--) {
-               /* Read all registers */
-               for (i = 0; i < data->config->registers; i++) {
-                       rv = i2c_smbus_read_word_swapped(client, i);
-                       if (rv < 0)
-                               return rv;
-                       data->regs[i] = rv;
-               }
+
+               ret = regmap_read(data->regmap, reg, regval);
+               if (ret < 0)
+                       return ret;
+
+               dev_dbg(dev, "read %d, val = 0x%04x\n", reg, *regval);
 
                /*
                 * If the current value in the calibration register is 0, the
                 * power and current registers will also remain at 0. In case
                 * the chip has been reset let's check the calibration
                 * register and reinitialize if needed.
+                * We do that extra read of the calibration register if there
+                * is some hint of a chip reset.
                 */
-               if (data->regs[INA2XX_CALIBRATION] == 0) {
-                       dev_warn(dev, "chip not calibrated, reinitializing\n");
-
-                       rv = ina2xx_init(data);
-                       if (rv < 0)
-                               return rv;
-
-                       /*
-                        * Let's make sure the power and current registers
-                        * have been updated before trying again.
-                        */
-                       msleep(INA2XX_MAX_DELAY);
-                       continue;
+               if (*regval == 0) {
+                       unsigned int cal;
+
+                       ret = regmap_read(data->regmap, INA2XX_CALIBRATION,
+                                         &cal);
+                       if (ret < 0)
+                               return ret;
+
+                       if (cal == 0) {
+                               dev_warn(dev, "chip not calibrated, reinitializing\n");
+
+                               ret = ina2xx_init(data);
+                               if (ret < 0)
+                                       return ret;
+                               /*
+                                * Let's make sure the power and current
+                                * registers have been updated before trying
+                                * again.
+                                */
+                               msleep(INA2XX_MAX_DELAY);
+                               continue;
+                       }
                }
-
-               data->last_updated = jiffies;
-               data->valid = 1;
-
                return 0;
        }
 
@@ -256,51 +251,31 @@ static int ina2xx_do_update(struct device *dev)
        return -ENODEV;
 }
 
-static struct ina2xx_data *ina2xx_update_device(struct device *dev)
-{
-       struct ina2xx_data *data = dev_get_drvdata(dev);
-       struct ina2xx_data *ret = data;
-       unsigned long after;
-       int rv;
-
-       mutex_lock(&data->update_lock);
-
-       after = data->last_updated + data->update_interval;
-       if (time_after(jiffies, after) || !data->valid) {
-               rv = ina2xx_do_update(dev);
-               if (rv < 0)
-                       ret = ERR_PTR(rv);
-       }
-
-       mutex_unlock(&data->update_lock);
-       return ret;
-}
-
-static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
+static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
+                           unsigned int regval)
 {
        int val;
 
        switch (reg) {
        case INA2XX_SHUNT_VOLTAGE:
                /* signed register */
-               val = DIV_ROUND_CLOSEST((s16)data->regs[reg],
-                                       data->config->shunt_div);
+               val = DIV_ROUND_CLOSEST((s16)regval, data->config->shunt_div);
                break;
        case INA2XX_BUS_VOLTAGE:
-               val = (data->regs[reg] >> data->config->bus_voltage_shift)
+               val = (regval >> data->config->bus_voltage_shift)
                  * data->config->bus_voltage_lsb;
                val = DIV_ROUND_CLOSEST(val, 1000);
                break;
        case INA2XX_POWER:
-               val = data->regs[reg] * data->config->power_lsb;
+               val = regval * data->config->power_lsb;
                break;
        case INA2XX_CURRENT:
                /* signed register, LSB=1mA (selected), in mA */
-               val = (s16)data->regs[reg];
+               val = (s16)regval;
                break;
        case INA2XX_CALIBRATION:
                val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
-                                       data->regs[reg]);
+                                       regval);
                break;
        default:
                /* programmer goofed */
@@ -316,25 +291,25 @@ static ssize_t ina2xx_show_value(struct device *dev,
                                 struct device_attribute *da, char *buf)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct ina2xx_data *data = ina2xx_update_device(dev);
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+       unsigned int regval;
+
+       int err = ina2xx_read_reg(dev, attr->index, &regval);
 
-       if (IS_ERR(data))
-               return PTR_ERR(data);
+       if (err < 0)
+               return err;
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
-                       ina2xx_get_value(data, attr->index));
+                       ina2xx_get_value(data, attr->index, regval));
 }
 
 static ssize_t ina2xx_set_shunt(struct device *dev,
                                struct device_attribute *da,
                                const char *buf, size_t count)
 {
-       struct ina2xx_data *data = ina2xx_update_device(dev);
        unsigned long val;
        int status;
-
-       if (IS_ERR(data))
-               return PTR_ERR(data);
+       struct ina2xx_data *data = dev_get_drvdata(dev);
 
        status = kstrtoul(buf, 10, &val);
        if (status < 0)
@@ -345,10 +320,10 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
            val > data->config->calibration_factor)
                return -EINVAL;
 
-       mutex_lock(&data->update_lock);
+       mutex_lock(&data->config_lock);
        data->rshunt = val;
        status = ina2xx_calibrate(data);
-       mutex_unlock(&data->update_lock);
+       mutex_unlock(&data->config_lock);
        if (status < 0)
                return status;
 
@@ -370,17 +345,9 @@ static ssize_t ina226_set_interval(struct device *dev,
        if (val > INT_MAX || val == 0)
                return -EINVAL;
 
-       mutex_lock(&data->update_lock);
-       data->curr_config = ina226_interval_to_reg(val,
-                                                  data->regs[INA2XX_CONFIG]);
-       status = i2c_smbus_write_word_swapped(data->client,
-                                             INA2XX_CONFIG,
-                                             data->curr_config);
-
-       ina226_set_update_interval(data);
-       /* Make sure the next access re-reads all registers. */
-       data->valid = 0;
-       mutex_unlock(&data->update_lock);
+       status = regmap_update_bits(data->regmap, INA2XX_CONFIG,
+                                   INA226_AVG_RD_MASK,
+                                   ina226_interval_to_reg(val));
        if (status < 0)
                return status;
 
@@ -390,18 +357,15 @@ static ssize_t ina226_set_interval(struct device *dev,
 static ssize_t ina226_show_interval(struct device *dev,
                                    struct device_attribute *da, char *buf)
 {
-       struct ina2xx_data *data = ina2xx_update_device(dev);
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+       int status;
+       unsigned int regval;
 
-       if (IS_ERR(data))
-               return PTR_ERR(data);
+       status = regmap_read(data->regmap, INA2XX_CONFIG, &regval);
+       if (status)
+               return status;
 
-       /*
-        * We don't use data->update_interval here as we want to display
-        * the actual interval used by the chip and jiffies_to_msecs()
-        * doesn't seem to be accurate enough.
-        */
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-                       ina226_reg_to_interval(data->regs[INA2XX_CONFIG]));
+       return snprintf(buf, PAGE_SIZE, "%d\n", ina226_reg_to_interval(regval));
 }
 
 /* shunt voltage */
@@ -455,60 +419,51 @@ static const struct attribute_group ina226_group = {
 static int ina2xx_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
-       struct i2c_adapter *adapter = client->adapter;
-       struct ina2xx_platform_data *pdata;
        struct device *dev = &client->dev;
        struct ina2xx_data *data;
        struct device *hwmon_dev;
        u32 val;
        int ret, group = 0;
 
-       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
-               return -ENODEV;
-
        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       if (dev_get_platdata(dev)) {
-               pdata = dev_get_platdata(dev);
-               data->rshunt = pdata->shunt_uohms;
-       } else if (!of_property_read_u32(dev->of_node,
-                                        "shunt-resistor", &val)) {
-               data->rshunt = val;
-       } else {
-               data->rshunt = INA2XX_RSHUNT_DEFAULT;
-       }
-
        /* set the device type */
-       data->kind = id->driver_data;
-       data->config = &ina2xx_config[data->kind];
-       data->curr_config = data->config->config_default;
-       data->client = client;
+       data->config = &ina2xx_config[id->driver_data];
 
-       /*
-        * Ina226 has a variable update_interval. For ina219 we
-        * use a constant value.
-        */
-       if (data->kind == ina226)
-               ina226_set_update_interval(data);
-       else
-               data->update_interval = HZ / INA2XX_CONVERSION_RATE;
+       if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
+               struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
+
+               if (pdata)
+                       val = pdata->shunt_uohms;
+               else
+                       val = INA2XX_RSHUNT_DEFAULT;
+       }
 
-       if (data->rshunt <= 0 ||
-           data->rshunt > data->config->calibration_factor)
+       if (val <= 0 || val > data->config->calibration_factor)
                return -ENODEV;
 
+       data->rshunt = val;
+
+       ina2xx_regmap_config.max_register = data->config->registers;
+
+       data->regmap = devm_regmap_init_i2c(client, &ina2xx_regmap_config);
+       if (IS_ERR(data->regmap)) {
+               dev_err(dev, "failed to allocate register map\n");
+               return PTR_ERR(data->regmap);
+       }
+
        ret = ina2xx_init(data);
        if (ret < 0) {
                dev_err(dev, "error configuring the device: %d\n", ret);
                return -ENODEV;
        }
 
-       mutex_init(&data->update_lock);
+       mutex_init(&data->config_lock);
 
        data->groups[group++] = &ina2xx_group;
-       if (data->kind == ina226)
+       if (id->driver_data == ina226)
                data->groups[group++] = &ina226_group;
 
        hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
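The regmap conversion keeps the ina226 interval arithmetic: the requested update interval in milliseconds maps to an averaging factor (one conversion takes about INA226_TOTAL_CONV_TIME_DEFAULT = 2200 us), the closest supported factor is picked, and ina226_interval_to_reg() now returns only the shifted AVG field so it can be applied with regmap_update_bits(). A standalone sketch of the round trip, assuming the INA226's averaging options of 1, 4, 16, 64, 128, 256, 512 and 1024 samples:

#include <stdio.h>
#include <stdlib.h>

#define TOTAL_CONV_TIME_US	2200	/* INA226_TOTAL_CONV_TIME_DEFAULT */

static const int avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };

/* index of the table entry closest to the requested averaging factor */
static int closest_avg_bits(int avg)
{
	int i, best = 0;

	for (i = 1; i < 8; i++)
		if (abs(avg_tab[i] - avg) < abs(avg_tab[best] - avg))
			best = i;
	return best;
}

int main(void)
{
	int interval_ms = 500;
	int avg = (interval_ms * 1000 + TOTAL_CONV_TIME_US / 2) /
		  TOTAL_CONV_TIME_US;
	int bits = closest_avg_bits(avg);

	printf("requested %d ms -> AVG=%d -> actual %d ms\n",
	       interval_ms, avg_tab[bits],
	       (avg_tab[bits] * TOTAL_CONV_TIME_US + 500) / 1000);
	return 0;
}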
index e4e57bbafb10eeb621ad03ff03e8985030c43e4a..0addc84ba948a09e6b740db56593fbd124b8c8d5 100644 (file)
@@ -57,6 +57,7 @@ enum lm75_type {              /* keep sorted in alphabetical order */
        tmp175,
        tmp275,
        tmp75,
+       tmp75c,
 };
 
 /* Addresses scanned */
@@ -280,6 +281,11 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
                data->resolution = 12;
                data->sample_time = HZ / 2;
                break;
+       case tmp75c:
+               clr_mask |= 1 << 5;             /* not one-shot mode */
+               data->resolution = 12;
+               data->sample_time = HZ / 4;
+               break;
        }
 
        /* configure as specified */
@@ -343,6 +349,7 @@ static const struct i2c_device_id lm75_ids[] = {
        { "tmp175", tmp175, },
        { "tmp275", tmp275, },
        { "tmp75", tmp75, },
+       { "tmp75c", tmp75c, },
        { /* LIST END */ }
 };
 MODULE_DEVICE_TABLE(i2c, lm75_ids);
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
new file mode 100644 (file)
index 0000000..69c0ac8
--- /dev/null
@@ -0,0 +1,603 @@
+/*
+ * max31790.c - Part of lm_sensors, Linux kernel modules for hardware
+ *             monitoring.
+ *
+ * (C) 2015 by Il Han <corone.il.han@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/* MAX31790 registers */
+#define MAX31790_REG_GLOBAL_CONFIG     0x00
+#define MAX31790_REG_FAN_CONFIG(ch)    (0x02 + (ch))
+#define MAX31790_REG_FAN_DYNAMICS(ch)  (0x08 + (ch))
+#define MAX31790_REG_FAN_FAULT_STATUS2 0x10
+#define MAX31790_REG_FAN_FAULT_STATUS1 0x11
+#define MAX31790_REG_TACH_COUNT(ch)    (0x18 + (ch) * 2)
+#define MAX31790_REG_PWM_DUTY_CYCLE(ch)        (0x30 + (ch) * 2)
+#define MAX31790_REG_PWMOUT(ch)                (0x40 + (ch) * 2)
+#define MAX31790_REG_TARGET_COUNT(ch)  (0x50 + (ch) * 2)
+
+/* Fan Config register bits */
+#define MAX31790_FAN_CFG_RPM_MODE      0x80
+#define MAX31790_FAN_CFG_TACH_INPUT_EN 0x08
+#define MAX31790_FAN_CFG_TACH_INPUT    0x01
+
+/* Fan Dynamics register bits */
+#define MAX31790_FAN_DYN_SR_SHIFT      5
+#define MAX31790_FAN_DYN_SR_MASK       0xE0
+#define SR_FROM_REG(reg)               (((reg) & MAX31790_FAN_DYN_SR_MASK) \
+                                        >> MAX31790_FAN_DYN_SR_SHIFT)
+
+#define FAN_RPM_MIN                    120
+#define FAN_RPM_MAX                    7864320
+
+#define RPM_FROM_REG(reg, sr)          (((reg) >> 4) ? \
+                                        ((60 * (sr) * 8192) / ((reg) >> 4)) : \
+                                        FAN_RPM_MAX)
+#define RPM_TO_REG(rpm, sr)            ((60 * (sr) * 8192) / ((rpm) * 2))
+
+#define NR_CHANNEL                     6
+
+/*
+ * Client data (each client gets its own)
+ */
+struct max31790_data {
+       struct i2c_client *client;
+       struct mutex update_lock;
+       bool valid; /* zero until following fields are valid */
+       unsigned long last_updated; /* in jiffies */
+
+       /* register values */
+       u8 fan_config[NR_CHANNEL];
+       u8 fan_dynamics[NR_CHANNEL];
+       u16 fault_status;
+       u16 tach[NR_CHANNEL * 2];
+       u16 pwm[NR_CHANNEL];
+       u16 target_count[NR_CHANNEL];
+};
+
+static struct max31790_data *max31790_update_device(struct device *dev)
+{
+       struct max31790_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       struct max31790_data *ret = data;
+       int i;
+       int rv;
+
+       mutex_lock(&data->update_lock);
+
+       if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+               rv = i2c_smbus_read_byte_data(client,
+                               MAX31790_REG_FAN_FAULT_STATUS1);
+               if (rv < 0)
+                       goto abort;
+               data->fault_status = rv & 0x3F;
+
+               rv = i2c_smbus_read_byte_data(client,
+                               MAX31790_REG_FAN_FAULT_STATUS2);
+               if (rv < 0)
+                       goto abort;
+               data->fault_status |= (rv & 0x3F) << 6;
+
+               for (i = 0; i < NR_CHANNEL; i++) {
+                       rv = i2c_smbus_read_word_swapped(client,
+                                       MAX31790_REG_TACH_COUNT(i));
+                       if (rv < 0)
+                               goto abort;
+                       data->tach[i] = rv;
+
+                       if (data->fan_config[i]
+                           & MAX31790_FAN_CFG_TACH_INPUT) {
+                               rv = i2c_smbus_read_word_swapped(client,
+                                       MAX31790_REG_TACH_COUNT(NR_CHANNEL
+                                                               + i));
+                               if (rv < 0)
+                                       goto abort;
+                               data->tach[NR_CHANNEL + i] = rv;
+                       } else {
+                               rv = i2c_smbus_read_word_swapped(client,
+                                               MAX31790_REG_PWMOUT(i));
+                               if (rv < 0)
+                                       goto abort;
+                               data->pwm[i] = rv;
+
+                               rv = i2c_smbus_read_word_swapped(client,
+                                               MAX31790_REG_TARGET_COUNT(i));
+                               if (rv < 0)
+                                       goto abort;
+                               data->target_count[i] = rv;
+                       }
+               }
+
+               data->last_updated = jiffies;
+               data->valid = true;
+       }
+       goto done;
+
+abort:
+       data->valid = false;
+       ret = ERR_PTR(rv);
+
+done:
+       mutex_unlock(&data->update_lock);
+
+       return ret;
+}
+
+static const u8 tach_period[8] = { 1, 2, 4, 8, 16, 32, 32, 32 };
+
+static u8 get_tach_period(u8 fan_dynamics)
+{
+       return tach_period[SR_FROM_REG(fan_dynamics)];
+}
+
+static u8 bits_for_tach_period(int rpm)
+{
+       u8 bits;
+
+       if (rpm < 500)
+               bits = 0x0;
+       else if (rpm < 1000)
+               bits = 0x1;
+       else if (rpm < 2000)
+               bits = 0x2;
+       else if (rpm < 4000)
+               bits = 0x3;
+       else if (rpm < 8000)
+               bits = 0x4;
+       else
+               bits = 0x5;
+
+       return bits;
+}
+
+static ssize_t get_fan(struct device *dev,
+                      struct device_attribute *devattr, char *buf)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct max31790_data *data = max31790_update_device(dev);
+       int sr, rpm;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       sr = get_tach_period(data->fan_dynamics[attr->index]);
+       rpm = RPM_FROM_REG(data->tach[attr->index], sr);
+
+       return sprintf(buf, "%d\n", rpm);
+}
+
+static ssize_t get_fan_target(struct device *dev,
+                             struct device_attribute *devattr, char *buf)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct max31790_data *data = max31790_update_device(dev);
+       int sr, rpm;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       sr = get_tach_period(data->fan_dynamics[attr->index]);
+       rpm = RPM_FROM_REG(data->target_count[attr->index], sr);
+
+       return sprintf(buf, "%d\n", rpm);
+}
+
+static ssize_t set_fan_target(struct device *dev,
+                             struct device_attribute *devattr,
+                             const char *buf, size_t count)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct max31790_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       u8 bits;
+       int sr;
+       int target_count;
+       unsigned long rpm;
+       int err;
+
+       err = kstrtoul(buf, 10, &rpm);
+       if (err)
+               return err;
+
+       mutex_lock(&data->update_lock);
+
+       rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
+       bits = bits_for_tach_period(rpm);
+       data->fan_dynamics[attr->index] =
+                       ((data->fan_dynamics[attr->index]
+                         & ~MAX31790_FAN_DYN_SR_MASK)
+                        | (bits << MAX31790_FAN_DYN_SR_SHIFT));
+       err = i2c_smbus_write_byte_data(client,
+                       MAX31790_REG_FAN_DYNAMICS(attr->index),
+                       data->fan_dynamics[attr->index]);
+
+       if (err < 0) {
+               mutex_unlock(&data->update_lock);
+               return err;
+       }
+
+       sr = get_tach_period(data->fan_dynamics[attr->index]);
+       target_count = RPM_TO_REG(rpm, sr);
+       target_count = clamp_val(target_count, 0x1, 0x7FF);
+
+       data->target_count[attr->index] = target_count << 5;
+
+       err = i2c_smbus_write_word_swapped(client,
+                       MAX31790_REG_TARGET_COUNT(attr->index),
+                       data->target_count[attr->index]);
+
+       mutex_unlock(&data->update_lock);
+
+       if (err < 0)
+               return err;
+
+       return count;
+}
+
+static ssize_t get_pwm(struct device *dev,
+                      struct device_attribute *devattr, char *buf)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct max31790_data *data = max31790_update_device(dev);
+       int pwm;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       pwm = data->pwm[attr->index] >> 8;
+
+       return sprintf(buf, "%d\n", pwm);
+}
+
+static ssize_t set_pwm(struct device *dev,
+                      struct device_attribute *devattr,
+                      const char *buf, size_t count)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct max31790_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       unsigned long pwm;
+       int err;
+
+       err = kstrtoul(buf, 10, &pwm);
+       if (err)
+               return err;
+
+       if (pwm > 255)
+               return -EINVAL;
+
+       mutex_lock(&data->update_lock);
+
+       data->pwm[attr->index] = pwm << 8;
+       err = i2c_smbus_write_word_swapped(client,
+                       MAX31790_REG_PWMOUT(attr->index),
+                       data->pwm[attr->index]);
+
+       mutex_unlock(&data->update_lock);
+
+       if (err < 0)
+               return err;
+
+       return count;
+}
+
+static ssize_t get_pwm_enable(struct device *dev,
+                             struct device_attribute *devattr, char *buf)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct max31790_data *data = max31790_update_device(dev);
+       int mode;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       if (data->fan_config[attr->index] & MAX31790_FAN_CFG_RPM_MODE)
+               mode = 2;
+       else if (data->fan_config[attr->index] & MAX31790_FAN_CFG_TACH_INPUT_EN)
+               mode = 1;
+       else
+               mode = 0;
+
+       return sprintf(buf, "%d\n", mode);
+}
+
+static ssize_t set_pwm_enable(struct device *dev,
+                             struct device_attribute *devattr,
+                             const char *buf, size_t count)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct max31790_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       unsigned long mode;
+       int err;
+
+       err = kstrtoul(buf, 10, &mode);
+       if (err)
+               return err;
+
+       switch (mode) {
+       case 0:
+               data->fan_config[attr->index] =
+                       data->fan_config[attr->index]
+                       & ~(MAX31790_FAN_CFG_TACH_INPUT_EN
+                           | MAX31790_FAN_CFG_RPM_MODE);
+               break;
+       case 1:
+               data->fan_config[attr->index] =
+                       (data->fan_config[attr->index]
+                        | MAX31790_FAN_CFG_TACH_INPUT_EN)
+                       & ~MAX31790_FAN_CFG_RPM_MODE;
+               break;
+       case 2:
+               data->fan_config[attr->index] =
+                       data->fan_config[attr->index]
+                       | MAX31790_FAN_CFG_TACH_INPUT_EN
+                       | MAX31790_FAN_CFG_RPM_MODE;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       mutex_lock(&data->update_lock);
+
+       err = i2c_smbus_write_byte_data(client,
+                       MAX31790_REG_FAN_CONFIG(attr->index),
+                       data->fan_config[attr->index]);
+
+       mutex_unlock(&data->update_lock);
+
+       if (err < 0)
+               return err;
+
+       return count;
+}
+
+static ssize_t get_fan_fault(struct device *dev,
+                            struct device_attribute *devattr, char *buf)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct max31790_data *data = max31790_update_device(dev);
+       int fault;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       fault = !!(data->fault_status & (1 << attr->index));
+
+       return sprintf(buf, "%d\n", fault);
+}
+
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3);
+static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, get_fan, NULL, 4);
+static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, get_fan, NULL, 5);
+
+static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_fan_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, get_fan_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_fault, S_IRUGO, get_fan_fault, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_fault, S_IRUGO, get_fan_fault, NULL, 3);
+static SENSOR_DEVICE_ATTR(fan5_fault, S_IRUGO, get_fan_fault, NULL, 4);
+static SENSOR_DEVICE_ATTR(fan6_fault, S_IRUGO, get_fan_fault, NULL, 5);
+
+static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, get_fan, NULL, 6);
+static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, get_fan, NULL, 7);
+static SENSOR_DEVICE_ATTR(fan9_input, S_IRUGO, get_fan, NULL, 8);
+static SENSOR_DEVICE_ATTR(fan10_input, S_IRUGO, get_fan, NULL, 9);
+static SENSOR_DEVICE_ATTR(fan11_input, S_IRUGO, get_fan, NULL, 10);
+static SENSOR_DEVICE_ATTR(fan12_input, S_IRUGO, get_fan, NULL, 11);
+
+static SENSOR_DEVICE_ATTR(fan7_fault, S_IRUGO, get_fan_fault, NULL, 6);
+static SENSOR_DEVICE_ATTR(fan8_fault, S_IRUGO, get_fan_fault, NULL, 7);
+static SENSOR_DEVICE_ATTR(fan9_fault, S_IRUGO, get_fan_fault, NULL, 8);
+static SENSOR_DEVICE_ATTR(fan10_fault, S_IRUGO, get_fan_fault, NULL, 9);
+static SENSOR_DEVICE_ATTR(fan11_fault, S_IRUGO, get_fan_fault, NULL, 10);
+static SENSOR_DEVICE_ATTR(fan12_fault, S_IRUGO, get_fan_fault, NULL, 11);
+
+static SENSOR_DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO,
+               get_fan_target, set_fan_target, 0);
+static SENSOR_DEVICE_ATTR(fan2_target, S_IWUSR | S_IRUGO,
+               get_fan_target, set_fan_target, 1);
+static SENSOR_DEVICE_ATTR(fan3_target, S_IWUSR | S_IRUGO,
+               get_fan_target, set_fan_target, 2);
+static SENSOR_DEVICE_ATTR(fan4_target, S_IWUSR | S_IRUGO,
+               get_fan_target, set_fan_target, 3);
+static SENSOR_DEVICE_ATTR(fan5_target, S_IWUSR | S_IRUGO,
+               get_fan_target, set_fan_target, 4);
+static SENSOR_DEVICE_ATTR(fan6_target, S_IWUSR | S_IRUGO,
+               get_fan_target, set_fan_target, 5);
+
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 1);
+static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 2);
+static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 3);
+static SENSOR_DEVICE_ATTR(pwm5, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 4);
+static SENSOR_DEVICE_ATTR(pwm6, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 5);
+
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
+               get_pwm_enable, set_pwm_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
+               get_pwm_enable, set_pwm_enable, 1);
+static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO,
+               get_pwm_enable, set_pwm_enable, 2);
+static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO,
+               get_pwm_enable, set_pwm_enable, 3);
+static SENSOR_DEVICE_ATTR(pwm5_enable, S_IWUSR | S_IRUGO,
+               get_pwm_enable, set_pwm_enable, 4);
+static SENSOR_DEVICE_ATTR(pwm6_enable, S_IWUSR | S_IRUGO,
+               get_pwm_enable, set_pwm_enable, 5);
+
+static struct attribute *max31790_attrs[] = {
+       &sensor_dev_attr_fan1_input.dev_attr.attr,
+       &sensor_dev_attr_fan2_input.dev_attr.attr,
+       &sensor_dev_attr_fan3_input.dev_attr.attr,
+       &sensor_dev_attr_fan4_input.dev_attr.attr,
+       &sensor_dev_attr_fan5_input.dev_attr.attr,
+       &sensor_dev_attr_fan6_input.dev_attr.attr,
+
+       &sensor_dev_attr_fan1_fault.dev_attr.attr,
+       &sensor_dev_attr_fan2_fault.dev_attr.attr,
+       &sensor_dev_attr_fan3_fault.dev_attr.attr,
+       &sensor_dev_attr_fan4_fault.dev_attr.attr,
+       &sensor_dev_attr_fan5_fault.dev_attr.attr,
+       &sensor_dev_attr_fan6_fault.dev_attr.attr,
+
+       &sensor_dev_attr_fan7_input.dev_attr.attr,
+       &sensor_dev_attr_fan8_input.dev_attr.attr,
+       &sensor_dev_attr_fan9_input.dev_attr.attr,
+       &sensor_dev_attr_fan10_input.dev_attr.attr,
+       &sensor_dev_attr_fan11_input.dev_attr.attr,
+       &sensor_dev_attr_fan12_input.dev_attr.attr,
+
+       &sensor_dev_attr_fan7_fault.dev_attr.attr,
+       &sensor_dev_attr_fan8_fault.dev_attr.attr,
+       &sensor_dev_attr_fan9_fault.dev_attr.attr,
+       &sensor_dev_attr_fan10_fault.dev_attr.attr,
+       &sensor_dev_attr_fan11_fault.dev_attr.attr,
+       &sensor_dev_attr_fan12_fault.dev_attr.attr,
+
+       &sensor_dev_attr_fan1_target.dev_attr.attr,
+       &sensor_dev_attr_fan2_target.dev_attr.attr,
+       &sensor_dev_attr_fan3_target.dev_attr.attr,
+       &sensor_dev_attr_fan4_target.dev_attr.attr,
+       &sensor_dev_attr_fan5_target.dev_attr.attr,
+       &sensor_dev_attr_fan6_target.dev_attr.attr,
+
+       &sensor_dev_attr_pwm1.dev_attr.attr,
+       &sensor_dev_attr_pwm2.dev_attr.attr,
+       &sensor_dev_attr_pwm3.dev_attr.attr,
+       &sensor_dev_attr_pwm4.dev_attr.attr,
+       &sensor_dev_attr_pwm5.dev_attr.attr,
+       &sensor_dev_attr_pwm6.dev_attr.attr,
+
+       &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm2_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm3_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm4_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm5_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm6_enable.dev_attr.attr,
+       NULL
+};
+
+static umode_t max31790_attrs_visible(struct kobject *kobj,
+                                    struct attribute *a, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct max31790_data *data = dev_get_drvdata(dev);
+       struct device_attribute *devattr =
+                       container_of(a, struct device_attribute, attr);
+       int index = to_sensor_dev_attr(devattr)->index % NR_CHANNEL;
+       u8 fan_config;
+
+       fan_config = data->fan_config[index];
+
+       if (n >= NR_CHANNEL * 2 && n < NR_CHANNEL * 4 &&
+           !(fan_config & MAX31790_FAN_CFG_TACH_INPUT))
+               return 0;
+       if (n >= NR_CHANNEL * 4 && (fan_config & MAX31790_FAN_CFG_TACH_INPUT))
+               return 0;
+
+       return a->mode;
+}
+
+static const struct attribute_group max31790_group = {
+       .attrs = max31790_attrs,
+       .is_visible = max31790_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(max31790);
+
+static int max31790_init_client(struct i2c_client *client,
+                               struct max31790_data *data)
+{
+       int i, rv;
+
+       for (i = 0; i < NR_CHANNEL; i++) {
+               rv = i2c_smbus_read_byte_data(client,
+                               MAX31790_REG_FAN_CONFIG(i));
+               if (rv < 0)
+                       return rv;
+               data->fan_config[i] = rv;
+
+               rv = i2c_smbus_read_byte_data(client,
+                               MAX31790_REG_FAN_DYNAMICS(i));
+               if (rv < 0)
+                       return rv;
+               data->fan_dynamics[i] = rv;
+       }
+
+       return 0;
+}
+
+static int max31790_probe(struct i2c_client *client,
+                         const struct i2c_device_id *id)
+{
+       struct i2c_adapter *adapter = client->adapter;
+       struct device *dev = &client->dev;
+       struct max31790_data *data;
+       struct device *hwmon_dev;
+       int err;
+
+       if (!i2c_check_functionality(adapter,
+                       I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+               return -ENODEV;
+
+       data = devm_kzalloc(dev, sizeof(struct max31790_data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->client = client;
+       mutex_init(&data->update_lock);
+
+       /*
+        * Initialize the max31790 chip
+        */
+       err = max31790_init_client(client, data);
+       if (err)
+               return err;
+
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+                       client->name, data, max31790_groups);
+
+       return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max31790_id[] = {
+       { "max31790", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, max31790_id);
+
+static struct i2c_driver max31790_driver = {
+       .class          = I2C_CLASS_HWMON,
+       .probe          = max31790_probe,
+       .driver = {
+               .name   = "max31790",
+       },
+       .id_table       = max31790_id,
+};
+
+module_i2c_driver(max31790_driver);
+
+MODULE_AUTHOR("Il Han <corone.il.han@gmail.com>");
+MODULE_DESCRIPTION("MAX31790 sensor driver");
+MODULE_LICENSE("GPL");
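The tach arithmetic in the new driver is compact: RPM_FROM_REG() divides 60 * SR * 8192 by the tach count taken from the upper bits of the register (the low four bits are discarded by the >> 4), saturating at FAN_RPM_MAX when no edges were counted. A small standalone check of that macro, copied from the file above with illustrative input values:

#include <stdio.h>

#define FAN_RPM_MAX		7864320
#define RPM_FROM_REG(reg, sr)	(((reg) >> 4) ? \
				 ((60 * (sr) * 8192) / ((reg) >> 4)) : \
				 FAN_RPM_MAX)

int main(void)
{
	int tach_reg = 0x1F40;	/* raw register; count = 0x1F4 = 500 */
	int sr = 4;		/* tach period from the fan dynamics register */

	printf("%d RPM\n", RPM_FROM_REG(tach_reg, sr));
	printf("%d RPM when no edges were seen\n", RPM_FROM_REG(0, sr));
	return 0;
}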
index 8b4fa55e46c6afceb3895515bc0df0cb3b8a4b85..d7ebdf8651f5f57fe598197b1e0620754d1dcc55 100644 (file)
@@ -515,16 +515,24 @@ static const char *const nct6779_temp_label[] = {
        "PCH_DIM1_TEMP",
        "PCH_DIM2_TEMP",
        "PCH_DIM3_TEMP",
-       "BYTE_TEMP"
+       "BYTE_TEMP",
+       "",
+       "",
+       "",
+       "",
+       "Virtual_TEMP"
 };
 
-static const u16 NCT6779_REG_TEMP_ALTERNATE[ARRAY_SIZE(nct6779_temp_label) - 1]
+#define NCT6779_NUM_LABELS     (ARRAY_SIZE(nct6779_temp_label) - 5)
+#define NCT6791_NUM_LABELS     ARRAY_SIZE(nct6779_temp_label)
+
+static const u16 NCT6779_REG_TEMP_ALTERNATE[NCT6791_NUM_LABELS - 1]
        = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
            0, 0x400, 0x401, 0x402, 0x404, 0x405, 0x406, 0x407,
            0x408, 0 };
 
-static const u16 NCT6779_REG_TEMP_CRIT[ARRAY_SIZE(nct6779_temp_label) - 1]
+static const u16 NCT6779_REG_TEMP_CRIT[NCT6791_NUM_LABELS - 1]
        = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x709, 0x70a };
 
 /* NCT6791 specific data */
@@ -557,6 +565,76 @@ static const u16 NCT6792_REG_TEMP_MON[] = {
 static const u16 NCT6792_REG_BEEP[NUM_REG_BEEP] = {
        0xb2, 0xb3, 0xb4, 0xb5, 0xbf };
 
+static const char *const nct6792_temp_label[] = {
+       "",
+       "SYSTIN",
+       "CPUTIN",
+       "AUXTIN0",
+       "AUXTIN1",
+       "AUXTIN2",
+       "AUXTIN3",
+       "",
+       "SMBUSMASTER 0",
+       "SMBUSMASTER 1",
+       "SMBUSMASTER 2",
+       "SMBUSMASTER 3",
+       "SMBUSMASTER 4",
+       "SMBUSMASTER 5",
+       "SMBUSMASTER 6",
+       "SMBUSMASTER 7",
+       "PECI Agent 0",
+       "PECI Agent 1",
+       "PCH_CHIP_CPU_MAX_TEMP",
+       "PCH_CHIP_TEMP",
+       "PCH_CPU_TEMP",
+       "PCH_MCH_TEMP",
+       "PCH_DIM0_TEMP",
+       "PCH_DIM1_TEMP",
+       "PCH_DIM2_TEMP",
+       "PCH_DIM3_TEMP",
+       "BYTE_TEMP",
+       "PECI Agent 0 Calibration",
+       "PECI Agent 1 Calibration",
+       "",
+       "",
+       "Virtual_TEMP"
+};
+
+static const char *const nct6793_temp_label[] = {
+       "",
+       "SYSTIN",
+       "CPUTIN",
+       "AUXTIN0",
+       "AUXTIN1",
+       "AUXTIN2",
+       "AUXTIN3",
+       "",
+       "SMBUSMASTER 0",
+       "SMBUSMASTER 1",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "PECI Agent 0",
+       "PECI Agent 1",
+       "PCH_CHIP_CPU_MAX_TEMP",
+       "PCH_CHIP_TEMP",
+       "PCH_CPU_TEMP",
+       "PCH_MCH_TEMP",
+       "Agent0 Dimm0 ",
+       "Agent0 Dimm1",
+       "Agent1 Dimm0",
+       "Agent1 Dimm1",
+       "BYTE_TEMP0",
+       "BYTE_TEMP1",
+       "PECI Agent 0 Calibration",
+       "PECI Agent 1 Calibration",
+       "",
+       "Virtual_TEMP"
+};
+
 /* NCT6102D/NCT6106D specific data */
 
 #define NCT6106_REG_VBAT       0x318
@@ -3605,7 +3683,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->speed_tolerance_limit = 63;
 
                data->temp_label = nct6779_temp_label;
-               data->temp_label_num = ARRAY_SIZE(nct6779_temp_label);
+               data->temp_label_num = NCT6779_NUM_LABELS;
 
                data->REG_CONFIG = NCT6775_REG_CONFIG;
                data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3682,8 +3760,19 @@ static int nct6775_probe(struct platform_device *pdev)
                data->tolerance_mask = 0x07;
                data->speed_tolerance_limit = 63;
 
-               data->temp_label = nct6779_temp_label;
-               data->temp_label_num = ARRAY_SIZE(nct6779_temp_label);
+               switch (data->kind) {
+               default:
+               case nct6791:
+                       data->temp_label = nct6779_temp_label;
+                       break;
+               case nct6792:
+                       data->temp_label = nct6792_temp_label;
+                       break;
+               case nct6793:
+                       data->temp_label = nct6793_temp_label;
+                       break;
+               }
+               data->temp_label_num = NCT6791_NUM_LABELS;
 
                data->REG_CONFIG = NCT6775_REG_CONFIG;
                data->REG_VBAT = NCT6775_REG_VBAT;
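The label-table change grows nct6779_temp_label by five tail entries (four placeholders and "Virtual_TEMP") and derives the per-chip counts from it, so the NCT6779 keeps its original label count while the NCT6791/6792/6793 use the full table. A tiny standalone illustration of that ARRAY_SIZE arithmetic (the label strings are abbreviated):

#include <stdio.h>

#define ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))

static const char *const labels[] = {
	"", "SYSTIN", "CPUTIN", /* ... */ "BYTE_TEMP",
	"", "", "", "", "Virtual_TEMP"	/* the five new tail entries */
};

#define NCT6779_NUM_LABELS	(ARRAY_SIZE(labels) - 5)	/* old count */
#define NCT6791_NUM_LABELS	ARRAY_SIZE(labels)		/* full table */

int main(void)
{
	printf("NCT6779 sees %zu labels, NCT6791 sees %zu\n",
	       NCT6779_NUM_LABELS, NCT6791_NUM_LABELS);
	return 0;
}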
index 2d9a712699ff5d541e831629834b23882b4fa606..3e23003f78b01ca731e8c232f38e1132eccf251f 100644 (file)
@@ -323,6 +323,7 @@ static const struct of_device_id of_pwm_fan_match[] = {
        { .compatible = "pwm-fan", },
        {},
 };
+MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
 
 static struct platform_driver pwm_fan_driver = {
        .probe          = pwm_fan_probe,
index 3dd2de31a2f8d380f71ff61c562a53d8638f9eb5..472b88285c755e5f18d25ba2c935dbdaca449546 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
@@ -51,6 +52,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
 }
 
 #ifdef CONFIG_ACPI
+/*
+ * The HCNT/LCNT information coming from ACPI should be the most accurate
+ * for a given platform. However, some systems get it wrong. On such systems
+ * we get better results by calculating them from the input clock instead.
+ */
+static const struct dmi_system_id dw_i2c_no_acpi_params[] = {
+       {
+               .ident = "Dell Inspiron 7348",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"),
+               },
+       },
+       { }
+};
+
 static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
                               u16 *hcnt, u16 *lcnt, u32 *sda_hold)
 {
@@ -58,6 +75,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
        acpi_handle handle = ACPI_HANDLE(&pdev->dev);
        union acpi_object *obj;
 
+       if (dmi_check_system(dw_i2c_no_acpi_params))
+               return;
+
        if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
                return;
 
@@ -253,12 +273,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
        adap->dev.parent = &pdev->dev;
        adap->dev.of_node = pdev->dev.of_node;
 
-       r = i2c_add_numbered_adapter(adap);
-       if (r) {
-               dev_err(&pdev->dev, "failure adding adapter\n");
-               return r;
-       }
-
        if (dev->pm_runtime_disabled) {
                pm_runtime_forbid(&pdev->dev);
        } else {
@@ -268,6 +282,13 @@ static int dw_i2c_probe(struct platform_device *pdev)
                pm_runtime_enable(&pdev->dev);
        }
 
+       r = i2c_add_numbered_adapter(adap);
+       if (r) {
+               dev_err(&pdev->dev, "failure adding adapter\n");
+               pm_runtime_disable(&pdev->dev);
+               return r;
+       }
+
        return 0;
 }
 
index 30059c1df2a3b57ea559fc984b559c79e2f06ce6..5801227b97ab089022f90608f6fc425ebd13cd87 100644 (file)
@@ -669,8 +669,6 @@ mv64xxx_i2c_can_offload(struct mv64xxx_i2c_data *drv_data)
        struct i2c_msg *msgs = drv_data->msgs;
        int num = drv_data->num_msgs;
 
-       return false;
-
        if (!drv_data->offload_enabled)
                return false;
 
index e814a36d9b78f26436cd8cf1745dd2aaa3b36db8..6f8b446be5b0e5787deede7f675044332d72f8cb 100644 (file)
@@ -600,7 +600,7 @@ static int i2c_pnx_controller_suspend(struct device *dev)
 {
        struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev);
 
-       clk_disable(alg_data->clk);
+       clk_disable_unprepare(alg_data->clk);
 
        return 0;
 }
@@ -609,7 +609,7 @@ static int i2c_pnx_controller_resume(struct device *dev)
 {
        struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev);
 
-       return clk_enable(alg_data->clk);
+       return clk_prepare_enable(alg_data->clk);
 }
 
 static SIMPLE_DEV_PM_OPS(i2c_pnx_pm,
@@ -672,7 +672,7 @@ static int i2c_pnx_probe(struct platform_device *pdev)
        if (IS_ERR(alg_data->ioaddr))
                return PTR_ERR(alg_data->ioaddr);
 
-       ret = clk_enable(alg_data->clk);
+       ret = clk_prepare_enable(alg_data->clk);
        if (ret)
                return ret;
 
@@ -726,7 +726,7 @@ static int i2c_pnx_probe(struct platform_device *pdev)
        return 0;
 
 out_clock:
-       clk_disable(alg_data->clk);
+       clk_disable_unprepare(alg_data->clk);
        return ret;
 }
 
@@ -735,7 +735,7 @@ static int i2c_pnx_remove(struct platform_device *pdev)
        struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
 
        i2c_del_adapter(&alg_data->adapter);
-       clk_disable(alg_data->clk);
+       clk_disable_unprepare(alg_data->clk);
 
        return 0;
 }
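
The conversion above from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare() is the usual adjustment for the common clock framework, where a clock must be prepared (a possibly sleeping step) before it can be enabled; the combined helpers keep the two halves paired in probe, suspend/resume and remove. A small sketch of the pairing, with an illustrative function name:

#include <linux/clk.h>

/* Sketch of the prepare/enable pairing; the helper is illustrative and may sleep. */
static int example_run_with_clock(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);	/* clk_prepare() + clk_enable() */
	if (ret)
		return ret;

	/* ... touch the hardware while the clock is running ... */

	clk_disable_unprepare(clk);	/* clk_disable() + clk_unprepare() */
	return 0;
}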
index d8361dada584556baccc2c6bd861eb11028c6d51..d8b5a8fee1e6c85588dd569b80894306b1c76b1a 100644 (file)
@@ -690,15 +690,16 @@ static int rcar_i2c_probe(struct platform_device *pdev)
                return ret;
        }
 
+       pm_runtime_enable(dev);
+       platform_set_drvdata(pdev, priv);
+
        ret = i2c_add_numbered_adapter(adap);
        if (ret < 0) {
                dev_err(dev, "reg adap failed: %d\n", ret);
+               pm_runtime_disable(dev);
                return ret;
        }
 
-       pm_runtime_enable(dev);
-       platform_set_drvdata(pdev, priv);
-
        dev_info(dev, "probed\n");
 
        return 0;
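
Registering the adapter is the point at which client devices can be bound and start issuing transfers, so everything the transfer path depends on (driver data, runtime PM) has to be in place first, and has to be undone again if registration fails; the same reordering appears in the designware and s3c24xx hunks in this series. A compressed sketch of the resulting shape, with illustrative names rather than this driver's exact code:

#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Illustrative probe tail: prepare, publish last, unwind in reverse order. */
static int example_probe_tail(struct platform_device *pdev,
			      struct i2c_adapter *adap)
{
	int ret;

	platform_set_drvdata(pdev, adap);	/* state the adapter relies on */
	pm_runtime_enable(&pdev->dev);

	ret = i2c_add_numbered_adapter(adap);	/* adapter becomes visible here */
	if (ret) {
		pm_runtime_disable(&pdev->dev);	/* undo what was done above */
		return ret;
	}

	return 0;
}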
index 50bfd8cef5f224aebb189a5b6635b62316f6117c..5df819610d5280cc1fee176344be4d226fc5ea56 100644 (file)
@@ -1243,17 +1243,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
        i2c->adap.nr = i2c->pdata->bus_num;
        i2c->adap.dev.of_node = pdev->dev.of_node;
 
+       platform_set_drvdata(pdev, i2c);
+
+       pm_runtime_enable(&pdev->dev);
+
        ret = i2c_add_numbered_adapter(&i2c->adap);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+               pm_runtime_disable(&pdev->dev);
                s3c24xx_i2c_deregister_cpufreq(i2c);
                clk_unprepare(i2c->clk);
                return ret;
        }
 
-       platform_set_drvdata(pdev, i2c);
-
-       pm_runtime_enable(&pdev->dev);
        pm_runtime_enable(&i2c->adap.dev);
 
        dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
index 5f89f1e3c2f24fc562a519eb173d33de8c280f42..a59c3111f7fb98df957e19d1fa93faac16322e20 100644 (file)
@@ -694,12 +694,12 @@ static int i2c_device_probe(struct device *dev)
                goto err_clear_wakeup_irq;
 
        status = dev_pm_domain_attach(&client->dev, true);
-       if (status != -EPROBE_DEFER) {
-               status = driver->probe(client, i2c_match_id(driver->id_table,
-                                       client));
-               if (status)
-                       goto err_detach_pm_domain;
-       }
+       if (status == -EPROBE_DEFER)
+               goto err_clear_wakeup_irq;
+
+       status = driver->probe(client, i2c_match_id(driver->id_table, client));
+       if (status)
+               goto err_detach_pm_domain;
 
        return 0;
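
The hunk above changes what happens when dev_pm_domain_attach() returns -EPROBE_DEFER: instead of silently skipping the driver's probe and falling through to the success return, the deferral now takes the existing unwind path, so the earlier wakeup-IRQ setup is undone and the error is reported to the driver core, which will retry the probe later. A hedged sketch of the goto unwind ladder this relies on; the step_*() functions are placeholders, not real i2c-core calls.

static int step_irq_setup(void)        { return 0; }	/* placeholder */
static int step_pm_domain_attach(void) { return 0; }	/* placeholder */
static int step_driver_probe(void)     { return 0; }	/* placeholder */
static void step_pm_domain_detach(void) { }		/* placeholder */
static void step_irq_teardown(void)     { }		/* placeholder */

static int example_device_probe(void)
{
	int err;

	err = step_irq_setup();
	if (err)
		return err;

	err = step_pm_domain_attach();
	if (err)			/* -EPROBE_DEFER now lands here too */
		goto err_clear_irq;

	err = step_driver_probe();
	if (err)
		goto err_detach_pm_domain;

	return 0;

err_detach_pm_domain:
	step_pm_domain_detach();
err_clear_irq:
	step_irq_teardown();
	return err;
}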
 
index 3a3738fe016b3af0a2e0b723396822b786e2c20f..cd4510a6337548d26344b8ccc5cb427ab13dd4ef 100644 (file)
@@ -620,7 +620,7 @@ static struct cpuidle_state skl_cstates[] = {
                .name = "C6-SKL",
                .desc = "MWAIT 0x20",
                .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
-               .exit_latency = 75,
+               .exit_latency = 85,
                .target_residency = 200,
                .enter = &intel_idle,
                .enter_freeze = intel_idle_freeze, },
@@ -636,10 +636,18 @@ static struct cpuidle_state skl_cstates[] = {
                .name = "C8-SKL",
                .desc = "MWAIT 0x40",
                .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
-               .exit_latency = 174,
+               .exit_latency = 200,
                .target_residency = 800,
                .enter = &intel_idle,
                .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C9-SKL",
+               .desc = "MWAIT 0x50",
+               .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 480,
+               .target_residency = 5000,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
        {
                .name = "C10-SKL",
                .desc = "MWAIT 0x60",
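
The Skylake table above gains a C9 entry (MWAIT hint 0x50) and raises the C6 and C8 exit-latency figures. Those two fields are what the cpuidle governor weighs before entering a state: the predicted idle time must cover target_residency and the current PM QoS latency budget must cover exit_latency. A simplified, illustrative filter (not the menu governor's actual code):

#include <linux/cpuidle.h>

/* Illustrative eligibility check using the two fields edited above. */
static bool example_state_usable(const struct cpuidle_state *s,
				 unsigned int predicted_idle_us,
				 unsigned int latency_req_us)
{
	/* e.g. the new C9-SKL entry needs >= 5000 us of predicted idle time
	 * and a latency allowance of at least 480 us. */
	return predicted_idle_us >= s->target_residency &&
	       latency_req_us >= s->exit_latency;
}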
index ff30f880688019fa099d79d7fce3f53b28cfb828..fb93111104249bcc93d390e7492c96f4c211ac27 100644 (file)
 #define ST_ACCEL_4_BDU_MASK                    0x40
 #define ST_ACCEL_4_DRDY_IRQ_ADDR               0x21
 #define ST_ACCEL_4_DRDY_IRQ_INT1_MASK          0x04
-#define ST_ACCEL_4_IG1_EN_ADDR                 0x21
-#define ST_ACCEL_4_IG1_EN_MASK                 0x08
 #define ST_ACCEL_4_MULTIREAD_BIT               true
 
 /* CUSTOM VALUES FOR SENSOR 5 */
@@ -489,10 +487,6 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                .drdy_irq = {
                        .addr = ST_ACCEL_4_DRDY_IRQ_ADDR,
                        .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK,
-                       .ig1 = {
-                               .en_addr = ST_ACCEL_4_IG1_EN_ADDR,
-                               .en_mask = ST_ACCEL_4_IG1_EN_MASK,
-                       },
                },
                .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT,
                .bootime = 2, /* guess */
index ebe415f1064000c95c88f5d20da48a7f52f71d84..0c74869a540ad390a0f995a702ee953cf53e0669 100644 (file)
 #include <linux/types.h>
 #include <linux/gfp.h>
 #include <linux/err.h>
+#include <linux/regulator/consumer.h>
 
 #include <linux/iio/iio.h>
 
+#define TWL4030_USB_SEL_MADC_MCPC      (1<<3)
+#define TWL4030_USB_CARKIT_ANA_CTRL    0xBB
+
 /**
  * struct twl4030_madc_data - a container for madc info
  * @dev:               Pointer to device structure for madc
  * @lock:              Mutex protecting this data structure
+ * @regulator:         Pointer to bias regulator for madc
  * @requests:          Array of request struct corresponding to SW1, SW2 and RT
  * @use_second_irq:    IRQ selection (main or co-processor)
  * @imr:               Interrupt mask register of MADC
@@ -60,6 +65,7 @@
 struct twl4030_madc_data {
        struct device *dev;
        struct mutex lock;      /* mutex protecting this data structure */
+       struct regulator *usb3v1;
        struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];
        bool use_second_irq;
        u8 imr;
@@ -841,6 +847,32 @@ static int twl4030_madc_probe(struct platform_device *pdev)
        }
        twl4030_madc = madc;
 
+       /* Configure MADC[3:6] */
+       ret = twl_i2c_read_u8(TWL_MODULE_USB, &regval,
+                       TWL4030_USB_CARKIT_ANA_CTRL);
+       if (ret) {
+               dev_err(&pdev->dev, "unable to read reg CARKIT_ANA_CTRL  0x%X\n",
+                               TWL4030_USB_CARKIT_ANA_CTRL);
+               goto err_i2c;
+       }
+       regval |= TWL4030_USB_SEL_MADC_MCPC;
+       ret = twl_i2c_write_u8(TWL_MODULE_USB, regval,
+                                TWL4030_USB_CARKIT_ANA_CTRL);
+       if (ret) {
+               dev_err(&pdev->dev, "unable to write reg CARKIT_ANA_CTRL 0x%X\n",
+                               TWL4030_USB_CARKIT_ANA_CTRL);
+               goto err_i2c;
+       }
+
+       /* Enable 3v1 bias regulator for MADC[3:6] */
+       madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
+       if (IS_ERR(madc->usb3v1))
+               return -ENODEV;
+
+       ret = regulator_enable(madc->usb3v1);
+       if (ret)
+               dev_err(madc->dev, "could not enable 3v1 bias regulator\n");
+
        ret = iio_device_register(iio_dev);
        if (ret) {
                dev_err(&pdev->dev, "could not register iio device\n");
@@ -866,6 +898,8 @@ static int twl4030_madc_remove(struct platform_device *pdev)
        twl4030_madc_set_current_generator(madc, 0, 0);
        twl4030_madc_set_power(madc, 0);
 
+       regulator_disable(madc->usb3v1);
+
        return 0;
 }
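
Two things happen in this pair of hunks: the CARKIT_ANA_CTRL register is updated read-modify-write over the TWL USB module, so only the MADC_MCPC routing bit changes, and the 3.1 V bias regulator needed by MADC inputs 3..6 is acquired (devm-managed) and enabled, with the matching regulator_disable() added to remove(). A compact sketch of the read-modify-write step, using the same twl helpers and register names as the patch but a made-up function name:

/* Hedged sketch: set one bit in CARKIT_ANA_CTRL without disturbing the rest. */
static int example_route_madc_to_mcpc(void)
{
	u8 val;
	int ret;

	ret = twl_i2c_read_u8(TWL_MODULE_USB, &val, TWL4030_USB_CARKIT_ANA_CTRL);
	if (ret)
		return ret;

	val |= TWL4030_USB_SEL_MADC_MCPC;

	return twl_i2c_write_u8(TWL_MODULE_USB, val, TWL4030_USB_CARKIT_ANA_CTRL);
}

Because devm only manages the regulator handle, not its enable count, the explicit regulator_disable() in twl4030_madc_remove() is what keeps enable/disable balanced.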
 
index 8f66c67ff0df09380dc7c486dce44d92efac1f18..87471ef371986c11f59e6761e7566ebec78cc1cd 100644 (file)
@@ -508,12 +508,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;
 
+       mutex_lock(&table->lock);
        ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT);
 
        /* Couldn't find default GID location */
        WARN_ON(ix < 0);
 
-       mutex_lock(&table->lock);
        if (!__ib_cache_gid_get(ib_dev, port, ix,
                                &current_gid, &current_gid_attr) &&
            mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
index ea4db9c1d44fba56ea5798649f3744c78b148e87..4f918b929eca955532cd5dc0541bc842272a0a3b 100644 (file)
@@ -835,6 +835,11 @@ retest:
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irq(&cm_id_priv->lock);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
+               spin_lock_irq(&cm.lock);
+               if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
+                       rb_erase(&cm_id_priv->sidr_id_node,
+                                &cm.remote_sidr_table);
+               spin_unlock_irq(&cm.lock);
                break;
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
@@ -3172,7 +3177,10 @@ int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
        spin_lock_irqsave(&cm.lock, flags);
-       rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+       if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
+               rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+               RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
+       }
        spin_unlock_irqrestore(&cm.lock, flags);
        return 0;
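
Both hunks above guard rb_erase() with RB_EMPTY_NODE() and clear the node after removal, so the SIDR id can be taken out of cm.remote_sidr_table by either ib_send_cm_sidr_rep() or the destroy path without the second caller corrupting the tree. The idiom, sketched with illustrative names; note that the node has to start out in the cleared state for the check to be meaningful:

#include <linux/rbtree.h>

/* Illustrative helper: removal becomes idempotent when the node is kept in
 * the "not linked" state (RB_CLEAR_NODE) whenever it is outside the tree. */
static void example_remove_once(struct rb_root *tree, struct rb_node *node)
{
	if (!RB_EMPTY_NODE(node)) {
		rb_erase(node, tree);
		RB_CLEAR_NODE(node);	/* mark as unlinked */
	}
}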
 
index b1ab13f3e182bb520cc986512d11e9016ecf1362..36b12d560e17e5a862a2e37d1f56875b46425f4b 100644 (file)
@@ -1067,14 +1067,14 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
                       sizeof(req->local_gid));
                req->has_gid    = true;
                req->service_id = req_param->primary_path->service_id;
-               req->pkey       = req_param->bth_pkey;
+               req->pkey       = be16_to_cpu(req_param->primary_path->pkey);
                break;
        case IB_CM_SIDR_REQ_RECEIVED:
                req->device     = sidr_param->listen_id->device;
                req->port       = sidr_param->port;
                req->has_gid    = false;
                req->service_id = sidr_param->service_id;
-               req->pkey       = sidr_param->bth_pkey;
+               req->pkey       = sidr_param->pkey;
                break;
        default:
                return -EINVAL;
@@ -1232,14 +1232,32 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv,
        return true;
 }
 
+static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
+{
+       enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
+       enum rdma_transport_type transport =
+               rdma_node_get_transport(device->node_type);
+
+       return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
+}
+
+static bool cma_protocol_roce(const struct rdma_cm_id *id)
+{
+       struct ib_device *device = id->device;
+       const int port_num = id->port_num ?: rdma_start_port(device);
+
+       return cma_protocol_roce_dev_port(device, port_num);
+}
+
 static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
                              const struct net_device *net_dev)
 {
        const struct rdma_addr *addr = &id_priv->id.route.addr;
 
        if (!net_dev)
-               /* This request is an AF_IB request */
-               return addr->src_addr.ss_family == AF_IB;
+               /* This request is an AF_IB request or a RoCE request */
+               return addr->src_addr.ss_family == AF_IB ||
+                      cma_protocol_roce(&id_priv->id);
 
        return !addr->dev_addr.bound_dev_if ||
               (net_eq(dev_net(net_dev), &init_net) &&
@@ -1294,6 +1312,10 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
                if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
                        /* Assuming the protocol is AF_IB */
                        *net_dev = NULL;
+               } else if (cma_protocol_roce_dev_port(req.device, req.port)) {
+                       /* TODO find the net dev matching the request parameters
+                        * through the RoCE GID table */
+                       *net_dev = NULL;
                } else {
                        return ERR_CAST(*net_dev);
                }
@@ -1302,7 +1324,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
        bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id),
                                cma_port_from_service_id(req.service_id));
        id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
-       if (IS_ERR(id_priv)) {
+       if (IS_ERR(id_priv) && *net_dev) {
                dev_put(*net_dev);
                *net_dev = NULL;
        }
@@ -1593,11 +1615,16 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
                if (ret)
                        goto err;
        } else {
-               /* An AF_IB connection */
-               WARN_ON_ONCE(ss_family != AF_IB);
-
-               cma_translate_ib((struct sockaddr_ib *)cma_src_addr(id_priv),
-                                &rt->addr.dev_addr);
+               if (!cma_protocol_roce(listen_id) &&
+                   cma_any_addr(cma_src_addr(id_priv))) {
+                       rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
+                       rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
+                       ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
+               } else if (!cma_any_addr(cma_src_addr(id_priv))) {
+                       ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
+                       if (ret)
+                               goto err;
+               }
        }
        rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
@@ -1635,13 +1662,12 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
                if (ret)
                        goto err;
        } else {
-               /* An AF_IB connection */
-               WARN_ON_ONCE(ss_family != AF_IB);
-
-               if (!cma_any_addr(cma_src_addr(id_priv)))
-                       cma_translate_ib((struct sockaddr_ib *)
-                                               cma_src_addr(id_priv),
-                                        &id->route.addr.dev_addr);
+               if (!cma_any_addr(cma_src_addr(id_priv))) {
+                       ret = cma_translate_addr(cma_src_addr(id_priv),
+                                                &id->route.addr.dev_addr);
+                       if (ret)
+                               goto err;
+               }
        }
 
        id_priv->state = RDMA_CM_CONNECT;
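
The new cma_protocol_roce_dev_port()/cma_protocol_roce() helpers above classify a port as RoCE when an IB-transport device exposes an Ethernet link layer on it; listener matching then accepts a request with no matching net_dev either as AF_IB or as RoCE (for now, see the TODO), the request pkey is read from the primary path record rather than the BTH, and address translation no longer assumes AF_IB. One small reading aid for the helper: the GNU "a ?: b" shorthand it uses expands to the plain conditional below, i.e. use the id's port if it is already set, otherwise fall back to the device's first port.

const int port_num = id->port_num ? id->port_num : rdma_start_port(device);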
index 6b24cba1e474df33002186275fb2a7ca81c83717..178f98482e13e217c16d447978a855a900808bc0 100644 (file)
@@ -250,25 +250,44 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
 {
        struct in_device *in_dev;
+       struct sin_list {
+               struct list_head        list;
+               struct sockaddr_in      ip;
+       };
+       struct sin_list *sin_iter;
+       struct sin_list *sin_temp;
 
+       LIST_HEAD(sin_list);
        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;
 
-       in_dev = in_dev_get(ndev);
-       if (!in_dev)
+       rcu_read_lock();
+       in_dev = __in_dev_get_rcu(ndev);
+       if (!in_dev) {
+               rcu_read_unlock();
                return;
+       }
 
        for_ifa(in_dev) {
-               struct sockaddr_in ip;
+               struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 
-               ip.sin_family = AF_INET;
-               ip.sin_addr.s_addr = ifa->ifa_address;
-               update_gid_ip(GID_ADD, ib_dev, port, ndev,
-                             (struct sockaddr *)&ip);
+               if (!entry) {
+                       pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
+                       continue;
+               }
+               entry->ip.sin_family = AF_INET;
+               entry->ip.sin_addr.s_addr = ifa->ifa_address;
+               list_add_tail(&entry->list, &sin_list);
        }
        endfor_ifa(in_dev);
+       rcu_read_unlock();
 
-       in_dev_put(in_dev);
+       list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
+               update_gid_ip(GID_ADD, ib_dev, port, ndev,
+                             (struct sockaddr *)&sin_iter->ip);
+               list_del(&sin_iter->list);
+               kfree(sin_iter);
+       }
 }
 
 static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
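
The rewrite of enum_netdev_ipv4_ips() above walks the device's IPv4 addresses under rcu_read_lock() and only copies them (GFP_ATOMIC) onto a private list; the GID table updates, which may sleep, run after the RCU read section ends. A stripped-down sketch of that collect-then-process pattern, with illustrative types and no real source structure to walk:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_copy {
	struct list_head list;
	unsigned int payload;		/* data copied while under RCU */
};

/* Collect under RCU (no sleeping allowed), process after rcu_read_unlock(). */
static void example_collect_then_process(void)
{
	struct example_copy *c, *tmp;
	LIST_HEAD(pending);

	rcu_read_lock();
	/* ... for each RCU-protected item: ... */
	c = kzalloc(sizeof(*c), GFP_ATOMIC);	/* atomic: inside the RCU section */
	if (c)
		list_add_tail(&c->list, &pending);
	rcu_read_unlock();

	list_for_each_entry_safe(c, tmp, &pending, list) {
		/* sleepable work on c->payload goes here */
		list_del(&c->list);
		kfree(c);
	}
}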
index a53fc9b01c69957cb6d45433c4a34a0a69a5c367..30467d10df91b170e40657864354356153545e76 100644 (file)
@@ -1624,11 +1624,16 @@ static int ucma_open(struct inode *inode, struct file *filp)
        if (!file)
                return -ENOMEM;
 
+       file->close_wq = create_singlethread_workqueue("ucma_close_id");
+       if (!file->close_wq) {
+               kfree(file);
+               return -ENOMEM;
+       }
+
        INIT_LIST_HEAD(&file->event_list);
        INIT_LIST_HEAD(&file->ctx_list);
        init_waitqueue_head(&file->poll_wait);
        mutex_init(&file->mut);
-       file->close_wq = create_singlethread_workqueue("ucma_close_id");
 
        filp->private_data = file;
        file->filp = filp;
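
create_singlethread_workqueue() can fail, and the old code neither checked for that nor created the workqueue before the file became reachable. The hunk moves the allocation to the top of ucma_open(), frees the half-built file on failure, and assigns filp->private_data only once everything is initialised, the usual publish-last rule for open() handlers. A terse sketch of that shape with illustrative names:

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_file {
	struct workqueue_struct *wq;
	/* ... lists, locks, wait queues ... */
};

/* Hedged sketch: allocate and verify every resource first, publish last. */
static int example_open(struct inode *inode, struct file *filp)
{
	struct example_file *file;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->wq = create_singlethread_workqueue("example_wq");
	if (!file->wq) {
		kfree(file);		/* undo the only step taken so far */
		return -ENOMEM;
	}

	/* ... initialise the remaining members ... */

	filp->private_data = file;	/* object becomes visible only now */
	return 0;
}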
index 41d6911e244e1765a34b77fe067cf4e4ddeab172..f1ccd40beae9eb2b7a8e6aaad7ef1a98ab8a0a81 100644 (file)
@@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, apm))
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
-       props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
        if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
@@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
        return 0;
 }
 
-static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
-{
-       struct mlx5_create_mkey_mbox_in *in;
-       struct mlx5_mkey_seg *seg;
-       struct mlx5_core_mr mr;
-       int err;
-
-       in = kzalloc(sizeof(*in), GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       seg = &in->seg;
-       seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
-       seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
-       seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-       seg->start_addr = 0;
-
-       err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
-                                   NULL, NULL, NULL);
-       if (err) {
-               mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
-               goto err_in;
-       }
-
-       kfree(in);
-       *key = mr.key;
-
-       return 0;
-
-err_in:
-       kfree(in);
-
-       return err;
-}
-
-static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
-{
-       struct mlx5_core_mr mr;
-       int err;
-
-       memset(&mr, 0, sizeof(mr));
-       mr.key = key;
-       err = mlx5_core_destroy_mkey(dev->mdev, &mr);
-       if (err)
-               mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
-}
-
 static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
@@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
-       } else {
-               err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
-               if (err) {
-                       mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
-                       kfree(pd);
-                       return ERR_PTR(err);
-               }
        }
 
        return &pd->ibpd;
@@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
        struct mlx5_ib_dev *mdev = to_mdev(pd->device);
        struct mlx5_ib_pd *mpd = to_mpd(pd);
 
-       if (!pd->uobject)
-               free_pa_mkey(mdev, mpd->pa_lkey);
-
        mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
        kfree(mpd);
 
@@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
        struct ib_srq_init_attr attr;
        struct mlx5_ib_dev *dev;
        struct ib_cq_init_attr cq_attr = {.cqe = 1};
-       u32 rsvd_lkey;
        int ret = 0;
 
        dev = container_of(devr, struct mlx5_ib_dev, devr);
 
-       ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
-       if (ret) {
-               pr_err("Failed to query special context %d\n", ret);
-               return ret;
-       }
-       dev->ib_dev.local_dma_lkey = rsvd_lkey;
-
        devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
        if (IS_ERR(devr->p0)) {
                ret = PTR_ERR(devr->p0);
@@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner               = THIS_MODULE;
        dev->ib_dev.node_type           = RDMA_NODE_IB_CA;
+       dev->ib_dev.local_dma_lkey      = 0 /* not supported for now */;
        dev->num_ports          = MLX5_CAP_GEN(mdev, num_ports);
        dev->ib_dev.phys_port_cnt     = dev->num_ports;
        dev->ib_dev.num_comp_vectors    =
index bb8cda79e8812cf1122feaa70a3f113958858d77..22123b79d550d6a7e0474501592f36dc6f0b632e 100644 (file)
@@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte
 struct mlx5_ib_pd {
        struct ib_pd            ibpd;
        u32                     pdn;
-       u32                     pa_lkey;
 };
 
 /* Use macros here so that don't have to duplicate
@@ -213,7 +212,6 @@ struct mlx5_ib_qp {
        int                     uuarn;
 
        int                     create_type;
-       u32                     pa_lkey;
 
        /* Store signature errors */
        bool                    signature_en;
index c745c6c5e10da0b296fd19ef6ee01d7650af44ff..6f521a3418e8e1c69b9cca74fc8443dd05e30dac 100644 (file)
@@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                        err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
                        if (err)
                                mlx5_ib_dbg(dev, "err %d\n", err);
-                       else
-                               qp->pa_lkey = to_mpd(pd)->pa_lkey;
                }
 
                if (err)
@@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
                mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
        dseg->addr = cpu_to_be64(mfrpl->map);
        dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
-       dseg->lkey = cpu_to_be32(pd->pa_lkey);
+       dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
 }
 
 static __be32 send_ieth(struct ib_send_wr *wr)
index 5be13d8991bce0e7c4edec27c8034e847d4cba29..f903502d3883256e8044dbbec9b2ef9baa2fbdc4 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 04a66229584e0089ef72e027f986d3d71a574b0d..7fe9502ce8d3df43a57b8e7325ecd0aa262949cd 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 3935672661423ff007f6cfc405197967e56c9792..596e0ed49a8e2a066097b5612da723535fdcccbc 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 9d737ed5e55d905e6452d2c5fb20f040cf765705..b54986de5f0cad3677461af613862351ca3fb3f2 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 5d13860161a4a64af533b3a5aee3c43e25e475d0..5e55b8bc6fe402af423118c1454b5bc67d21d8ba 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 4087d24a88f6d9a1e6080b3a2b91dff473cd0944..98453e91daa6f0a1d91c9eb50d468a2585bfcdbe 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index e3c9bd9d3ba366d7f5cd8ef30f5cd5f43b3fbe74..3c37dd59c04eaec750e5ddbf98383e8e4b64d669 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 93713a2230b36724c5bef33933de4c512ba89784..3a8add9ddf4611f89272ee21a0a17dc70fe5028f 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index e5a9297dd1bd3eac9f2dda97f2954e002a31ca4f..525bf272671e6973afb13472263dd8f730c4eac3 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 34c49b8105feb4b59ae320997622e4145d24a742..0c15bd885035ee5fe5110e1317cafe09e762fe07 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index db3588df3546b3ceaf8dce0ae8e175e6326265a3..85dc3f989ff72aa565c85efb4d07fdc7772d3c93 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index b0aafe8db0c35a9d2930e59207a0a01f530b1792..b1458be1d402d60b417904ec450e3d7a0b5b5020 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 27dc67c1689ff4245dff59931f91e5c3d22b58c4..3412ea06116e2cca6ba802571edd5e48186edc7f 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 0d09b493cd02b4f8fa12a217c8cfc19e39cc5693..3d98e16cfeaf67fa9300d941eb064b96fd40b274 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 7df43827cb29661a039d1794d24a20e6cbf1b094..f8e3211689a3453164c9044f3bef4601053c1dba 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 0bd04efa16f33f514c30b3b5c32cf62f8488c271..414eaa566bd94e5f05a796a5416ef8f76e7939f6 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 75777a66c6848a1d7e946782052382e8f031c0b9..183fcb6a952f4bdaf0714492e1e3cce4a3d15852 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index ddef6f77a78cf4c3fbf68a1b867a23b843fd3124..de318389a301a58058fb166acd85aef0189e89d5 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 7e5dc6d9f462d68ecc3a99a7ef997e74d4e2b626..9a7a2d9755c021928ea38be608abda733129a76d 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index cb2337f0532b5dcbe0d328eb74a196d976b7b6dc..645a5f6e6c88f0a4166bf0647d26d3b7bf082670 100644 (file)
@@ -7,7 +7,7 @@
  * licenses.  You may choose to be licensed under the terms of the GNU
  * General Public License (GPL) Version 2, available from the file
  * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * BSD license below:
  *
  *     Redistribution and use in source and binary forms, with or
  *     without modification, are permitted provided that the following
index 70440996e8f2cbee289fb137d16dbd7ece78d345..45ca7c1613a7f76a86b8ac81c0d215746490ea04 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 3a4288e0fbace4621df7166e7e64ca7a97a9d283..42b4b4c4e452eae8b712fb0ef295f304a7ab7492 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index d4f752e258fd4812ec81c2f627ba4ff9e222e4cc..c0b0b876ab905a574dd23c3a87fa13d8d9ce4990 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 656b88c39edab15c27214e2e963ce31104fcb2e5..66de93fb8ea934c15051f7b33fa7808306f19f05 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 14d931a8829dc4616571af436e30fd2b03070825..a08423e478af2778f9529f558221658aa62c1f42 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index ca2873698d75444066312640a1eb84dc5e2190db..edc5b8565d6d9eb5ba6e22e1baefd21d53986b5f 100644 (file)
@@ -80,7 +80,7 @@ enum {
        IPOIB_NUM_WC              = 4,
 
        IPOIB_MAX_PATH_REC_QUEUE  = 3,
-       IPOIB_MAX_MCAST_QUEUE     = 3,
+       IPOIB_MAX_MCAST_QUEUE     = 64,
 
        IPOIB_FLAG_OPER_UP        = 0,
        IPOIB_FLAG_INITIALIZED    = 1,
@@ -495,6 +495,7 @@ void ipoib_dev_cleanup(struct net_device *dev);
 void ipoib_mcast_join_task(struct work_struct *work);
 void ipoib_mcast_carrier_on_task(struct work_struct *work);
 void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
+void ipoib_mcast_free(struct ipoib_mcast *mc);
 
 void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
@@ -548,6 +549,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,
 
 int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
                       union ib_gid *mgid, int set_qkey);
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast);
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid);
 
 int ipoib_init_qp(struct net_device *dev);
 int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
index 36536ce5a3e2f9d51278be970d51bb51ec07232c..babba05d7a0eb707f472d7de3cb06843a0844eff 100644 (file)
@@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
        unsigned long dt;
        unsigned long flags;
        int i;
+       LIST_HEAD(remove_list);
+       struct ipoib_mcast *mcast, *tmcast;
+       struct net_device *dev = priv->dev;
 
        if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
                return;
@@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
                                                          lockdep_is_held(&priv->lock))) != NULL) {
                        /* was the neigh idle for two GC periods */
                        if (time_after(neigh_obsolete, neigh->alive)) {
+                               u8 *mgid = neigh->daddr + 4;
+
+                               /* Is this multicast ? */
+                               if (*mgid == 0xff) {
+                                       mcast = __ipoib_mcast_find(dev, mgid);
+
+                                       if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+                                               list_del(&mcast->list);
+                                               rb_erase(&mcast->rb_node, &priv->multicast_tree);
+                                               list_add_tail(&mcast->list, &remove_list);
+                                       }
+                               }
+
                                rcu_assign_pointer(*np,
                                                   rcu_dereference_protected(neigh->hnext,
                                                                             lockdep_is_held(&priv->lock)));
@@ -1191,6 +1207,10 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
 
 out_unlock:
        spin_unlock_irqrestore(&priv->lock, flags);
+       list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
+               ipoib_mcast_leave(dev, mcast);
+               ipoib_mcast_free(mcast);
+       }
 }
 
 static void ipoib_reap_neigh(struct work_struct *work)
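The reaper change above unlinks expired send-only multicast groups onto a local remove_list while priv->lock is held and only calls ipoib_mcast_leave()/ipoib_mcast_free() after the spinlock is dropped, since those calls may sleep. A minimal user-space sketch of that unlink-under-the-lock, release-outside-the-lock pattern (all names are invented for illustration; a pthread mutex stands in for the spinlock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int stale;                      /* marked for removal */
        struct node *next;
};

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Unlink stale nodes onto a private list while the lock is held,
 * then release them only after the lock has been dropped.
 */
static void reap(void)
{
        struct node *remove_list = NULL, *n, *tmp, **pp;

        pthread_mutex_lock(&lock);
        pp = &head;
        while ((n = *pp) != NULL) {
                if (n->stale) {
                        *pp = n->next;          /* unlink from the live list */
                        n->next = remove_list;  /* park on the local list */
                        remove_list = n;
                } else {
                        pp = &n->next;
                }
        }
        pthread_mutex_unlock(&lock);

        for (n = remove_list; n; n = tmp) {     /* safe traversal while freeing */
                tmp = n->next;
                free(n);
        }
}

int main(void)
{
        for (int i = 0; i < 5; i++) {
                struct node *n = calloc(1, sizeof(*n));

                n->stale = i & 1;
                n->next = head;
                head = n;
        }
        reap();
        for (struct node *n = head; n; n = n->next)
                printf("kept node, stale=%d\n", n->stale);
        return 0;
}
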
index 09a1748f9d131423f020020456d61d2f6c44a8b1..d750a86042f3d8da0736c23a52f41737b329c37d 100644 (file)
@@ -106,7 +106,7 @@ static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv,
                queue_delayed_work(priv->wq, &priv->mcast_task, 0);
 }
 
-static void ipoib_mcast_free(struct ipoib_mcast *mcast)
+void ipoib_mcast_free(struct ipoib_mcast *mcast)
 {
        struct net_device *dev = mcast->dev;
        int tx_dropped = 0;
@@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
        return mcast;
 }
 
-static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->multicast_tree.rb_node;
@@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
                rec.hop_limit     = priv->broadcast->mcmember.hop_limit;
 
                /*
-                * Historically Linux IPoIB has never properly supported SEND
-                * ONLY join. It emulated it by not providing all the required
-                * attributes, which is enough to prevent group creation and
-                * detect if there are full members or not. A major problem
-                * with supporting SEND ONLY is detecting when the group is
-                * auto-destroyed as IPoIB will cache the MLID..
+                * Send-only IB Multicast joins do not work at the core
+                * IB layer yet, so we can't use them here.  However,
+                * we are emulating an Ethernet multicast send, which
+                * does not require a multicast subscription and will
+                * still send properly.  The most appropriate thing to
+                * do is to create the group if it doesn't exist as that
+                * most closely emulates the behavior, from a user space
+                * application perspective, of Ethernet multicast
+                * operation.  For now, we do a full join, maybe later
+                * when the core IB layers support send only joins we
+                * will use them.
                 */
-#if 1
-               if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
-                       comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
-#else
+#if 0
                if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
                        rec.join_state = 4;
 #endif
@@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
        return 0;
 }
 
-static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret = 0;
index 1ace5d83a4d761b82ffbe446bbf41a1f051cd66b..f58ff96b6cbb9778153b4ab79794f8a206fa9343 100644 (file)
@@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
 module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024");
 
+bool iser_always_reg = true;
+module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
+MODULE_PARM_DESC(always_register,
+                "Always register memory, even for continuous memory regions (default:true)");
+
 bool iser_pi_enable = false;
 module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
 MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
index 86f6583485ef3f99c678a5ce1087f45e7e2ba1f1..a5edd6ede692c7be3d1c6da2f355b062cea9e43e 100644 (file)
@@ -611,6 +611,7 @@ extern int iser_debug_level;
 extern bool iser_pi_enable;
 extern int iser_pi_guard;
 extern unsigned int iser_max_sectors;
+extern bool iser_always_reg;
 
 int iser_assign_reg_ops(struct iser_device *device);
 
index 2493cc748db839b4ec885b82e5292633f242ad01..4c46d67d37a13100b60c6daa0a0b01b8f6855608 100644 (file)
@@ -803,11 +803,12 @@ static int
 iser_reg_prot_sg(struct iscsi_iser_task *task,
                 struct iser_data_buf *mem,
                 struct iser_fr_desc *desc,
+                bool use_dma_key,
                 struct iser_mem_reg *reg)
 {
        struct iser_device *device = task->iser_conn->ib_conn.device;
 
-       if (mem->dma_nents == 1)
+       if (use_dma_key)
                return iser_reg_dma(device, mem, reg);
 
        return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
@@ -817,11 +818,12 @@ static int
 iser_reg_data_sg(struct iscsi_iser_task *task,
                 struct iser_data_buf *mem,
                 struct iser_fr_desc *desc,
+                bool use_dma_key,
                 struct iser_mem_reg *reg)
 {
        struct iser_device *device = task->iser_conn->ib_conn.device;
 
-       if (mem->dma_nents == 1)
+       if (use_dma_key)
                return iser_reg_dma(device, mem, reg);
 
        return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
@@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
        struct iser_mem_reg *reg = &task->rdma_reg[dir];
        struct iser_mem_reg *data_reg;
        struct iser_fr_desc *desc = NULL;
+       bool use_dma_key;
        int err;
 
        err = iser_handle_unaligned_buf(task, mem, dir);
        if (unlikely(err))
                return err;
 
-       if (mem->dma_nents != 1 ||
-           scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
+       use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
+                      scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);
+
+       if (!use_dma_key) {
                desc = device->reg_ops->reg_desc_get(ib_conn);
                reg->mem_h = desc;
        }
@@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
        else
                data_reg = &task->desc.data_reg;
 
-       err = iser_reg_data_sg(task, mem, desc, data_reg);
+       err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
        if (unlikely(err))
                goto err_reg;
 
@@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
                        if (unlikely(err))
                                goto err_reg;
 
-                       err = iser_reg_prot_sg(task, mem, desc, prot_reg);
+                       err = iser_reg_prot_sg(task, mem, desc,
+                                              use_dma_key, prot_reg);
                        if (unlikely(err))
                                goto err_reg;
                }
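The registration change folds the decision into a single use_dma_key flag: the global DMA key is used only when the buffer maps to one DMA segment, the new always_register parameter is off, and no T10-PI protection is requested; otherwise a fastreg descriptor is taken. A stand-alone sketch of that predicate (names are hypothetical, not the driver's):

#include <stdbool.h>
#include <stdio.h>

enum prot_op { PROT_NORMAL, PROT_DIF };  /* stand-ins for SCSI_PROT_* values */

static bool use_dma_key(int dma_nents, bool always_reg, enum prot_op op)
{
        /*
         * Fall back to the global DMA key only for a single, unprotected
         * DMA segment when registration has not been forced.
         */
        return dma_nents == 1 && !always_reg && op == PROT_NORMAL;
}

int main(void)
{
        printf("%d\n", use_dma_key(1, false, PROT_NORMAL));  /* 1: DMA key */
        printf("%d\n", use_dma_key(1, true,  PROT_NORMAL));  /* 0: forced reg */
        printf("%d\n", use_dma_key(2, false, PROT_NORMAL));  /* 0: scattered */
        printf("%d\n", use_dma_key(1, false, PROT_DIF));     /* 0: protected */
        return 0;
}
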
index ae70cc1463ac2b75d7eae512bf3224e90ba2d59f..85132d867bc86fcfcd99b7065e9f746301422de1 100644 (file)
@@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
                             (unsigned long)comp);
        }
 
-       device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
-                                  IB_ACCESS_REMOTE_WRITE |
-                                  IB_ACCESS_REMOTE_READ);
-       if (IS_ERR(device->mr))
-               goto dma_mr_err;
+       if (!iser_always_reg) {
+               int access = IB_ACCESS_LOCAL_WRITE |
+                            IB_ACCESS_REMOTE_WRITE |
+                            IB_ACCESS_REMOTE_READ;
+
+               device->mr = ib_get_dma_mr(device->pd, access);
+               if (IS_ERR(device->mr))
+                       goto dma_mr_err;
+       }
 
        INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
                                iser_event_handler);
@@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device)
        return 0;
 
 handler_err:
-       ib_dereg_mr(device->mr);
+       if (device->mr)
+               ib_dereg_mr(device->mr);
 dma_mr_err:
        for (i = 0; i < device->comps_used; i++)
                tasklet_kill(&device->comps[i].tasklet);
@@ -173,7 +178,6 @@ comps_err:
 static void iser_free_device_ib_res(struct iser_device *device)
 {
        int i;
-       BUG_ON(device->mr == NULL);
 
        for (i = 0; i < device->comps_used; i++) {
                struct iser_comp *comp = &device->comps[i];
@@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device)
        }
 
        (void)ib_unregister_event_handler(&device->event_handler);
-       (void)ib_dereg_mr(device->mr);
+       if (device->mr)
+               (void)ib_dereg_mr(device->mr);
        ib_dealloc_pd(device->pd);
 
        kfree(device->comps);
index 56eb471b5576954f6119664b23a1d7b83316d3fb..4215b5382092c15d693e62de6e029626e9fa551d 100644 (file)
@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY
 config JOYSTICK_ZHENHUA
        tristate "5-byte Zhenhua RC transmitter"
        select SERIO
+       select BITREVERSE
        help
          Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
          supplied with a ready to fly micro electric indoor helicopters
index b76ac580703ce5dc9ef97fac6620adc47ea44273..a8bc2fe170dd83e12ff78706f97f9bc32a72e5cd 100644 (file)
@@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data)
                if (w->counter == 24) { /* full frame */
                        walkera0701_parse_frame(w);
                        w->counter = NO_SYNC;
-                       if (abs(pulse_time - SYNC_PULSE) < RESERVE)     /* new frame sync */
+                       if (abs64(pulse_time - SYNC_PULSE) < RESERVE)   /* new frame sync */
                                w->counter = 0;
                } else {
                        if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE)
@@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data)
                        } else
                                w->counter = NO_SYNC;
                }
-       } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) <
+       } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) <
                                RESERVE + BIN1_PULSE - BIN0_PULSE)      /* frame sync .. */
                w->counter = 0;
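pulse_time here is a 64-bit quantity, and the kernel abs() of this era could truncate such a difference on 32-bit builds, which is why the hunk moves to abs64(). Plain user-space abs() shows the same pitfall, with llabs() playing the abs64() role (the values below are illustrative only):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        long long sync_pulse = 3000000LL;
        long long pulse_time = sync_pulse + 4294967297LL;   /* > 32 bits apart */
        long long diff = pulse_time - sync_pulse;

        /* Passing a 64-bit value to abs() truncates it to int first. */
        printf("abs((int)diff) = %d\n", abs((int)diff));
        /* llabs() keeps the full width, much like the kernel's abs64(). */
        printf("llabs(diff)    = %lld\n", llabs(diff));
        return 0;
}
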
 
index b052afec9a11f0d323eb735dee3d262eabf116a2..6639b2b8528aa6da9a518d3bd3dc0da3010f4a09 100644 (file)
@@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
 
        error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
        if (error)
-               return error;
+               goto err_free_keypad;
 
        res = request_mem_region(res->start, resource_size(res), pdev->name);
        if (!res) {
index 867db8a91372017d2af6a11d24f189a7bc796fc2..e317b75357a0182d99ef3c04223596bd1de6fc80 100644 (file)
@@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb,
        default:
                reset_type = PON_PS_HOLD_TYPE_HARD_RESET;
                break;
-       };
+       }
 
        error = regmap_update_bits(pwrkey->regmap,
                                   pwrkey->baseaddr + PON_PS_HOLD_RST_CTL,
index 345df9b03aed7f1eff56cf2f1a743c59c8d5e780..5adbcedcb81cf4391bfdddefe11aae5d3131dd76 100644 (file)
@@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev,
        dev->id.product = user_dev->id.product;
        dev->id.version = user_dev->id.version;
 
-       for_each_set_bit(i, dev->absbit, ABS_CNT) {
+       for (i = 0; i < ABS_CNT; i++) {
                input_abs_set_max(dev, i, user_dev->absmax[i]);
                input_abs_set_min(dev, i, user_dev->absmin[i]);
                input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
index 4d246861d692b810f3074aa7917cda86893ac6c2..41e6cb501e6a1d5b07801463460fe4ebbaace887 100644 (file)
@@ -100,7 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
 #define ALPS_FOUR_BUTTONS      0x40    /* 4 direction button present */
 #define ALPS_PS2_INTERLEAVED   0x80    /* 3-byte PS/2 packet interleaved with
                                           6-byte ALPS packet */
-#define ALPS_DELL              0x100   /* device is a Dell laptop */
+#define ALPS_STICK_BITS                0x100   /* separate stick button bits */
 #define ALPS_BUTTONPAD         0x200   /* device is a clickpad */
 
 static const struct alps_model_info alps_model_data[] = {
@@ -159,6 +159,43 @@ static const struct alps_protocol_info alps_v8_protocol_data = {
        ALPS_PROTO_V8, 0x18, 0x18, 0
 };
 
+/*
+ * Some v2 models report the stick buttons in separate bits
+ */
+static const struct dmi_system_id alps_dmi_has_separate_stick_buttons[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+       {
+               /* Extrapolated from other entries */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D420"),
+               },
+       },
+       {
+               /* Reported-by: Hans de Bruin <jmdebruin@xmsnet.nl> */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D430"),
+               },
+       },
+       {
+               /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D620"),
+               },
+       },
+       {
+               /* Extrapolated from other entries */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D630"),
+               },
+       },
+#endif
+       { }
+};
+
 static void alps_set_abs_params_st(struct alps_data *priv,
                                   struct input_dev *dev1);
 static void alps_set_abs_params_semi_mt(struct alps_data *priv,
@@ -253,9 +290,8 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
                return;
        }
 
-       /* Dell non interleaved V2 dualpoint has separate stick button bits */
-       if (priv->proto_version == ALPS_PROTO_V2 &&
-           priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
+       /* Some models have separate stick button bits */
+       if (priv->flags & ALPS_STICK_BITS) {
                left |= packet[0] & 1;
                right |= packet[0] & 2;
                middle |= packet[0] & 4;
@@ -2552,8 +2588,6 @@ static int alps_set_protocol(struct psmouse *psmouse,
        priv->byte0 = protocol->byte0;
        priv->mask0 = protocol->mask0;
        priv->flags = protocol->flags;
-       if (dmi_name_in_vendors("Dell"))
-               priv->flags |= ALPS_DELL;
 
        priv->x_max = 2000;
        priv->y_max = 1400;
@@ -2568,6 +2602,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
                priv->set_abs_params = alps_set_abs_params_st;
                priv->x_max = 1023;
                priv->y_max = 767;
+               if (dmi_check_system(alps_dmi_has_separate_stick_buttons))
+                       priv->flags |= ALPS_STICK_BITS;
                break;
 
        case ALPS_PROTO_V3:
index 5f191071d44a033d45d1f433475cd3b7d0b4befd..e4eb048d1bf63f8bc2d7027800a81a58cdf4f855 100644 (file)
@@ -241,14 +241,10 @@ static int cyapa_gen6_read_sys_info(struct cyapa *cyapa)
        memcpy(&cyapa->product_id[13], &resp_data[62], 2);
        cyapa->product_id[15] = '\0';
 
+       /* Get the number of Rx electrodes. */
        rotat_align = resp_data[68];
-       if (rotat_align) {
-               cyapa->electrodes_rx = cyapa->electrodes_y;
-               cyapa->electrodes_rx = cyapa->electrodes_y;
-       } else {
-               cyapa->electrodes_rx = cyapa->electrodes_x;
-               cyapa->electrodes_rx = cyapa->electrodes_y;
-       }
+       cyapa->electrodes_rx =
+               rotat_align ? cyapa->electrodes_y : cyapa->electrodes_x;
        cyapa->aligned_electrodes_rx = (cyapa->electrodes_rx + 3) & ~3u;
 
        if (!cyapa->electrodes_x || !cyapa->electrodes_y ||
index 73670f2aebfd5e189ab794c6ffd4b44759bb5acb..c0ec26118732879f7674f68bd0458b3d27602f61 100644 (file)
@@ -60,7 +60,7 @@ struct elan_transport_ops {
        int (*get_sm_version)(struct i2c_client *client,
                              u8* ic_type, u8 *version);
        int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
-       int (*get_product_id)(struct i2c_client *client, u8 *id);
+       int (*get_product_id)(struct i2c_client *client, u16 *id);
 
        int (*get_max)(struct i2c_client *client,
                       unsigned int *max_x, unsigned int *max_y);
index fa945304b9a576d4303c778eca929a5f3517092a..5e1665bbaa0baca86e2c865ba88162be2f209d2e 100644 (file)
@@ -40,7 +40,7 @@
 #include "elan_i2c.h"
 
 #define DRIVER_NAME            "elan_i2c"
-#define ELAN_DRIVER_VERSION    "1.6.0"
+#define ELAN_DRIVER_VERSION    "1.6.1"
 #define ETP_MAX_PRESSURE       255
 #define ETP_FWIDTH_REDUCE      90
 #define ETP_FINGER_WIDTH       15
@@ -76,7 +76,7 @@ struct elan_tp_data {
        unsigned int            x_res;
        unsigned int            y_res;
 
-       u8                      product_id;
+       u16                     product_id;
        u8                      fw_version;
        u8                      sm_version;
        u8                      iap_version;
@@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
                           u16 *signature_address)
 {
        switch (iap_version) {
+       case 0x00:
+       case 0x06:
        case 0x08:
                *validpage_count = 512;
                break;
+       case 0x03:
+       case 0x07:
        case 0x09:
+       case 0x0A:
+       case 0x0B:
+       case 0x0C:
                *validpage_count = 768;
                break;
        case 0x0D:
                *validpage_count = 896;
                break;
+       case 0x0E:
+               *validpage_count = 640;
+               break;
        default:
                /* unknown ic type clear value */
                *validpage_count = 0;
@@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data)
 
        error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
                                &data->fw_signature_address);
-       if (error) {
-               dev_err(&data->client->dev,
-                       "unknown iap version %d\n", data->iap_version);
-               return error;
-       }
+       if (error)
+               dev_warn(&data->client->dev,
+                        "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n",
+                        data->iap_version, data->ic_type);
 
        return 0;
 }
@@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev,
        const u8 *fw_signature;
        static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF};
 
+       if (data->fw_validpage_count == 0)
+               return -EINVAL;
+
        /* Look for a firmware with the product id appended. */
        fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id);
        if (!fw_name) {
index 683c840c9dd73f31d1279b7db88dfacd1f42b7b0..a679e56c44cd49ddea4361aebdb97d4fe7f1e12e 100644 (file)
@@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client,
        return 0;
 }
 
-static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
+static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id)
 {
        int error;
        u8 val[3];
@@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
                return error;
        }
 
-       *id = val[0];
+       *id = le16_to_cpup((__le16 *)val);
        return 0;
 }
 
index ff36a366b2aa1aadbe3c9a7f0e9de3eb687d83c3..cb6aecbc1dc28a20885885c4b361ecf8343bc445 100644 (file)
@@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
        return 0;
 }
 
-static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
+static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
 {
        int error;
        u8 val[3];
@@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
                return error;
        }
 
-       *id = val[1];
+       *id = be16_to_cpup((__be16 *)val);
        return 0;
 }
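Both transport back-ends now hand back a 16-bit product id assembled from the raw response buffer: the I2C register read is converted little-endian with le16_to_cpup(), the SMBus command big-endian with be16_to_cpup(). A stand-alone illustration of the two byte orders (the buffer contents are hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint16_t read_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

static uint16_t read_be16(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
        uint8_t val[3] = { 0x12, 0x34, 0x00 };  /* hypothetical response bytes */

        /* I2C path: the id is little-endian on the wire. */
        printf("le16 id = 0x%04x\n", read_le16(val));
        /* SMBus path: the same two bytes arrive big-endian. */
        printf("be16 id = 0x%04x\n", read_be16(val));
        return 0;
}
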
 
index 994ae788615698bf3af613f1de765fe0411be995..6025eb430c0a5010c908961ccf8897943fd3c945 100644 (file)
@@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse)
        struct synaptics_data *priv = psmouse->private;
 
        priv->mode = 0;
-
-       if (priv->absolute_mode) {
+       if (priv->absolute_mode)
                priv->mode |= SYN_BIT_ABSOLUTE_MODE;
-               if (SYN_CAP_EXTENDED(priv->capabilities))
-                       priv->mode |= SYN_BIT_W_MODE;
-       }
-
-       if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture)
+       if (priv->disable_gesture)
                priv->mode |= SYN_BIT_DISABLE_GESTURE;
-
        if (psmouse->rate >= 80)
                priv->mode |= SYN_BIT_HIGH_RATE;
+       if (SYN_CAP_EXTENDED(priv->capabilities))
+               priv->mode |= SYN_BIT_W_MODE;
 
        if (synaptics_mode_cmd(psmouse, priv->mode))
                return -1;
index 75516996db2070b621c6250cdd9710acf473ad42..316f2c8971011dae527d506ee18d49ce96f316e0 100644 (file)
@@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
         * time before the ACK arrives.
         */
        if (ps2_sendbyte(ps2dev, command & 0xff,
-                        command == PS2_CMD_RESET_BAT ? 1000 : 200))
-               goto out;
+                        command == PS2_CMD_RESET_BAT ? 1000 : 200)) {
+               serio_pause_rx(ps2dev->serio);
+               goto out_reset_flags;
+       }
 
-       for (i = 0; i < send; i++)
-               if (ps2_sendbyte(ps2dev, param[i], 200))
-                       goto out;
+       for (i = 0; i < send; i++) {
+               if (ps2_sendbyte(ps2dev, param[i], 200)) {
+                       serio_pause_rx(ps2dev->serio);
+                       goto out_reset_flags;
+               }
+       }
 
        /*
         * The reset command takes a long time to execute.
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
                                   !(ps2dev->flags & PS2_FLAG_CMD), timeout);
        }
 
+       serio_pause_rx(ps2dev->serio);
+
        if (param)
                for (i = 0; i < receive; i++)
                        param[i] = ps2dev->cmdbuf[(receive - 1) - i];
 
        if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1))
-               goto out;
+               goto out_reset_flags;
 
        rc = 0;
 
- out:
-       serio_pause_rx(ps2dev->serio);
+ out_reset_flags:
        ps2dev->flags = 0;
        serio_continue_rx(ps2dev->serio);
 
index 26b45936f9fdf3334c6f083aeaa7c5ee5bb178dd..1e8cd6f1fe9e875005af95b54787890f81116a5f 100644 (file)
@@ -194,6 +194,7 @@ static int __init parkbd_init(void)
        parkbd_port = parkbd_allocate_serio();
        if (!parkbd_port) {
                parport_release(parkbd_dev);
+               parport_unregister_device(parkbd_dev);
                return -ENOMEM;
        }
 
index 600dcceff5426aaf4f6fc7b20ce966960bf0aa92..deb14c12ae8b19a84c5ba8e0074f4bac43260ecc 100644 (file)
@@ -1006,6 +1006,7 @@ config TOUCHSCREEN_SUN4I
 config TOUCHSCREEN_SUR40
        tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen"
        depends on USB && MEDIA_USB_SUPPORT && HAS_DMA
+       depends on VIDEO_V4L2
        select INPUT_POLLDEV
        select VIDEOBUF2_DMA_SG
        help
index 0f5f968592bd02afd9c5381a8839b3428c3bcbea..04edc8f7122fa77d9c043694ba8f47d0c83dab0c 100644 (file)
@@ -668,18 +668,22 @@ static int ads7846_no_filter(void *ads, int data_idx, int *val)
 
 static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m)
 {
+       int value;
        struct spi_transfer *t =
                list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
 
        if (ts->model == 7845) {
-               return be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3;
+               value = be16_to_cpup((__be16 *)&(((char *)t->rx_buf)[1]));
        } else {
                /*
                 * adjust:  on-wire is a must-ignore bit, a BE12 value, then
                 * padding; built from two 8 bit values written msb-first.
                 */
-               return be16_to_cpup((__be16 *)t->rx_buf) >> 3;
+               value = be16_to_cpup((__be16 *)t->rx_buf);
        }
+
+       /* enforce ADC output is 12 bits width */
+       return (value >> 3) & 0xfff;
 }
 
 static void ads7846_update_value(struct spi_message *m, int val)
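The rewritten ads7846_get_value() first assembles the big-endian 16-bit word from the receive buffer and only then drops the three trailing bits and masks to the converter's 12-bit range. A small stand-alone sketch of that extraction (the sample bytes are made up):

#include <stdint.h>
#include <stdio.h>

/* On the wire: one must-ignore bit, a 12-bit big-endian sample, then padding. */
static int adc_value(const uint8_t *rx_buf)
{
        int value = (rx_buf[0] << 8) | rx_buf[1];    /* be16_to_cpup() equivalent */

        return (value >> 3) & 0xfff;                 /* enforce the 12-bit width */
}

int main(void)
{
        uint8_t sample[2] = { 0x7f, 0xf8 };          /* made-up SPI receive bytes */

        printf("value = %d\n", adc_value(sample));   /* prints 4095 */
        return 0;
}
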
index ff0b75813daa21cff6f8baccfbf453a06ebf383b..8275267eac25441f308e6103e82d48830d1feb71 100644 (file)
@@ -94,7 +94,7 @@ struct imx6ul_tsc {
  * TSC module need ADC to get the measure value. So
  * before config TSC, we should initialize ADC module.
  */
-static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
+static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
 {
        int adc_hc = 0;
        int adc_gc;
@@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
 
        timeout = wait_for_completion_timeout
                        (&tsc->completion, ADC_TIMEOUT);
-       if (timeout == 0)
+       if (timeout == 0) {
                dev_err(tsc->dev, "Timeout for adc calibration\n");
+               return -ETIMEDOUT;
+       }
 
        adc_gs = readl(tsc->adc_regs + REG_ADC_GS);
-       if (adc_gs & ADC_CALF)
+       if (adc_gs & ADC_CALF) {
                dev_err(tsc->dev, "ADC calibration failed\n");
+               return -EINVAL;
+       }
 
        /* TSC need the ADC work in hardware trigger */
        adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
        adc_cfg |= ADC_HARDWARE_TRIGGER;
        writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
+
+       return 0;
 }
 
 /*
@@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
        writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
 }
 
-static void imx6ul_tsc_init(struct imx6ul_tsc *tsc)
+static int imx6ul_tsc_init(struct imx6ul_tsc *tsc)
 {
-       imx6ul_adc_init(tsc);
+       int err;
+
+       err = imx6ul_adc_init(tsc);
+       if (err)
+               return err;
        imx6ul_tsc_channel_config(tsc);
        imx6ul_tsc_set(tsc);
+
+       return 0;
 }
 
 static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
@@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev)
                return err;
        }
 
-       imx6ul_tsc_init(tsc);
-
-       return 0;
+       return imx6ul_tsc_init(tsc);
 }
 
 static void imx6ul_tsc_close(struct input_dev *input_dev)
@@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
        int tsc_irq;
        int adc_irq;
 
-       tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL);
+       tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
        if (!tsc)
                return -ENOMEM;
 
@@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
        if (!input_dev)
                return -ENOMEM;
 
-       input_dev->name = "iMX6UL TouchScreen Controller";
+       input_dev->name = "iMX6UL Touchscreen Controller";
        input_dev->id.bustype = BUS_HOST;
 
        input_dev->open = imx6ul_tsc_open;
@@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
        }
 
        adc_irq = platform_get_irq(pdev, 1);
-       if (adc_irq <= 0) {
+       if (adc_irq < 0) {
                dev_err(&pdev->dev, "no adc irq resource?\n");
                return adc_irq;
        }
@@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
                        goto out;
                }
 
-               imx6ul_tsc_init(tsc);
+               retval = imx6ul_tsc_init(tsc);
        }
 
 out:
index 24d704cd9f882fb8517d036b00d5f64d46787993..7fbb3b0c857150170a293981e1e7940d70478c3f 100644 (file)
@@ -139,14 +139,14 @@ static void lpc32xx_stop_tsc(struct lpc32xx_tsc *tsc)
                   tsc_readl(tsc, LPC32XX_TSC_CON) &
                             ~LPC32XX_TSC_ADCCON_AUTO_EN);
 
-       clk_disable(tsc->clk);
+       clk_disable_unprepare(tsc->clk);
 }
 
 static void lpc32xx_setup_tsc(struct lpc32xx_tsc *tsc)
 {
        u32 tmp;
 
-       clk_enable(tsc->clk);
+       clk_prepare_enable(tsc->clk);
 
        tmp = tsc_readl(tsc, LPC32XX_TSC_CON) & ~LPC32XX_TSC_ADCCON_POWER_UP;
 
index 7cce87650fc8da3e401ec9a9aa1807ea1a2d9dfb..1fafc9f57af6c75a7a8a9e9d6b90e016f4e90272 100644 (file)
@@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev)
        if (of_property_read_u32(np, "x-size", &pdata->x_size)) {
                dev_err(dev, "failed to get x-size property\n");
                return NULL;
-       };
+       }
 
        if (of_property_read_u32(np, "y-size", &pdata->y_size)) {
                dev_err(dev, "failed to get y-size property\n");
                return NULL;
-       };
+       }
 
        of_property_read_u32(np, "contact-threshold",
                                &pdata->contact_threshold);
index 4664c2a96c67fee361c3476ddc8f9a8e8842d271..cbe6a890a93a0d1448f46e32edbfdc5231ba7098 100644 (file)
@@ -23,8 +23,7 @@ config IOMMU_IO_PGTABLE
 config IOMMU_IO_PGTABLE_LPAE
        bool "ARMv7/v8 Long Descriptor Format"
        select IOMMU_IO_PGTABLE
-       # SWIOTLB guarantees a dma_to_phys() implementation
-       depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB)
+       depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
        help
          Enable support for the ARM long descriptor pagetable format.
          This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -43,7 +42,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
 endmenu
 
 config IOMMU_IOVA
-       bool
+       tristate
 
 config OF_IOMMU
        def_bool y
index f82060e778a23bb7a8901ef2356d42b5363d93a6..532e2a211fe1cf9b3d7ff71d36cbcfde49bd29df 100644 (file)
@@ -1974,8 +1974,8 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
 static void clear_dte_entry(u16 devid)
 {
        /* remove entry from the device table seen by the hardware */
-       amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
-       amd_iommu_dev_table[devid].data[1] = 0;
+       amd_iommu_dev_table[devid].data[0]  = IOMMU_PTE_P | IOMMU_PTE_TV;
+       amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
 
        amd_iommu_apply_erratum_63(devid);
 }
@@ -2006,6 +2006,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
 {
        struct amd_iommu *iommu;
 
+       /*
+        * First check if the device is still attached. It might already
+        * be detached from its domain because the generic
+        * iommu_detach_group code detached it and we try again here in
+        * our alias handling.
+        */
+       if (!dev_data->domain)
+               return;
+
        iommu = amd_iommu_rlookup_table[dev_data->devid];
 
        /* decrease reference counters */
index 5ef347a13cb5d54789c07869b0527d81cb24365e..1b066e7d144d6fdc0043cfbfa340ce2e7a209c50 100644 (file)
@@ -1256,6 +1256,9 @@ static int iommu_init_pci(struct amd_iommu *iommu)
        if (!iommu->dev)
                return -ENODEV;
 
+       /* Prevent binding other PCI device drivers to IOMMU devices */
+       iommu->dev->match_driver = false;
+
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
index f65908841be0184db61f049bc8e2b5a622ca2fbe..c9b64722f62309d76e56ad52c5a3691737ffd6ac 100644 (file)
 #define IOMMU_PTE_IR (1ULL << 61)
 #define IOMMU_PTE_IW (1ULL << 62)
 
+#define DTE_FLAG_MASK  (0x3ffULL << 32)
 #define DTE_FLAG_IOTLB (0x01UL << 32)
 #define DTE_FLAG_GV    (0x01ULL << 55)
 #define DTE_GLX_SHIFT  (56)
index 1131664b918b0a574c7cc654a6a3cd04107f8e81..d21d4edf7236abac49072c086290b76d2ae201b5 100644 (file)
@@ -516,6 +516,13 @@ static void do_fault(struct work_struct *work)
                goto out;
        }
 
+       if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) {
+               /* handle_mm_fault would BUG_ON() */
+               up_read(&mm->mmap_sem);
+               handle_fault_error(fault);
+               goto out;
+       }
+
        ret = handle_mm_fault(mm, vma, address, write);
        if (ret & VM_FAULT_ERROR) {
                /* failed to service fault */
index dafaf59dc3b82833fb78d55e8f194ff728999d35..286e890e7d64caa31867044f568e3a3cf6ce2ba9 100644 (file)
@@ -56,6 +56,7 @@
 #define IDR0_TTF_SHIFT                 2
 #define IDR0_TTF_MASK                  0x3
 #define IDR0_TTF_AARCH64               (2 << IDR0_TTF_SHIFT)
+#define IDR0_TTF_AARCH32_64            (3 << IDR0_TTF_SHIFT)
 #define IDR0_S1P                       (1 << 1)
 #define IDR0_S2P                       (1 << 0)
 
 #define CMDQ_TLBI_0_VMID_SHIFT         32
 #define CMDQ_TLBI_0_ASID_SHIFT         48
 #define CMDQ_TLBI_1_LEAF               (1UL << 0)
-#define CMDQ_TLBI_1_ADDR_MASK          ~0xfffUL
+#define CMDQ_TLBI_1_VA_MASK            ~0xfffUL
+#define CMDQ_TLBI_1_IPA_MASK           0xfffffffff000UL
 
 #define CMDQ_PRI_0_SSID_SHIFT          12
 #define CMDQ_PRI_0_SSID_MASK           0xfffffUL
@@ -770,11 +772,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
                break;
        case CMDQ_OP_TLBI_NH_VA:
                cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
-               /* Fallthrough */
+               cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
+               cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
+               break;
        case CMDQ_OP_TLBI_S2_IPA:
                cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
                cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
-               cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_ADDR_MASK;
+               cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
                break;
        case CMDQ_OP_TLBI_NH_ASID:
                cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
@@ -2460,7 +2464,13 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
        }
 
        /* We only support the AArch64 table format at present */
-       if ((reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) < IDR0_TTF_AARCH64) {
+       switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
+       case IDR0_TTF_AARCH32_64:
+               smmu->ias = 40;
+               /* Fallthrough */
+       case IDR0_TTF_AARCH64:
+               break;
+       default:
                dev_err(smmu->dev, "AArch64 table format not supported!\n");
                return -ENXIO;
        }
@@ -2541,8 +2551,7 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
                dev_warn(smmu->dev,
                         "failed to set DMA mask for table walker\n");
 
-       if (!smmu->ias)
-               smmu->ias = smmu->oas;
+       smmu->ias = max(smmu->ias, smmu->oas);
 
        dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
                 smmu->ias, smmu->oas, smmu->features);
index 2d7349a3ee1496408f051b4da8accebc8dd02ec1..d65cf42399e8e5aa0f856a1ab6869c041adc0c48 100644 (file)
@@ -2115,15 +2115,19 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                return -ENOMEM;
                        /* It is large page*/
                        if (largepage_lvl > 1) {
+                               unsigned long nr_superpages, end_pfn;
+
                                pteval |= DMA_PTE_LARGE_PAGE;
                                lvl_pages = lvl_to_nr_pages(largepage_lvl);
+
+                               nr_superpages = sg_res / lvl_pages;
+                               end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
+
                                /*
                                 * Ensure that old small page tables are
-                                * removed to make room for superpage,
-                                * if they exist.
+                                * removed to make room for superpage(s).
                                 */
-                               dma_pte_free_pagetable(domain, iov_pfn,
-                                                      iov_pfn + lvl_pages - 1);
+                               dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
                        } else {
                                pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
                        }
@@ -2301,6 +2305,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 
        if (ret) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
+               free_devinfo_mem(info);
                return NULL;
        }
 
@@ -3215,6 +3220,8 @@ static struct iova *intel_alloc_iova(struct device *dev,
 
        /* Restrict dma_mask to the width that the iommu can handle */
        dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+       /* Ensure we reserve the whole size-aligned region */
+       nrpages = __roundup_pow_of_two(nrpages);
 
        if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
                /*
@@ -3711,7 +3718,7 @@ static inline int iommu_devinfo_cache_init(void)
 static int __init iommu_init_mempool(void)
 {
        int ret;
-       ret = iommu_iova_cache_init();
+       ret = iova_cache_get();
        if (ret)
                return ret;
 
@@ -3725,7 +3732,7 @@ static int __init iommu_init_mempool(void)
 
        kmem_cache_destroy(iommu_domain_cache);
 domain_error:
-       iommu_iova_cache_destroy();
+       iova_cache_put();
 
        return -ENOMEM;
 }
@@ -3734,7 +3741,7 @@ static void __init iommu_exit_mempool(void)
 {
        kmem_cache_destroy(iommu_devinfo_cache);
        kmem_cache_destroy(iommu_domain_cache);
-       iommu_iova_cache_destroy();
+       iova_cache_put();
 }
 
 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
index 73c07482f48763c5af3f0d43d73a2f04774bb74d..7df97777662d4d8a9284a8f2cae4cc0e8891210a 100644 (file)
@@ -202,9 +202,9 @@ typedef u64 arm_lpae_iopte;
 
 static bool selftest_running = false;
 
-static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
+static dma_addr_t __arm_lpae_dma_addr(void *pages)
 {
-       return phys_to_dma(dev, virt_to_phys(pages));
+       return (dma_addr_t)virt_to_phys(pages);
 }
 
 static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
@@ -223,10 +223,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
                        goto out_free;
                /*
                 * We depend on the IOMMU being able to work with any physical
-                * address directly, so if the DMA layer suggests it can't by
-                * giving us back some translation, that bodes very badly...
+                * address directly, so if the DMA layer suggests otherwise by
+                * translating or truncating them, that bodes very badly...
                 */
-               if (dma != __arm_lpae_dma_addr(dev, pages))
+               if (dma != virt_to_phys(pages))
                        goto out_unmap;
        }
 
@@ -243,10 +243,8 @@ out_free:
 static void __arm_lpae_free_pages(void *pages, size_t size,
                                  struct io_pgtable_cfg *cfg)
 {
-       struct device *dev = cfg->iommu_dev;
-
        if (!selftest_running)
-               dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
+               dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
                                 size, DMA_TO_DEVICE);
        free_pages_exact(pages, size);
 }
@@ -254,12 +252,11 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
 static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
                               struct io_pgtable_cfg *cfg)
 {
-       struct device *dev = cfg->iommu_dev;
-
        *ptep = pte;
 
        if (!selftest_running)
-               dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
+               dma_sync_single_for_device(cfg->iommu_dev,
+                                          __arm_lpae_dma_addr(ptep),
                                           sizeof(pte), DMA_TO_DEVICE);
 }
 
@@ -629,6 +626,11 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
        if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
                return NULL;
 
+       if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
+               dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
+               return NULL;
+       }
+
        data = kmalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return NULL;
index b7c3d923f3e1c0569c42492d435b7c4d9a321caa..fa0adef32bd6d3a4af1b97ee3b1fb22dde6c225f 100644 (file)
  */
 
 #include <linux/iova.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 
-static struct kmem_cache *iommu_iova_cache;
-
-int iommu_iova_cache_init(void)
-{
-       int ret = 0;
-
-       iommu_iova_cache = kmem_cache_create("iommu_iova",
-                                        sizeof(struct iova),
-                                        0,
-                                        SLAB_HWCACHE_ALIGN,
-                                        NULL);
-       if (!iommu_iova_cache) {
-               pr_err("Couldn't create iova cache\n");
-               ret = -ENOMEM;
-       }
-
-       return ret;
-}
-
-void iommu_iova_cache_destroy(void)
-{
-       kmem_cache_destroy(iommu_iova_cache);
-}
-
-struct iova *alloc_iova_mem(void)
-{
-       return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
-}
-
-void free_iova_mem(struct iova *iova)
-{
-       kmem_cache_free(iommu_iova_cache, iova);
-}
-
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        unsigned long start_pfn, unsigned long pfn_32bit)
@@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        iovad->start_pfn = start_pfn;
        iovad->dma_32bit_pfn = pfn_32bit;
 }
+EXPORT_SYMBOL_GPL(init_iova_domain);
 
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
@@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
        }
 }
 
-/* Computes the padding size required, to make the
- * the start address naturally aligned on its size
+/*
+ * Computes the padding size required, to make the start address
+ * naturally aligned on the power-of-two order of its size
  */
-static int
-iova_get_pad_size(int size, unsigned int limit_pfn)
+static unsigned int
+iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
 {
-       unsigned int pad_size = 0;
-       unsigned int order = ilog2(size);
-
-       if (order)
-               pad_size = (limit_pfn + 1) % (1 << order);
-
-       return pad_size;
+       return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
 }
 
 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
@@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
        rb_insert_color(&iova->node, root);
 }
 
+static struct kmem_cache *iova_cache;
+static unsigned int iova_cache_users;
+static DEFINE_MUTEX(iova_cache_mutex);
+
+struct iova *alloc_iova_mem(void)
+{
+       return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(alloc_iova_mem);
+
+void free_iova_mem(struct iova *iova)
+{
+       kmem_cache_free(iova_cache, iova);
+}
+EXPORT_SYMBOL(free_iova_mem);
+
+int iova_cache_get(void)
+{
+       mutex_lock(&iova_cache_mutex);
+       if (!iova_cache_users) {
+               iova_cache = kmem_cache_create(
+                       "iommu_iova", sizeof(struct iova), 0,
+                       SLAB_HWCACHE_ALIGN, NULL);
+               if (!iova_cache) {
+                       mutex_unlock(&iova_cache_mutex);
+                       printk(KERN_ERR "Couldn't create iova cache\n");
+                       return -ENOMEM;
+               }
+       }
+
+       iova_cache_users++;
+       mutex_unlock(&iova_cache_mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(iova_cache_get);
+
+void iova_cache_put(void)
+{
+       mutex_lock(&iova_cache_mutex);
+       if (WARN_ON(!iova_cache_users)) {
+               mutex_unlock(&iova_cache_mutex);
+               return;
+       }
+       iova_cache_users--;
+       if (!iova_cache_users)
+               kmem_cache_destroy(iova_cache);
+       mutex_unlock(&iova_cache_mutex);
+}
+EXPORT_SYMBOL_GPL(iova_cache_put);
+
 /**
  * alloc_iova - allocates an iova
  * @iovad: - iova domain in question
@@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
        if (!new_iova)
                return NULL;
 
-       /* If size aligned is set then round the size to
-        * to next power of two.
-        */
-       if (size_aligned)
-               size = __roundup_pow_of_two(size);
-
        ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
                        new_iova, size_aligned);
 
@@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 
        return new_iova;
 }
+EXPORT_SYMBOL_GPL(alloc_iova);
 
 /**
  * find_iova - find's an iova for a given pfn
@@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return NULL;
 }
+EXPORT_SYMBOL_GPL(find_iova);
 
 /**
  * __free_iova - frees the given iova
@@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        free_iova_mem(iova);
 }
+EXPORT_SYMBOL_GPL(__free_iova);
 
 /**
  * free_iova - finds and frees the iova for a given pfn
@@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
                __free_iova(iovad, iova);
 
 }
+EXPORT_SYMBOL_GPL(free_iova);
 
 /**
  * put_iova_domain - destroys the iova doamin
@@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad)
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(put_iova_domain);
 
 static int
 __is_range_overlap(struct rb_node *node,
@@ -467,6 +480,7 @@ finish:
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return iova;
 }
+EXPORT_SYMBOL_GPL(reserve_iova);
 
 /**
  * copy_reserved_iova - copies the reserved between domains
@@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
        }
        spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(copy_reserved_iova);
 
 struct iova *
 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
@@ -534,3 +549,6 @@ error:
                free_iova_mem(prev);
        return NULL;
 }
+
+MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
+MODULE_LICENSE("GPL");
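Among the iova.c changes, iova_get_pad_size() now computes in one expression how many page frames must be skipped below limit_pfn so that a size-aligned allocation still fits: (limit_pfn + 1 - size) & (roundup_pow_of_two(size) - 1). A self-contained check of that identity (the helpers below are local to the sketch, not kernel API):

#include <stdio.h>

/* Smallest power of two >= x, for x >= 1 (sketch-local helper). */
static unsigned long roundup_pow_of_two(unsigned long x)
{
        unsigned long p = 1;

        while (p < x)
                p <<= 1;
        return p;
}

static unsigned int pad_size(unsigned int size, unsigned int limit_pfn)
{
        return (limit_pfn + 1 - size) & (roundup_pow_of_two(size) - 1);
}

int main(void)
{
        unsigned int size = 6, limit_pfn = 1023;
        unsigned int pad = pad_size(size, limit_pfn);
        unsigned int start = limit_pfn - size - pad + 1;

        /*
         * The range [start, start + size) ends at or below limit_pfn and
         * starts on a roundup_pow_of_two(size) boundary.
         */
        printf("pad = %u, start = %u, aligned = %s\n", pad, start,
               start % roundup_pow_of_two(size) ? "no" : "yes");
        return 0;
}
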
index 27b52c8729cd1e1a6f17e9945a14e894a78ee81f..4d7294e5d98271493e0ef6d2d90d10108178c83f 100644 (file)
@@ -123,6 +123,7 @@ config RENESAS_INTC_IRQPIN
 
 config RENESAS_IRQC
        bool
+       select GENERIC_IRQ_CHIP
        select IRQ_DOMAIN
 
 config ST_IRQCHIP
@@ -187,3 +188,8 @@ config IMX_GPCV2
        select IRQ_DOMAIN
        help
          Enables the wakeup IRQs for IMX platforms with GPCv2 block
+
+config IRQ_MXS
+       def_bool y if MACH_ASM9260 || ARCH_MXS
+       select IRQ_DOMAIN
+       select STMP_DEVICE
index bb3048f00e647d5c8c0665a7fb31cedc593f4e46..177f78f6e6d6313fd9fd16595fe36089296ce25f 100644 (file)
@@ -6,7 +6,7 @@ obj-$(CONFIG_ARCH_EXYNOS)               += exynos-combiner.o
 obj-$(CONFIG_ARCH_HIP04)               += irq-hip04.o
 obj-$(CONFIG_ARCH_MMP)                 += irq-mmp.o
 obj-$(CONFIG_ARCH_MVEBU)               += irq-armada-370-xp.o
-obj-$(CONFIG_ARCH_MXS)                 += irq-mxs.o
+obj-$(CONFIG_IRQ_MXS)                  += irq-mxs.o
 obj-$(CONFIG_ARCH_TEGRA)               += irq-tegra.o
 obj-$(CONFIG_ARCH_S3C24XX)             += irq-s3c24xx.o
 obj-$(CONFIG_DW_APB_ICTL)              += irq-dw-apb-ictl.o
diff --git a/drivers/irqchip/alphascale_asm9260-icoll.h b/drivers/irqchip/alphascale_asm9260-icoll.h
new file mode 100644 (file)
index 0000000..5cec108
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ALPHASCALE_ASM9260_ICOLL_H
+#define _ALPHASCALE_ASM9260_ICOLL_H
+
+#define ASM9260_NUM_IRQS               64
+/*
+ * this device provides 4 offsets for each register:
+ * 0x0 - plain read write mode
+ * 0x4 - set mode, OR logic.
+ * 0x8 - clr mode, XOR logic.
+ * 0xc - toggle mode.
+ */
+
+#define ASM9260_HW_ICOLL_VECTOR                                0x0000
+/*
+ * bits 31:2
+ * This register presents the vector address for the interrupt currently
+ * active on the CPU IRQ input. Writing to this register notifies the
+ * interrupt collector that the interrupt service routine for the current
+ * interrupt has been entered.
+ * The exception trap should have a LDPC instruction from this address:
+ * LDPC ASM9260_HW_ICOLL_VECTOR_ADDR; IRQ exception at 0xffff0018
+ */
+
+/*
+ * The Interrupt Collector Level Acknowledge Register is used by software to
+ * indicate the completion of an interrupt on a specific level.
+ * This register is written at the very end of an interrupt service routine. If
+ * nesting is used then the CPU irq must be turned on before writing to this
+ * register to avoid a race condition in the CPU interrupt hardware.
+ */
+#define ASM9260_HW_ICOLL_LEVELACK                      0x0010
+#define ASM9260_BM_LEVELn(nr)                          BIT(nr)
+
+#define ASM9260_HW_ICOLL_CTRL                          0x0020
+/*
+ * ASM9260_BM_CTRL_SFTRST and ASM9260_BM_CTRL_CLKGATE are not available on
+ * asm9260.
+ */
+#define ASM9260_BM_CTRL_SFTRST                         BIT(31)
+#define ASM9260_BM_CTRL_CLKGATE                                BIT(30)
+/* disable interrupt level nesting */
+#define ASM9260_BM_CTRL_NO_NESTING                     BIT(19)
+/*
+ * Set this bit to one to enable the RISC32-style read side effect associated with
+ * the vector address register. In this mode, interrupt in-service is signaled
+ * by the read of the ASM9260_HW_ICOLL_VECTOR register to acquire the interrupt
+ * vector address. Set this bit to zero for normal operation, in which the ISR
+ * signals in-service explicitly by means of a write to the
+ * ASM9260_HW_ICOLL_VECTOR register.
+ * 0 - Must Write to Vector register to go in-service.
+ * 1 - Go in-service as a read side effect
+ */
+#define ASM9260_BM_CTRL_ARM_RSE_MODE                   BIT(18)
+#define ASM9260_BM_CTRL_IRQ_ENABLE                     BIT(16)
+
+#define ASM9260_HW_ICOLL_STAT_OFFSET                   0x0030
+/*
+ * bits 5:0
+ * Vector number of current interrupt. Multiply by 4 and add to vector base
+ * address to obtain the value in ASM9260_HW_ICOLL_VECTOR.
+ */
+
+/*
+ * RAW0 and RAW1 provide a read-only view of the raw interrupt request lines
+ * coming from various parts of the chip. Its purpose is to improve diagnostic
+ * observability.
+ */
+#define ASM9260_HW_ICOLL_RAW0                          0x0040
+#define ASM9260_HW_ICOLL_RAW1                          0x0050
+
+#define ASM9260_HW_ICOLL_INTERRUPT0                    0x0060
+#define ASM9260_HW_ICOLL_INTERRUPTn(n)         (0x0060 + ((n) >> 2) * 0x10)
+/*
+ * WARNING: Modifying the priority of an enabled interrupt may result in
+ * undefined behavior.
+ */
+#define ASM9260_BM_INT_PRIORITY_MASK                   0x3
+#define ASM9260_BM_INT_ENABLE                          BIT(2)
+#define ASM9260_BM_INT_SOFTIRQ                         BIT(3)
+
+#define ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n)           (((n) & 0x3) << 3)
+#define ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(n)          (1 << (2 + \
+                       ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n)))
+
+#define ASM9260_HW_ICOLL_VBASE                         0x0160
+/*
+ * bits 31:2
+ * This bitfield holds the upper 30 bits of the base address of the vector
+ * table.
+ */
+
+#define ASM9260_HW_ICOLL_CLEAR0                                0x01d0
+#define ASM9260_HW_ICOLL_CLEAR1                                0x01e0
+#define ASM9260_HW_ICOLL_CLEARn(n)                     ((((n) >> 5) * 0x10) \
+                                                       + SET_REG)
+#define ASM9260_BM_CLEAR_BIT(n)                                BIT((n) & 0x1f)
+
+/* Scratchpad */
+#define ASM9260_HW_ICOLL_UNDEF_VECTOR                  0x01f0
+#endif
index cd7d3bc78e345c49b733b9120c5a5bed2540583b..ead15be2d20ab12c476157fd75be87d0064c8c41 100644 (file)
@@ -144,7 +144,7 @@ static int combiner_irq_domain_xlate(struct irq_domain *d,
                                     unsigned long *out_hwirq,
                                     unsigned int *out_type)
 {
-       if (d->of_node != controller)
+       if (irq_domain_get_of_node(d) != controller)
                return -EINVAL;
 
        if (intsize < 2)
index 655cb967a1f2fe0b9c41f5d3484dbebe6be7331d..389318a3be820a560dbc02c7b5e2bd9b37cdeb35 100644 (file)
@@ -317,6 +317,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
                                        handle_level_irq);
        }
        irq_set_probe(virq);
+       irq_clear_status_flags(virq, IRQ_NOAUTOEN);
 
        return 0;
 }
index 63cd031b2c28d40c9c1296ae466005a5d48843d3..b12a5d58546f922b460d6aac34c073ff363ef1b8 100644 (file)
@@ -114,7 +114,7 @@ int aic_common_irq_domain_xlate(struct irq_domain *d,
 
 static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
 {
-       struct device_node *node = domain->of_node;
+       struct device_node *node = irq_domain_get_of_node(domain);
        struct irq_chip_generic *gc;
        struct aic_chip_data *aic;
        struct property *prop;
index f6d680485beecaf0ec870a0d7492146e3fb72090..62bb840c613f2a660966333f858426af5ab3d93e 100644 (file)
@@ -70,16 +70,15 @@ static struct irq_domain *aic5_domain;
 static asmlinkage void __exception_irq_entry
 aic5_handle(struct pt_regs *regs)
 {
-       struct irq_domain_chip_generic *dgc = aic5_domain->gc;
-       struct irq_chip_generic *gc = dgc->gc[0];
+       struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0);
        u32 irqnr;
        u32 irqstat;
 
-       irqnr = irq_reg_readl(gc, AT91_AIC5_IVR);
-       irqstat = irq_reg_readl(gc, AT91_AIC5_ISR);
+       irqnr = irq_reg_readl(bgc, AT91_AIC5_IVR);
+       irqstat = irq_reg_readl(bgc, AT91_AIC5_ISR);
 
        if (!irqstat)
-               irq_reg_writel(gc, 0, AT91_AIC5_EOICR);
+               irq_reg_writel(bgc, 0, AT91_AIC5_EOICR);
        else
                handle_domain_irq(aic5_domain, irqnr, regs);
 }
@@ -87,8 +86,7 @@ aic5_handle(struct pt_regs *regs)
 static void aic5_mask(struct irq_data *d)
 {
        struct irq_domain *domain = d->domain;
-       struct irq_domain_chip_generic *dgc = domain->gc;
-       struct irq_chip_generic *bgc = dgc->gc[0];
+       struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 
        /*
@@ -105,8 +103,7 @@ static void aic5_mask(struct irq_data *d)
 static void aic5_unmask(struct irq_data *d)
 {
        struct irq_domain *domain = d->domain;
-       struct irq_domain_chip_generic *dgc = domain->gc;
-       struct irq_chip_generic *bgc = dgc->gc[0];
+       struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 
        /*
@@ -123,14 +120,13 @@ static void aic5_unmask(struct irq_data *d)
 static int aic5_retrigger(struct irq_data *d)
 {
        struct irq_domain *domain = d->domain;
-       struct irq_domain_chip_generic *dgc = domain->gc;
-       struct irq_chip_generic *gc = dgc->gc[0];
+       struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
 
        /* Enable interrupt on AIC5 */
-       irq_gc_lock(gc);
-       irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
-       irq_reg_writel(gc, 1, AT91_AIC5_ISCR);
-       irq_gc_unlock(gc);
+       irq_gc_lock(bgc);
+       irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
+       irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
+       irq_gc_unlock(bgc);
 
        return 0;
 }
@@ -138,18 +134,17 @@ static int aic5_retrigger(struct irq_data *d)
 static int aic5_set_type(struct irq_data *d, unsigned type)
 {
        struct irq_domain *domain = d->domain;
-       struct irq_domain_chip_generic *dgc = domain->gc;
-       struct irq_chip_generic *gc = dgc->gc[0];
+       struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
        unsigned int smr;
        int ret;
 
-       irq_gc_lock(gc);
-       irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
-       smr = irq_reg_readl(gc, AT91_AIC5_SMR);
+       irq_gc_lock(bgc);
+       irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
+       smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
        ret = aic_common_set_type(d, type, &smr);
        if (!ret)
-               irq_reg_writel(gc, smr, AT91_AIC5_SMR);
-       irq_gc_unlock(gc);
+               irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
+       irq_gc_unlock(bgc);
 
        return ret;
 }
@@ -159,7 +154,7 @@ static void aic5_suspend(struct irq_data *d)
 {
        struct irq_domain *domain = d->domain;
        struct irq_domain_chip_generic *dgc = domain->gc;
-       struct irq_chip_generic *bgc = dgc->gc[0];
+       struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        int i;
        u32 mask;
@@ -183,7 +178,7 @@ static void aic5_resume(struct irq_data *d)
 {
        struct irq_domain *domain = d->domain;
        struct irq_domain_chip_generic *dgc = domain->gc;
-       struct irq_chip_generic *bgc = dgc->gc[0];
+       struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        int i;
        u32 mask;
@@ -207,7 +202,7 @@ static void aic5_pm_shutdown(struct irq_data *d)
 {
        struct irq_domain *domain = d->domain;
        struct irq_domain_chip_generic *dgc = domain->gc;
-       struct irq_chip_generic *bgc = dgc->gc[0];
+       struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        int i;
 
@@ -262,12 +257,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
                                 irq_hw_number_t *out_hwirq,
                                 unsigned int *out_type)
 {
-       struct irq_domain_chip_generic *dgc = d->gc;
-       struct irq_chip_generic *gc;
+       struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
        unsigned smr;
        int ret;
 
-       if (!dgc)
+       if (!bgc)
                return -EINVAL;
 
        ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
@@ -275,15 +269,13 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
        if (ret)
                return ret;
 
-       gc = dgc->gc[0];
-
-       irq_gc_lock(gc);
-       irq_reg_writel(gc, *out_hwirq, AT91_AIC5_SSR);
-       smr = irq_reg_readl(gc, AT91_AIC5_SMR);
+       irq_gc_lock(bgc);
+       irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
+       smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
        ret = aic_common_set_priority(intspec[2], &smr);
        if (!ret)
-               irq_reg_writel(gc, intspec[2] | smr, AT91_AIC5_SMR);
-       irq_gc_unlock(gc);
+               irq_reg_writel(bgc, intspec[2] | smr, AT91_AIC5_SMR);
+       irq_gc_unlock(bgc);
 
        return ret;
 }
index a7f5626930f506289c6eaea34529c20b2051c750..75573fa431ba8552e21afcc19edc8190c329d131 100644 (file)
@@ -78,10 +78,13 @@ static struct irq_chip crossbar_chip = {
 static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
                            irq_hw_number_t hwirq)
 {
-       struct of_phandle_args args;
+       struct irq_fwspec fwspec;
        int i;
        int err;
 
+       if (!irq_domain_get_of_node(domain->parent))
+               return -EINVAL;
+
        raw_spin_lock(&cb->lock);
        for (i = cb->int_max - 1; i >= 0; i--) {
                if (cb->irq_map[i] == IRQ_FREE) {
@@ -94,13 +97,13 @@ static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
        if (i < 0)
                return -ENODEV;
 
-       args.np = domain->parent->of_node;
-       args.args_count = 3;
-       args.args[0] = 0;       /* SPI */
-       args.args[1] = i;
-       args.args[2] = IRQ_TYPE_LEVEL_HIGH;
+       fwspec.fwnode = domain->parent->fwnode;
+       fwspec.param_count = 3;
+       fwspec.param[0] = 0;    /* SPI */
+       fwspec.param[1] = i;
+       fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
 
-       err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
+       err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
        if (err)
                cb->irq_map[i] = IRQ_FREE;
        else
@@ -112,16 +115,16 @@ static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
 static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq,
                                 unsigned int nr_irqs, void *data)
 {
-       struct of_phandle_args *args = data;
+       struct irq_fwspec *fwspec = data;
        irq_hw_number_t hwirq;
        int i;
 
-       if (args->args_count != 3)
+       if (fwspec->param_count != 3)
                return -EINVAL; /* Not GIC compliant */
-       if (args->args[0] != 0)
+       if (fwspec->param[0] != 0)
                return -EINVAL; /* No PPI should point to this domain */
 
-       hwirq = args->args[1];
+       hwirq = fwspec->param[1];
        if ((hwirq + nr_irqs) > cb->max_crossbar_sources)
                return -EINVAL; /* Can't deal with this */
 
@@ -166,28 +169,31 @@ static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq,
        raw_spin_unlock(&cb->lock);
 }
 
-static int crossbar_domain_xlate(struct irq_domain *d,
-                                struct device_node *controller,
-                                const u32 *intspec, unsigned int intsize,
-                                unsigned long *out_hwirq,
-                                unsigned int *out_type)
+static int crossbar_domain_translate(struct irq_domain *d,
+                                    struct irq_fwspec *fwspec,
+                                    unsigned long *hwirq,
+                                    unsigned int *type)
 {
-       if (d->of_node != controller)
-               return -EINVAL; /* Shouldn't happen, really... */
-       if (intsize != 3)
-               return -EINVAL; /* Not GIC compliant */
-       if (intspec[0] != 0)
-               return -EINVAL; /* No PPI should point to this domain */
+       if (is_of_node(fwspec->fwnode)) {
+               if (fwspec->param_count != 3)
+                       return -EINVAL;
 
-       *out_hwirq = intspec[1];
-       *out_type = intspec[2];
-       return 0;
+               /* No PPI should point to this domain */
+               if (fwspec->param[0] != 0)
+                       return -EINVAL;
+
+               *hwirq = fwspec->param[1];
+               *type = fwspec->param[2];
+               return 0;
+       }
+
+       return -EINVAL;
 }
 
 static const struct irq_domain_ops crossbar_domain_ops = {
-       .alloc  = crossbar_domain_alloc,
-       .free   = crossbar_domain_free,
-       .xlate  = crossbar_domain_xlate,
+       .alloc          = crossbar_domain_alloc,
+       .free           = crossbar_domain_free,
+       .translate      = crossbar_domain_translate,
 };
 
 static int __init crossbar_of_init(struct device_node *node)
index 9448e391cb710363d18df4c3079ae0504b0cbd44..44a077f3a4a2627c03953d9650a66e7607dd91e2 100644 (file)
 
 #include "irq-gic-common.h"
 
+void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
+               void *data)
+{
+       for (; quirks->desc; quirks++) {
+               if (quirks->iidr != (quirks->mask & iidr))
+                       continue;
+               quirks->init(data);
+               pr_info("GIC: enabling workaround for %s\n", quirks->desc);
+       }
+}
+
 int gic_configure_irq(unsigned int irq, unsigned int type,
                       void __iomem *base, void (*sync_access)(void))
 {
index 35a9884778bd5f337f43041f9a24b88b0371734c..fff697db8e2277be577f7f348917875f9066302e 100644 (file)
 #include <linux/of.h>
 #include <linux/irqdomain.h>
 
+struct gic_quirk {
+       const char *desc;
+       void (*init)(void *data);
+       u32 iidr;
+       u32 mask;
+};
+
 int gic_configure_irq(unsigned int irq, unsigned int type,
                        void __iomem *base, void (*sync_access)(void));
 void gic_dist_config(void __iomem *base, int gic_irqs,
                     void (*sync_access)(void));
 void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
+void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
+               void *data);
 
 #endif /* _IRQ_GIC_COMMON_H */
index 12985daa66ab31c9c7981e65ea0926938e5662fa..87f8d104acab37970b834ded3b74a93ba8b692f6 100644 (file)
 #define V2M_MSI_SETSPI_NS             0x040
 #define V2M_MIN_SPI                   32
 #define V2M_MAX_SPI                   1019
+#define V2M_MSI_IIDR                  0xFCC
 
 #define V2M_MSI_TYPER_BASE_SPI(x)      \
               (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
 
 #define V2M_MSI_TYPER_NUM_SPI(x)       ((x) & V2M_MSI_TYPER_NUM_MASK)
 
+/* APM X-Gene with GICv2m MSI_IIDR register value */
+#define XGENE_GICV2M_MSI_IIDR          0x06000170
+
+/* List of flags for specific v2m implementation */
+#define GICV2M_NEEDS_SPI_OFFSET                0x00000001
+
+static LIST_HEAD(v2m_nodes);
+static DEFINE_SPINLOCK(v2m_lock);
+
 struct v2m_data {
-       spinlock_t msi_cnt_lock;
+       struct list_head entry;
+       struct device_node *node;
        struct resource res;    /* GICv2m resource */
        void __iomem *base;     /* GICv2m virt address */
        u32 spi_start;          /* The SPI number that MSIs start */
        u32 nr_spis;            /* The number of SPIs for MSIs */
        unsigned long *bm;      /* MSI vector bitmap */
+       u32 flags;              /* v2m flags for specific implementation */
 };
 
 static void gicv2m_mask_msi_irq(struct irq_data *d)
@@ -98,6 +110,9 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
        msg->address_hi = upper_32_bits(addr);
        msg->address_lo = lower_32_bits(addr);
        msg->data = data->hwirq;
+
+       if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
+               msg->data -= v2m->spi_start;
 }
 
 static struct irq_chip gicv2m_irq_chip = {
@@ -113,17 +128,21 @@ static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
                                       unsigned int virq,
                                       irq_hw_number_t hwirq)
 {
-       struct of_phandle_args args;
+       struct irq_fwspec fwspec;
        struct irq_data *d;
        int err;
 
-       args.np = domain->parent->of_node;
-       args.args_count = 3;
-       args.args[0] = 0;
-       args.args[1] = hwirq - 32;
-       args.args[2] = IRQ_TYPE_EDGE_RISING;
+       if (is_of_node(domain->parent->fwnode)) {
+               fwspec.fwnode = domain->parent->fwnode;
+               fwspec.param_count = 3;
+               fwspec.param[0] = 0;
+               fwspec.param[1] = hwirq - 32;
+               fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+       } else {
+               return -EINVAL;
+       }
 
-       err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
+       err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
        if (err)
                return err;
 
@@ -143,27 +162,30 @@ static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq)
                return;
        }
 
-       spin_lock(&v2m->msi_cnt_lock);
+       spin_lock(&v2m_lock);
        __clear_bit(pos, v2m->bm);
-       spin_unlock(&v2m->msi_cnt_lock);
+       spin_unlock(&v2m_lock);
 }
 
 static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                   unsigned int nr_irqs, void *args)
 {
-       struct v2m_data *v2m = domain->host_data;
+       struct v2m_data *v2m = NULL, *tmp;
        int hwirq, offset, err = 0;
 
-       spin_lock(&v2m->msi_cnt_lock);
-       offset = find_first_zero_bit(v2m->bm, v2m->nr_spis);
-       if (offset < v2m->nr_spis)
-               __set_bit(offset, v2m->bm);
-       else
-               err = -ENOSPC;
-       spin_unlock(&v2m->msi_cnt_lock);
+       spin_lock(&v2m_lock);
+       list_for_each_entry(tmp, &v2m_nodes, entry) {
+               offset = find_first_zero_bit(tmp->bm, tmp->nr_spis);
+               if (offset < tmp->nr_spis) {
+                       __set_bit(offset, tmp->bm);
+                       v2m = tmp;
+                       break;
+               }
+       }
+       spin_unlock(&v2m_lock);
 
-       if (err)
-               return err;
+       if (!v2m)
+               return -ENOSPC;
 
        hwirq = v2m->spi_start + offset;
 
@@ -224,12 +246,61 @@ static struct msi_domain_info gicv2m_pmsi_domain_info = {
        .chip   = &gicv2m_pmsi_irq_chip,
 };
 
+static void gicv2m_teardown(void)
+{
+       struct v2m_data *v2m, *tmp;
+
+       list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
+               list_del(&v2m->entry);
+               kfree(v2m->bm);
+               iounmap(v2m->base);
+               of_node_put(v2m->node);
+               kfree(v2m);
+       }
+}
+
+static int gicv2m_allocate_domains(struct irq_domain *parent)
+{
+       struct irq_domain *inner_domain, *pci_domain, *plat_domain;
+       struct v2m_data *v2m;
+
+       v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
+       if (!v2m)
+               return 0;
+
+       inner_domain = irq_domain_create_tree(of_node_to_fwnode(v2m->node),
+                                             &gicv2m_domain_ops, v2m);
+       if (!inner_domain) {
+               pr_err("Failed to create GICv2m domain\n");
+               return -ENOMEM;
+       }
+
+       inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+       inner_domain->parent = parent;
+       pci_domain = pci_msi_create_irq_domain(of_node_to_fwnode(v2m->node),
+                                              &gicv2m_msi_domain_info,
+                                              inner_domain);
+       plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(v2m->node),
+                                                    &gicv2m_pmsi_domain_info,
+                                                    inner_domain);
+       if (!pci_domain || !plat_domain) {
+               pr_err("Failed to create MSI domains\n");
+               if (plat_domain)
+                       irq_domain_remove(plat_domain);
+               if (pci_domain)
+                       irq_domain_remove(pci_domain);
+               irq_domain_remove(inner_domain);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
 static int __init gicv2m_init_one(struct device_node *node,
                                  struct irq_domain *parent)
 {
        int ret;
        struct v2m_data *v2m;
-       struct irq_domain *inner_domain, *pci_domain, *plat_domain;
 
        v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
        if (!v2m) {
@@ -237,6 +308,9 @@ static int __init gicv2m_init_one(struct device_node *node,
                return -ENOMEM;
        }
 
+       INIT_LIST_HEAD(&v2m->entry);
+       v2m->node = node;
+
        ret = of_address_to_resource(node, 0, &v2m->res);
        if (ret) {
                pr_err("Failed to allocate v2m resource.\n");
@@ -266,6 +340,17 @@ static int __init gicv2m_init_one(struct device_node *node,
                goto err_iounmap;
        }
 
+       /*
+        * APM X-Gene GICv2m implementation has an erratum where
+        * the MSI data needs to be the offset from the spi_start
+        * in order to trigger the correct MSI interrupt. This is
+        * different from the standard GICv2m implementation where
+        * the MSI data is the absolute value within the range from
+        * spi_start to (spi_start + num_spis).
+        */
+       if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR)
+               v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
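
        /*
         * Worked example (illustrative only): with spi_start = 64 and an MSI
         * allocated at offset 5 (hwirq 69), a standard GICv2m frame expects
         * msg->data = 69, while an X-Gene frame flagged with
         * GICV2M_NEEDS_SPI_OFFSET expects msg->data = 69 - 64 = 5, which is
         * the adjustment gicv2m_compose_msi_msg() applies.
         */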
+
        v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
                          GFP_KERNEL);
        if (!v2m->bm) {
@@ -273,43 +358,13 @@ static int __init gicv2m_init_one(struct device_node *node,
                goto err_iounmap;
        }
 
-       inner_domain = irq_domain_add_tree(node, &gicv2m_domain_ops, v2m);
-       if (!inner_domain) {
-               pr_err("Failed to create GICv2m domain\n");
-               ret = -ENOMEM;
-               goto err_free_bm;
-       }
-
-       inner_domain->bus_token = DOMAIN_BUS_NEXUS;
-       inner_domain->parent = parent;
-       pci_domain = pci_msi_create_irq_domain(node, &gicv2m_msi_domain_info,
-                                              inner_domain);
-       plat_domain = platform_msi_create_irq_domain(node,
-                                                    &gicv2m_pmsi_domain_info,
-                                                    inner_domain);
-       if (!pci_domain || !plat_domain) {
-               pr_err("Failed to create MSI domains\n");
-               ret = -ENOMEM;
-               goto err_free_domains;
-       }
-
-       spin_lock_init(&v2m->msi_cnt_lock);
-
+       list_add_tail(&v2m->entry, &v2m_nodes);
        pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name,
                (unsigned long)v2m->res.start, (unsigned long)v2m->res.end,
                v2m->spi_start, (v2m->spi_start + v2m->nr_spis));
 
        return 0;
 
-err_free_domains:
-       if (plat_domain)
-               irq_domain_remove(plat_domain);
-       if (pci_domain)
-               irq_domain_remove(pci_domain);
-       if (inner_domain)
-               irq_domain_remove(inner_domain);
-err_free_bm:
-       kfree(v2m->bm);
 err_iounmap:
        iounmap(v2m->base);
 err_free_v2m:
@@ -339,5 +394,9 @@ int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent)
                }
        }
 
+       if (!ret)
+               ret = gicv2m_allocate_domains(parent);
+       if (ret)
+               gicv2m_teardown();
        return ret;
 }
index cf351c6374645e7b0e4fe982a937d49b4904cfe9..aee60ed025dccfab0c6f6b81dbe4e1678a91279c 100644 (file)
@@ -42,7 +42,6 @@ static struct irq_chip its_msi_irq_chip = {
 
 struct its_pci_alias {
        struct pci_dev  *pdev;
-       u32             dev_id;
        u32             count;
 };
 
@@ -60,9 +59,8 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
 {
        struct its_pci_alias *dev_alias = data;
 
-       dev_alias->dev_id = alias;
        if (pdev != dev_alias->pdev)
-               dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+               dev_alias->count += its_pci_msi_vec_count(pdev);
 
        return 0;
 }
@@ -86,7 +84,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
        pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
 
        /* ITS specific DeviceID, as the core ITS ignores dev. */
-       info->scratchpad[0].ul = dev_alias.dev_id;
+       info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
 
        return msi_info->ops->msi_prepare(domain->parent,
                                          dev, dev_alias.count, info);
@@ -125,7 +123,8 @@ static int __init its_pci_msi_init(void)
                        continue;
                }
 
-               if (!pci_msi_create_irq_domain(np, &its_pci_msi_domain_info,
+               if (!pci_msi_create_irq_domain(of_node_to_fwnode(np),
+                                              &its_pci_msi_domain_info,
                                               parent)) {
                        pr_err("%s: unable to create PCI domain\n",
                               np->full_name);
index a8655056277995417af6759591e6695ef71cc159..470b4aa7d62c2816015b57420a81f12b37ef4649 100644 (file)
@@ -29,13 +29,25 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
 {
        struct msi_domain_info *msi_info;
        u32 dev_id;
-       int ret;
+       int ret, index = 0;
 
        msi_info = msi_get_domain_info(domain->parent);
 
        /* Suck the DeviceID out of the msi-parent property */
-       ret = of_property_read_u32_index(dev->of_node, "msi-parent",
-                                        1, &dev_id);
+       do {
+               struct of_phandle_args args;
+
+               ret = of_parse_phandle_with_args(dev->of_node,
+                                                "msi-parent", "#msi-cells",
+                                                index, &args);
+               if (args.np == irq_domain_get_of_node(domain)) {
+                       if (WARN_ON(args.args_count != 1))
+                               return -EINVAL;
+                       dev_id = args.args[0];
+                       break;
+               }
+       } while (!ret);
+
        if (ret)
                return ret;
 
@@ -78,7 +90,8 @@ static int __init its_pmsi_init(void)
                        continue;
                }
 
-               if (!platform_msi_create_irq_domain(np, &its_pmsi_domain_info,
+               if (!platform_msi_create_irq_domain(of_node_to_fwnode(np),
+                                                   &its_pmsi_domain_info,
                                                    parent)) {
                        pr_err("%s: unable to create platform domain\n",
                               np->full_name);
index ac7ae2b3cb83726e336ce310d95e2b890da8224d..e23d1d18f9d6a39731bdd34633e2bbc9bc473525 100644 (file)
 #include <asm/cputype.h>
 #include <asm/exception.h>
 
-#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING          (1 << 0)
+#include "irq-gic-common.h"
+
+#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING          (1ULL << 0)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_22375      (1ULL << 1)
 
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING    (1 << 0)
 
@@ -719,6 +722,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
 out:
        spin_unlock(&lpi_lock);
 
+       if (!bitmap)
+               *base = *nr_ids = 0;
+
        return bitmap;
 }
 
@@ -814,7 +820,22 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
        int i;
        int psz = SZ_64K;
        u64 shr = GITS_BASER_InnerShareable;
-       u64 cache = GITS_BASER_WaWb;
+       u64 cache;
+       u64 typer;
+       u32 ids;
+
+       if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
+               /*
+                * erratum 22375: only alloc 8MB table size
+                * erratum 24313: ignore memory access type
+                */
+               cache   = 0;
+               ids     = 0x14;                 /* 20 bits, 8MB */
+       } else {
+               cache   = GITS_BASER_WaWb;
+               typer   = readq_relaxed(its->base + GITS_TYPER);
+               ids     = GITS_TYPER_DEVBITS(typer);
+       }
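
        /*
         * Illustrative arithmetic (assuming the usual 8-byte device table
         * entry size, which this patch does not state): 0x14 DeviceID bits
         * give 2^20 entries, and 2^20 * 8 bytes = 8 MB, matching the 8MB cap
         * quoted in the erratum comment above.
         */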
 
        for (i = 0; i < GITS_BASER_NR_REGS; i++) {
                u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -822,6 +843,7 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
                u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
                int order = get_order(psz);
                int alloc_size;
+               int alloc_pages;
                u64 tmp;
                void *base;
 
@@ -837,9 +859,6 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
                 * For other tables, only allocate a single page.
                 */
                if (type == GITS_BASER_TYPE_DEVICE) {
-                       u64 typer = readq_relaxed(its->base + GITS_TYPER);
-                       u32 ids = GITS_TYPER_DEVBITS(typer);
-
                        /*
                         * 'order' was initialized earlier to the default page
                         * granule of the ITS.  We can't have an allocation
@@ -856,6 +875,14 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
                }
 
                alloc_size = (1 << order) * PAGE_SIZE;
+               alloc_pages = (alloc_size / psz);
+               if (alloc_pages > GITS_BASER_PAGES_MAX) {
+                       alloc_pages = GITS_BASER_PAGES_MAX;
+                       order = get_order(GITS_BASER_PAGES_MAX * psz);
+                       pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n",
+                               node_name, order, alloc_pages);
+               }
+
                base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
                if (!base) {
                        err = -ENOMEM;
@@ -884,7 +911,7 @@ retry_baser:
                        break;
                }
 
-               val |= (alloc_size / psz) - 1;
+               val |= alloc_pages - 1;
 
                writeq_relaxed(val, its->base + GITS_BASER + i * 8);
                tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -1238,15 +1265,19 @@ static int its_irq_gic_domain_alloc(struct irq_domain *domain,
                                    unsigned int virq,
                                    irq_hw_number_t hwirq)
 {
-       struct of_phandle_args args;
+       struct irq_fwspec fwspec;
 
-       args.np = domain->parent->of_node;
-       args.args_count = 3;
-       args.args[0] = GIC_IRQ_TYPE_LPI;
-       args.args[1] = hwirq;
-       args.args[2] = IRQ_TYPE_EDGE_RISING;
+       if (irq_domain_get_of_node(domain->parent)) {
+               fwspec.fwnode = domain->parent->fwnode;
+               fwspec.param_count = 3;
+               fwspec.param[0] = GIC_IRQ_TYPE_LPI;
+               fwspec.param[1] = hwirq;
+               fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+       } else {
+               return -EINVAL;
+       }
 
-       return irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
+       return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
 }
 
 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -1367,6 +1398,33 @@ static int its_force_quiescent(void __iomem *base)
        }
 }
 
+static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
+{
+       struct its_node *its = data;
+
+       its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
+}
+
+static const struct gic_quirk its_quirks[] = {
+#ifdef CONFIG_CAVIUM_ERRATUM_22375
+       {
+               .desc   = "ITS: Cavium errata 22375, 24313",
+               .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
+               .mask   = 0xffff0fff,
+               .init   = its_enable_quirk_cavium_22375,
+       },
+#endif
+       {
+       }
+};
+
+static void its_enable_quirks(struct its_node *its)
+{
+       u32 iidr = readl_relaxed(its->base + GITS_IIDR);
+
+       gic_enable_quirks(iidr, its_quirks, its);
+}
+
 static int its_probe(struct device_node *node, struct irq_domain *parent)
 {
        struct resource res;
@@ -1425,6 +1483,8 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
        }
        its->cmd_write = its->cmd_base;
 
+       its_enable_quirks(its);
+
        err = its_alloc_tables(node->full_name, its);
        if (err)
                goto out_free_cmd;
index 36ecfc870e5a6cb86698cb218ac00cccc491e299..d7be6ddc34f685a47dad313274f209b59dff6bf5 100644 (file)
@@ -108,57 +108,17 @@ static void gic_redist_wait_for_rwp(void)
        gic_do_wait_for_rwp(gic_data_rdist_rd_base());
 }
 
-/* Low level accessors */
-static u64 __maybe_unused gic_read_iar(void)
-{
-       u64 irqstat;
-
-       asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
-       return irqstat;
-}
-
-static void __maybe_unused gic_write_pmr(u64 val)
-{
-       asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
-}
-
-static void __maybe_unused gic_write_ctlr(u64 val)
-{
-       asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
-       isb();
-}
-
-static void __maybe_unused gic_write_grpen1(u64 val)
-{
-       asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
-       isb();
-}
+#ifdef CONFIG_ARM64
+static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
 
-static void __maybe_unused gic_write_sgi1r(u64 val)
-{
-       asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
-}
-
-static void gic_enable_sre(void)
+static u64 __maybe_unused gic_read_iar(void)
 {
-       u64 val;
-
-       asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
-       val |= ICC_SRE_EL1_SRE;
-       asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
-       isb();
-
-       /*
-        * Need to check that the SRE bit has actually been set. If
-        * not, it means that SRE is disabled at EL2. We're going to
-        * die painfully, and there is nothing we can do about it.
-        *
-        * Kindly inform the luser.
-        */
-       asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
-       if (!(val & ICC_SRE_EL1_SRE))
-               pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+       if (static_branch_unlikely(&is_cavium_thunderx))
+               return gic_read_iar_cavium_thunderx();
+       else
+               return gic_read_iar_common();
 }
+#endif
 
 static void gic_enable_redist(bool enable)
 {
@@ -359,11 +319,11 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
        return 0;
 }
 
-static u64 gic_mpidr_to_affinity(u64 mpidr)
+static u64 gic_mpidr_to_affinity(unsigned long mpidr)
 {
        u64 aff;
 
-       aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
+       aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));
@@ -373,7 +333,7 @@ static u64 gic_mpidr_to_affinity(u64 mpidr)
 
 static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 {
-       u64 irqnr;
+       u32 irqnr;
 
        do {
                irqnr = gic_read_iar();
@@ -432,12 +392,12 @@ static void __init gic_dist_init(void)
         */
        affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
        for (i = 32; i < gic_data.irq_nr; i++)
-               writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
+               gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
 }
 
 static int gic_populate_rdist(void)
 {
-       u64 mpidr = cpu_logical_map(smp_processor_id());
+       unsigned long mpidr = cpu_logical_map(smp_processor_id());
        u64 typer;
        u32 aff;
        int i;
@@ -463,15 +423,14 @@ static int gic_populate_rdist(void)
                }
 
                do {
-                       typer = readq_relaxed(ptr + GICR_TYPER);
+                       typer = gic_read_typer(ptr + GICR_TYPER);
                        if ((typer >> 32) == aff) {
                                u64 offset = ptr - gic_data.redist_regions[i].redist_base;
                                gic_data_rdist_rd_base() = ptr;
                                gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
-                               pr_info("CPU%d: found redistributor %llx region %d:%pa\n",
-                                       smp_processor_id(),
-                                       (unsigned long long)mpidr,
-                                       i, &gic_data_rdist()->phys_base);
+                               pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
+                                       smp_processor_id(), mpidr, i,
+                                       &gic_data_rdist()->phys_base);
                                return 0;
                        }
 
@@ -486,15 +445,22 @@ static int gic_populate_rdist(void)
        }
 
        /* We couldn't even deal with ourselves... */
-       WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
-            smp_processor_id(), (unsigned long long)mpidr);
+       WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
+            smp_processor_id(), mpidr);
        return -ENODEV;
 }
 
 static void gic_cpu_sys_reg_init(void)
 {
-       /* Enable system registers */
-       gic_enable_sre();
+       /*
+        * Need to check that the SRE bit has actually been set. If
+        * not, it means that SRE is disabled at EL2. We're going to
+        * die painfully, and there is nothing we can do about it.
+        *
+        * Kindly inform the luser.
+        */
+       if (!gic_enable_sre())
+               pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
 
        /* Set priority mask register */
        gic_write_pmr(DEFAULT_PMR_VALUE);
@@ -557,10 +523,10 @@ static struct notifier_block gic_cpu_notifier = {
 };
 
 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
-                                  u64 cluster_id)
+                                  unsigned long cluster_id)
 {
        int cpu = *base_cpu;
-       u64 mpidr = cpu_logical_map(cpu);
+       unsigned long mpidr = cpu_logical_map(cpu);
        u16 tlist = 0;
 
        while (cpu < nr_cpu_ids) {
@@ -621,7 +587,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
        smp_wmb();
 
        for_each_cpu(cpu, mask) {
-               u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
+               unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
                u16 tlist;
 
                tlist = gic_compute_target_list(&cpu, mask, cluster_id);
@@ -657,7 +623,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
        reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
        val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
 
-       writeq_relaxed(val, reg);
+       gic_write_irouter(val, reg);
 
        /*
         * If the interrupt was enabled, enable it again. Otherwise,
@@ -771,32 +737,34 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
        return 0;
 }
 
-static int gic_irq_domain_xlate(struct irq_domain *d,
-                               struct device_node *controller,
-                               const u32 *intspec, unsigned int intsize,
-                               unsigned long *out_hwirq, unsigned int *out_type)
+static int gic_irq_domain_translate(struct irq_domain *d,
+                                   struct irq_fwspec *fwspec,
+                                   unsigned long *hwirq,
+                                   unsigned int *type)
 {
-       if (d->of_node != controller)
-               return -EINVAL;
-       if (intsize < 3)
-               return -EINVAL;
+       if (is_of_node(fwspec->fwnode)) {
+               if (fwspec->param_count < 3)
+                       return -EINVAL;
 
-       switch(intspec[0]) {
-       case 0:                 /* SPI */
-               *out_hwirq = intspec[1] + 32;
-               break;
-       case 1:                 /* PPI */
-               *out_hwirq = intspec[1] + 16;
-               break;
-       case GIC_IRQ_TYPE_LPI:  /* LPI */
-               *out_hwirq = intspec[1];
-               break;
-       default:
-               return -EINVAL;
+               switch (fwspec->param[0]) {
+               case 0:                 /* SPI */
+                       *hwirq = fwspec->param[1] + 32;
+                       break;
+               case 1:                 /* PPI */
+                       *hwirq = fwspec->param[1] + 16;
+                       break;
+               case GIC_IRQ_TYPE_LPI:  /* LPI */
+                       *hwirq = fwspec->param[1];
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
+               *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+               return 0;
        }
 
-       *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
-       return 0;
+       return -EINVAL;
 }
 
 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -805,10 +773,9 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
        int i, ret;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
-       struct of_phandle_args *irq_data = arg;
+       struct irq_fwspec *fwspec = arg;
 
-       ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
-                                  irq_data->args_count, &hwirq, &type);
+       ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;
 
@@ -831,11 +798,19 @@ static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
 }
 
 static const struct irq_domain_ops gic_irq_domain_ops = {
-       .xlate = gic_irq_domain_xlate,
+       .translate = gic_irq_domain_translate,
        .alloc = gic_irq_domain_alloc,
        .free = gic_irq_domain_free,
 };
 
+static void gicv3_enable_quirks(void)
+{
+#ifdef CONFIG_ARM64
+       if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
+               static_branch_enable(&is_cavium_thunderx);
+#endif
+}
+
 static int __init gic_of_init(struct device_node *node, struct device_node *parent)
 {
        void __iomem *dist_base;
@@ -901,6 +876,8 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
        gic_data.nr_redist_regions = nr_redist_regions;
        gic_data.redist_stride = redist_stride;
 
+       gicv3_enable_quirks();
+
        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
index 982c09c2d79171d21355c75153b4de87f4a1bee2..1d0e76855106cf946627dcc5c5aaa728a38c6bc8 100644 (file)
 
 #include "irq-gic-common.h"
 
+#ifdef CONFIG_ARM64
+#include <asm/cpufeature.h>
+
+static void gic_check_cpu_features(void)
+{
+       WARN_TAINT_ONCE(cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
+                       TAINT_CPU_OUT_OF_SPEC,
+                       "GICv3 system registers enabled, broken firmware!\n");
+}
+#else
+#define gic_check_cpu_features()       do { } while (0)
+#endif
+
 union gic_base {
        void __iomem *common_base;
        void __percpu * __iomem *percpu_base;
@@ -903,28 +916,39 @@ static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
 {
 }
 
-static int gic_irq_domain_xlate(struct irq_domain *d,
-                               struct device_node *controller,
-                               const u32 *intspec, unsigned int intsize,
-                               unsigned long *out_hwirq, unsigned int *out_type)
+static int gic_irq_domain_translate(struct irq_domain *d,
+                                   struct irq_fwspec *fwspec,
+                                   unsigned long *hwirq,
+                                   unsigned int *type)
 {
-       unsigned long ret = 0;
+       if (is_of_node(fwspec->fwnode)) {
+               if (fwspec->param_count < 3)
+                       return -EINVAL;
 
-       if (d->of_node != controller)
-               return -EINVAL;
-       if (intsize < 3)
-               return -EINVAL;
+               /* Get the interrupt number and add 16 to skip over SGIs */
+               *hwirq = fwspec->param[1] + 16;
 
-       /* Get the interrupt number and add 16 to skip over SGIs */
-       *out_hwirq = intspec[1] + 16;
+               /*
+                * For SPIs, we need to add 16 more to get the GIC irq
+                * ID number
+                */
+               if (!fwspec->param[0])
+                       *hwirq += 16;
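
                /*
                 * Worked example (illustrative only): a DT specifier of
                 * <0 29 IRQ_TYPE_LEVEL_HIGH> (SPI 29) maps to hwirq
                 * 29 + 16 + 16 = 61, while <1 13 ...> (PPI 13) maps to
                 * hwirq 13 + 16 = 29.
                 */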
 
-       /* For SPIs, we need to add 16 more to get the GIC irq ID number */
-       if (!intspec[0])
-               *out_hwirq += 16;
+               *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+               return 0;
+       }
 
-       *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+       if (fwspec->fwnode->type == FWNODE_IRQCHIP) {
+               if (fwspec->param_count != 2)
+                       return -EINVAL;
 
-       return ret;
+               *hwirq = fwspec->param[0];
+               *type = fwspec->param[1];
+               return 0;
+       }
+
+       return -EINVAL;
 }
 
 #ifdef CONFIG_SMP
@@ -952,10 +976,9 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
        int i, ret;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
-       struct of_phandle_args *irq_data = arg;
+       struct irq_fwspec *fwspec = arg;
 
-       ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
-                                  irq_data->args_count, &hwirq, &type);
+       ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;
 
@@ -966,7 +989,7 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 }
 
 static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
-       .xlate = gic_irq_domain_xlate,
+       .translate = gic_irq_domain_translate,
        .alloc = gic_irq_domain_alloc,
        .free = irq_domain_free_irqs_top,
 };
@@ -974,12 +997,11 @@ static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
 static const struct irq_domain_ops gic_irq_domain_ops = {
        .map = gic_irq_domain_map,
        .unmap = gic_irq_domain_unmap,
-       .xlate = gic_irq_domain_xlate,
 };
 
 static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
                           void __iomem *dist_base, void __iomem *cpu_base,
-                          u32 percpu_offset, struct device_node *node)
+                          u32 percpu_offset, struct fwnode_handle *handle)
 {
        irq_hw_number_t hwirq_base;
        struct gic_chip_data *gic;
@@ -987,6 +1009,8 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
 
        BUG_ON(gic_nr >= MAX_GIC_NR);
 
+       gic_check_cpu_features();
+
        gic = &gic_data[gic_nr];
 #ifdef CONFIG_GIC_NON_BANKED
        if (percpu_offset) { /* Franken-GIC without banked registers... */
@@ -1031,11 +1055,11 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
                gic_irqs = 1020;
        gic->gic_irqs = gic_irqs;
 
-       if (node) {             /* DT case */
-               gic->domain = irq_domain_add_linear(node, gic_irqs,
-                                                   &gic_irq_domain_hierarchy_ops,
-                                                   gic);
-       } else {                /* Non-DT case */
+       if (handle) {           /* DT/ACPI */
+               gic->domain = irq_domain_create_linear(handle, gic_irqs,
+                                                      &gic_irq_domain_hierarchy_ops,
+                                                      gic);
+       } else {                /* Legacy support */
                /*
                 * For primary GICs, skip over SGIs.
                 * For secondary GICs, skip over PPIs, too.
@@ -1058,7 +1082,7 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
                        irq_base = irq_start;
                }
 
-               gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
+               gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
                                        hwirq_base, &gic_irq_domain_ops, gic);
        }
 
@@ -1087,17 +1111,15 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
        gic_pm_init(gic);
 }
 
-void __init gic_init_bases(unsigned int gic_nr, int irq_start,
-                          void __iomem *dist_base, void __iomem *cpu_base,
-                          u32 percpu_offset, struct device_node *node)
+void __init gic_init(unsigned int gic_nr, int irq_start,
+                    void __iomem *dist_base, void __iomem *cpu_base)
 {
        /*
         * Non-DT/ACPI systems won't run a hypervisor, so let's not
         * bother with these...
         */
        static_key_slow_dec(&supports_deactivate);
-       __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base,
-                        percpu_offset, node);
+       __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL);
 }
 
 #ifdef CONFIG_OF
@@ -1168,7 +1190,8 @@ gic_of_init(struct device_node *node, struct device_node *parent)
        if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
                percpu_offset = 0;
 
-       __gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+       __gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset,
+                        &node->fwnode);
        if (!gic_cnt)
                gic_init_physaddr(node);
 
@@ -1191,6 +1214,7 @@ IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
 IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
 IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
 IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
+IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
 
 #endif
 
@@ -1242,6 +1266,7 @@ int __init
 gic_v2_acpi_init(struct acpi_table_header *table)
 {
        void __iomem *cpu_base, *dist_base;
+       struct fwnode_handle *domain_handle;
        int count;
 
        /* Collect CPU base addresses */
@@ -1292,14 +1317,19 @@ gic_v2_acpi_init(struct acpi_table_header *table)
                static_key_slow_dec(&supports_deactivate);
 
        /*
-        * Initialize zero GIC instance (no multi-GIC support). Also, set GIC
-        * as default IRQ domain to allow for GSI registration and GSI to IRQ
-        * number translation (see acpi_register_gsi() and acpi_gsi_to_irq()).
+        * Initialize GIC instance zero (no multi-GIC support).
         */
-       __gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
-       irq_set_default_host(gic_data[0].domain);
+       domain_handle = irq_domain_alloc_fwnode(dist_base);
+       if (!domain_handle) {
+               pr_err("Unable to allocate domain handle\n");
+               iounmap(cpu_base);
+               iounmap(dist_base);
+               return -ENOMEM;
+       }
+
+       __gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle);
 
-       acpi_irq_model = ACPI_IRQ_MODEL_GIC;
+       acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
        return 0;
 }
 #endif
index 8f3ca8f3a62b615b2d670851dd8afe1d75904bee..9688d2e2a6364d2accbe77f323dd3b9f1d735dfa 100644 (file)
@@ -325,7 +325,7 @@ static int hip04_irq_domain_xlate(struct irq_domain *d,
 {
        unsigned long ret = 0;
 
-       if (d->of_node != controller)
+       if (irq_domain_get_of_node(d) != controller)
                return -EINVAL;
        if (intsize < 3)
                return -EINVAL;
index e484fd2553210f55fd0eb72375f4ada1511c7fec..6b304eb39bd22c96b64c6a99629ef4195db721b5 100644 (file)
@@ -377,8 +377,8 @@ int __init i8259_of_init(struct device_node *node, struct device_node *parent)
        }
 
        domain = __init_i8259_irqs(node);
-       irq_set_handler_data(parent_irq, domain);
-       irq_set_chained_handler(parent_irq, i8259_irq_dispatch);
+       irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch,
+                                        domain);
        return 0;
 }
 IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init);
index e48d3305456fe859e890413144066ff1fbb43875..15af9a9753e582b797a58de3c9713226800999f8 100644 (file)
@@ -150,49 +150,42 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
 #endif
 };
 
-static int imx_gpcv2_domain_xlate(struct irq_domain *domain,
-                               struct device_node *controller,
-                               const u32 *intspec,
-                               unsigned int intsize,
-                               unsigned long *out_hwirq,
-                               unsigned int *out_type)
+static int imx_gpcv2_domain_translate(struct irq_domain *d,
+                                     struct irq_fwspec *fwspec,
+                                     unsigned long *hwirq,
+                                     unsigned int *type)
 {
-       /* Shouldn't happen, really... */
-       if (domain->of_node != controller)
-               return -EINVAL;
+       if (is_of_node(fwspec->fwnode)) {
+               if (fwspec->param_count != 3)
+                       return -EINVAL;
 
-       /* Not GIC compliant */
-       if (intsize != 3)
-               return -EINVAL;
+               /* No PPI should point to this domain */
+               if (fwspec->param[0] != 0)
+                       return -EINVAL;
 
-       /* No PPI should point to this domain */
-       if (intspec[0] != 0)
-               return -EINVAL;
+               *hwirq = fwspec->param[1];
+               *type = fwspec->param[2];
+               return 0;
+       }
 
-       *out_hwirq = intspec[1];
-       *out_type = intspec[2];
-       return 0;
+       return -EINVAL;
 }
 
 static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
                                  unsigned int irq, unsigned int nr_irqs,
                                  void *data)
 {
-       struct of_phandle_args *args = data;
-       struct of_phandle_args parent_args;
+       struct irq_fwspec *fwspec = data;
+       struct irq_fwspec parent_fwspec;
        irq_hw_number_t hwirq;
+       unsigned int type;
+       int err;
        int i;
 
-       /* Not GIC compliant */
-       if (args->args_count != 3)
-               return -EINVAL;
-
-       /* No PPI should point to this domain */
-       if (args->args[0] != 0)
-               return -EINVAL;
+       err = imx_gpcv2_domain_translate(domain, fwspec, &hwirq, &type);
+       if (err)
+               return err;
 
-       /* Can't deal with this */
-       hwirq = args->args[1];
        if (hwirq >= GPC_MAX_IRQS)
                return -EINVAL;
 
@@ -201,15 +194,16 @@ static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
                                &gpcv2_irqchip_data_chip, domain->host_data);
        }
 
-       parent_args = *args;
-       parent_args.np = domain->parent->of_node;
-       return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args);
+       parent_fwspec = *fwspec;
+       parent_fwspec.fwnode = domain->parent->fwnode;
+       return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
+                                           &parent_fwspec);
 }
 
 static struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
-       .xlate  = imx_gpcv2_domain_xlate,
-       .alloc  = imx_gpcv2_domain_alloc,
-       .free   = irq_domain_free_irqs_common,
+       .translate      = imx_gpcv2_domain_translate,
+       .alloc          = imx_gpcv2_domain_alloc,
+       .free           = irq_domain_free_irqs_common,
 };
 
 static int __init imx_gpcv2_irqchip_init(struct device_node *node,
index af2f16bb8a94d3912787b1893431c97e541fc0ec..aeaa061f0dbfd3694d8a9f890822eba266e2e4e0 100644 (file)
@@ -320,6 +320,14 @@ static void gic_handle_shared_int(bool chained)
                intrmask[i] = gic_read(intrmask_reg);
                pending_reg += gic_reg_step;
                intrmask_reg += gic_reg_step;
+
+               if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
+                       continue;
+
+               pending[i] |= (u64)gic_read(pending_reg) << 32;
+               intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
+               pending_reg += gic_reg_step;
+               intrmask_reg += gic_reg_step;
        }
 
        bitmap_and(pending, pending, intrmask, gic_shared_intrs);
@@ -426,7 +434,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
        spin_lock_irqsave(&gic_lock, flags);
 
        /* Re-route this IRQ */
-       gic_map_to_vpe(irq, cpumask_first(&tmp));
+       gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
 
        /* Update the pcpu_masks */
        for (i = 0; i < NR_CPUS; i++)
@@ -599,7 +607,7 @@ static __init void gic_ipi_init_one(unsigned int intr, int cpu,
                                      GIC_SHARED_TO_HWIRQ(intr));
        int i;
 
-       gic_map_to_vpe(intr, cpu);
+       gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(intr, pcpu_masks[i].pcpu_mask);
        set_bit(intr, pcpu_masks[cpu].pcpu_mask);
index c8753da4c156ee9e2fa1b3b5ddfde63561733d30..63ac73b1d9c849345adfc0b1e0984ea21cb5f03c 100644 (file)
@@ -67,22 +67,25 @@ static struct irq_chip mtk_sysirq_chip = {
        .irq_set_affinity       = irq_chip_set_affinity_parent,
 };
 
-static int mtk_sysirq_domain_xlate(struct irq_domain *d,
-                                  struct device_node *controller,
-                                  const u32 *intspec, unsigned int intsize,
-                                  unsigned long *out_hwirq,
-                                  unsigned int *out_type)
+static int mtk_sysirq_domain_translate(struct irq_domain *d,
+                                      struct irq_fwspec *fwspec,
+                                      unsigned long *hwirq,
+                                      unsigned int *type)
 {
-       if (intsize != 3)
-               return -EINVAL;
+       if (is_of_node(fwspec->fwnode)) {
+               if (fwspec->param_count != 3)
+                       return -EINVAL;
 
-       /* sysirq doesn't support PPI */
-       if (intspec[0])
-               return -EINVAL;
+               /* No PPI should point to this domain */
+               if (fwspec->param[0] != 0)
+                       return -EINVAL;
 
-       *out_hwirq = intspec[1];
-       *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
-       return 0;
+               *hwirq = fwspec->param[1];
+               *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+               return 0;
+       }
+
+       return -EINVAL;
 }
 
 static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -90,30 +93,30 @@ static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 {
        int i;
        irq_hw_number_t hwirq;
-       struct of_phandle_args *irq_data = arg;
-       struct of_phandle_args gic_data = *irq_data;
+       struct irq_fwspec *fwspec = arg;
+       struct irq_fwspec gic_fwspec = *fwspec;
 
-       if (irq_data->args_count != 3)
+       if (fwspec->param_count != 3)
                return -EINVAL;
 
        /* sysirq doesn't support PPI */
-       if (irq_data->args[0])
+       if (fwspec->param[0])
                return -EINVAL;
 
-       hwirq = irq_data->args[1];
+       hwirq = fwspec->param[1];
        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                              &mtk_sysirq_chip,
                                              domain->host_data);
 
-       gic_data.np = domain->parent->of_node;
-       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data);
+       gic_fwspec.fwnode = domain->parent->fwnode;
+       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_fwspec);
 }
 
 static const struct irq_domain_ops sysirq_domain_ops = {
-       .xlate = mtk_sysirq_domain_xlate,
-       .alloc = mtk_sysirq_domain_alloc,
-       .free = irq_domain_free_irqs_common,
+       .translate      = mtk_sysirq_domain_translate,
+       .alloc          = mtk_sysirq_domain_alloc,
+       .free           = irq_domain_free_irqs_common,
 };
 
 static int __init mtk_sysirq_of_init(struct device_node *node,
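
Editor's note: the imx-gpcv2 and mtk-sysirq hunks above (and the tegra and vf610 ones further down) all make the same conversion: the DT-only .xlate callback, which parsed a struct of_phandle_args, becomes a firmware-node based .translate callback taking a struct irq_fwspec, and .alloc reuses it instead of open-coding the parsing. A minimal sketch of such a three-cell translate callback; the example_domain_translate name is hypothetical:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static int example_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	/* only DT-based firmware nodes with a 3-cell specifier are accepted */
	if (!is_of_node(fwspec->fwnode) || fwspec->param_count != 3)
		return -EINVAL;

	/* cell 0 must be 0 (SPI); no PPI should point to this domain */
	if (fwspec->param[0] != 0)
		return -EINVAL;

	*hwirq = fwspec->param[1];
	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
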
index 604df63e2edf6f9706db28d08bf559a0347e8bd8..c22e2d40cb302452624c29efd44c483e577f5333 100644 (file)
@@ -1,5 +1,7 @@
 /*
  * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de>
+ *     Add Alphascale ASM9260 support.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include <linux/stmp_device.h>
 #include <asm/exception.h>
 
+#include "alphascale_asm9260-icoll.h"
+
+/*
+ * This device provides 4 offsets for each register:
+ * 0x0 - plain read/write mode
+ * 0x4 - set mode (writing 1 sets the corresponding bits, OR logic).
+ * 0x8 - clr mode (writing 1 clears the corresponding bits).
+ * 0xc - toggle mode (writing 1 toggles the corresponding bits, XOR logic).
+ */
+#define SET_REG 4
+#define CLR_REG 8
+
 #define HW_ICOLL_VECTOR                                0x0000
 #define HW_ICOLL_LEVELACK                      0x0010
 #define HW_ICOLL_CTRL                          0x0020
 #define HW_ICOLL_STAT_OFFSET                   0x0070
-#define HW_ICOLL_INTERRUPTn_SET(n)             (0x0124 + (n) * 0x10)
-#define HW_ICOLL_INTERRUPTn_CLR(n)             (0x0128 + (n) * 0x10)
-#define BM_ICOLL_INTERRUPTn_ENABLE             0x00000004
+#define HW_ICOLL_INTERRUPT0                    0x0120
+#define HW_ICOLL_INTERRUPTn(n)                 ((n) * 0x10)
+#define BM_ICOLL_INTR_ENABLE                   BIT(2)
 #define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0  0x1
 
 #define ICOLL_NUM_IRQS         128
 
-static void __iomem *icoll_base;
+enum icoll_type {
+       ICOLL,
+       ASM9260_ICOLL,
+};
+
+struct icoll_priv {
+       void __iomem *vector;
+       void __iomem *levelack;
+       void __iomem *ctrl;
+       void __iomem *stat;
+       void __iomem *intr;
+       void __iomem *clear;
+       enum icoll_type type;
+};
+
+static struct icoll_priv icoll_priv;
 static struct irq_domain *icoll_domain;
 
+/* calculate bit offset depending on the number of interrupts per register */
+static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
+{
+       /*
+        * mask the lower part of hwirq to convert it
+        * to 0, 1, 2 or 3, and then multiply it by 8 (i.e. shift by 3)
+        */
+       return bit << ((d->hwirq & 3) << 3);
+}
+
+/* calculate mem offset depending on the number of interrupts per register */
+static void __iomem *icoll_intr_reg(struct irq_data *d)
+{
+       /* offset = hwirq / intr_per_reg * 0x10 */
+       return icoll_priv.intr + ((d->hwirq >> 2) * 0x10);
+}
+
 static void icoll_ack_irq(struct irq_data *d)
 {
        /*
@@ -50,19 +96,35 @@ static void icoll_ack_irq(struct irq_data *d)
         * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally.
         */
        __raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0,
-                       icoll_base + HW_ICOLL_LEVELACK);
+                       icoll_priv.levelack);
 }
 
 static void icoll_mask_irq(struct irq_data *d)
 {
-       __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
-                       icoll_base + HW_ICOLL_INTERRUPTn_CLR(d->hwirq));
+       __raw_writel(BM_ICOLL_INTR_ENABLE,
+                       icoll_priv.intr + CLR_REG + HW_ICOLL_INTERRUPTn(d->hwirq));
 }
 
 static void icoll_unmask_irq(struct irq_data *d)
 {
-       __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
-                       icoll_base + HW_ICOLL_INTERRUPTn_SET(d->hwirq));
+       __raw_writel(BM_ICOLL_INTR_ENABLE,
+                       icoll_priv.intr + SET_REG + HW_ICOLL_INTERRUPTn(d->hwirq));
+}
+
+static void asm9260_mask_irq(struct irq_data *d)
+{
+       __raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE),
+                       icoll_intr_reg(d) + CLR_REG);
+}
+
+static void asm9260_unmask_irq(struct irq_data *d)
+{
+       __raw_writel(ASM9260_BM_CLEAR_BIT(d->hwirq),
+                    icoll_priv.clear +
+                    ASM9260_HW_ICOLL_CLEARn(d->hwirq));
+
+       __raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE),
+                       icoll_intr_reg(d) + SET_REG);
 }
 
 static struct irq_chip mxs_icoll_chip = {
@@ -71,19 +133,32 @@ static struct irq_chip mxs_icoll_chip = {
        .irq_unmask = icoll_unmask_irq,
 };
 
+static struct irq_chip asm9260_icoll_chip = {
+       .irq_ack = icoll_ack_irq,
+       .irq_mask = asm9260_mask_irq,
+       .irq_unmask = asm9260_unmask_irq,
+};
+
 asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
 {
        u32 irqnr;
 
-       irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
-       __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
+       irqnr = __raw_readl(icoll_priv.stat);
+       __raw_writel(irqnr, icoll_priv.vector);
        handle_domain_irq(icoll_domain, irqnr, regs);
 }
 
 static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
                                irq_hw_number_t hw)
 {
-       irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq);
+       struct irq_chip *chip;
+
+       if (icoll_priv.type == ICOLL)
+               chip = &mxs_icoll_chip;
+       else
+               chip = &asm9260_icoll_chip;
+
+       irq_set_chip_and_handler(virq, chip, handle_level_irq);
 
        return 0;
 }
@@ -93,20 +168,80 @@ static const struct irq_domain_ops icoll_irq_domain_ops = {
        .xlate = irq_domain_xlate_onecell,
 };
 
+static void __init icoll_add_domain(struct device_node *np,
+                         int num)
+{
+       icoll_domain = irq_domain_add_linear(np, num,
+                                            &icoll_irq_domain_ops, NULL);
+
+       if (!icoll_domain)
+               panic("%s: unable to create irq domain", np->full_name);
+}
+
+static void __iomem * __init icoll_init_iobase(struct device_node *np)
+{
+       void __iomem *icoll_base;
+
+       icoll_base = of_io_request_and_map(np, 0, np->name);
+       if (!icoll_base)
+               panic("%s: unable to map resource", np->full_name);
+       return icoll_base;
+}
+
 static int __init icoll_of_init(struct device_node *np,
                          struct device_node *interrupt_parent)
 {
-       icoll_base = of_iomap(np, 0);
-       WARN_ON(!icoll_base);
+       void __iomem *icoll_base;
+
+       icoll_priv.type = ICOLL;
+
+       icoll_base              = icoll_init_iobase(np);
+       icoll_priv.vector       = icoll_base + HW_ICOLL_VECTOR;
+       icoll_priv.levelack     = icoll_base + HW_ICOLL_LEVELACK;
+       icoll_priv.ctrl         = icoll_base + HW_ICOLL_CTRL;
+       icoll_priv.stat         = icoll_base + HW_ICOLL_STAT_OFFSET;
+       icoll_priv.intr         = icoll_base + HW_ICOLL_INTERRUPT0;
+       icoll_priv.clear        = NULL;
 
        /*
         * Interrupt Collector reset, which initializes the priority
         * for each irq to level 0.
         */
-       stmp_reset_block(icoll_base + HW_ICOLL_CTRL);
+       stmp_reset_block(icoll_priv.ctrl);
 
-       icoll_domain = irq_domain_add_linear(np, ICOLL_NUM_IRQS,
-                                            &icoll_irq_domain_ops, NULL);
-       return icoll_domain ? 0 : -ENODEV;
+       icoll_add_domain(np, ICOLL_NUM_IRQS);
+
+       return 0;
 }
 IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init);
+
+static int __init asm9260_of_init(struct device_node *np,
+                         struct device_node *interrupt_parent)
+{
+       void __iomem *icoll_base;
+       int i;
+
+       icoll_priv.type = ASM9260_ICOLL;
+
+       icoll_base = icoll_init_iobase(np);
+       icoll_priv.vector       = icoll_base + ASM9260_HW_ICOLL_VECTOR;
+       icoll_priv.levelack     = icoll_base + ASM9260_HW_ICOLL_LEVELACK;
+       icoll_priv.ctrl         = icoll_base + ASM9260_HW_ICOLL_CTRL;
+       icoll_priv.stat         = icoll_base + ASM9260_HW_ICOLL_STAT_OFFSET;
+       icoll_priv.intr         = icoll_base + ASM9260_HW_ICOLL_INTERRUPT0;
+       icoll_priv.clear        = icoll_base + ASM9260_HW_ICOLL_CLEAR0;
+
+       writel_relaxed(ASM9260_BM_CTRL_IRQ_ENABLE,
+                       icoll_priv.ctrl);
+       /*
+        * The ASM9260 doesn't provide a reset bit, so we need to set
+        * level 0 manually.
+        */
+       for (i = 0; i < 16 * 0x10; i += 0x10)
+               writel(0, icoll_priv.intr + i);
+
+       icoll_add_domain(np, ASM9260_NUM_IRQS);
+
+       return 0;
+}
+IRQCHIP_DECLARE(asm9260, "alphascale,asm9260-icoll", asm9260_of_init);
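
Editor's note: the SET_REG/CLR_REG offsets introduced above rely on the stmp-style register layout, where each register also has write-1-to-set and write-1-to-clear aliases so bits can be changed without a read-modify-write cycle. A minimal sketch of that access pattern, with hypothetical offsets and helper names:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_SET_REG	0x4	/* writing 1 sets the corresponding bits */
#define EXAMPLE_CLR_REG	0x8	/* writing 1 clears the corresponding bits */

static void example_enable_bits(void __iomem *reg, u32 mask)
{
	/* OR the bits in without reading the register first */
	__raw_writel(mask, reg + EXAMPLE_SET_REG);
}

static void example_disable_bits(void __iomem *reg, u32 mask)
{
	/* clear the bits without touching their neighbours */
	__raw_writel(mask, reg + EXAMPLE_CLR_REG);
}
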
index a878b8d03868823a222af0ee4d0f296793673a7d..b1777104fd9feca9c739db29caa1d94cc36d0edd 100644 (file)
@@ -48,16 +48,26 @@ nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
        handle_IRQ(irq, regs);
 }
 
+static int nvic_irq_domain_translate(struct irq_domain *d,
+                                    struct irq_fwspec *fwspec,
+                                    unsigned long *hwirq, unsigned int *type)
+{
+       if (WARN_ON(fwspec->param_count < 1))
+               return -EINVAL;
+       *hwirq = fwspec->param[0];
+       *type = IRQ_TYPE_NONE;
+       return 0;
+}
+
 static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
 {
        int i, ret;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
-       struct of_phandle_args *irq_data = arg;
+       struct irq_fwspec *fwspec = arg;
 
-       ret = irq_domain_xlate_onecell(domain, irq_data->np, irq_data->args,
-                                  irq_data->args_count, &hwirq, &type);
+       ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;
 
@@ -68,7 +78,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 }
 
 static const struct irq_domain_ops nvic_irq_domain_ops = {
-       .xlate = irq_domain_xlate_onecell,
+       .translate = nvic_irq_domain_translate,
        .alloc = nvic_irq_domain_alloc,
        .free = irq_domain_free_irqs_top,
 };
index 9525335723f68f7e4c408d81a6538dc8e72a2b5f..c325806561bedd932db973470d4bedfaf3950886 100644 (file)
@@ -361,14 +361,16 @@ static const struct irq_domain_ops intc_irqpin_irq_domain_ops = {
        .xlate  = irq_domain_xlate_twocell,
 };
 
-static const struct intc_irqpin_irlm_config intc_irqpin_irlm_r8a7779 = {
+static const struct intc_irqpin_irlm_config intc_irqpin_irlm_r8a777x = {
        .irlm_bit = 23, /* ICR0.IRLM0 */
 };
 
 static const struct of_device_id intc_irqpin_dt_ids[] = {
        { .compatible = "renesas,intc-irqpin", },
+       { .compatible = "renesas,intc-irqpin-r8a7778",
+         .data = &intc_irqpin_irlm_r8a777x },
        { .compatible = "renesas,intc-irqpin-r8a7779",
-         .data = &intc_irqpin_irlm_r8a7779 },
+         .data = &intc_irqpin_irlm_r8a777x },
        {},
 };
 MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
index 35bf97ba4a3d196db74ff5835dc9c59fb056c2af..52304b139aa46a60966d03922197b060f4693cf1 100644 (file)
@@ -62,33 +62,20 @@ struct irqc_priv {
        struct irqc_irq irq[IRQC_IRQ_MAX];
        unsigned int number_of_irqs;
        struct platform_device *pdev;
-       struct irq_chip irq_chip;
+       struct irq_chip_generic *gc;
        struct irq_domain *irq_domain;
        struct clk *clk;
 };
 
-static void irqc_dbg(struct irqc_irq *i, char *str)
-{
-       dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
-               str, i->requested_irq, i->hw_irq);
-}
-
-static void irqc_irq_enable(struct irq_data *d)
+static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
 {
-       struct irqc_priv *p = irq_data_get_irq_chip_data(d);
-       int hw_irq = irqd_to_hwirq(d);
-
-       irqc_dbg(&p->irq[hw_irq], "enable");
-       iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_SET);
+       return data->domain->host_data;
 }
 
-static void irqc_irq_disable(struct irq_data *d)
+static void irqc_dbg(struct irqc_irq *i, char *str)
 {
-       struct irqc_priv *p = irq_data_get_irq_chip_data(d);
-       int hw_irq = irqd_to_hwirq(d);
-
-       irqc_dbg(&p->irq[hw_irq], "disable");
-       iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_STS);
+       dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
+               str, i->requested_irq, i->hw_irq);
 }
 
 static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
@@ -101,7 +88,7 @@ static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
 
 static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
 {
-       struct irqc_priv *p = irq_data_get_irq_chip_data(d);
+       struct irqc_priv *p = irq_data_to_priv(d);
        int hw_irq = irqd_to_hwirq(d);
        unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
        u32 tmp;
@@ -120,7 +107,7 @@ static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
 
 static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
 {
-       struct irqc_priv *p = irq_data_get_irq_chip_data(d);
+       struct irqc_priv *p = irq_data_to_priv(d);
        int hw_irq = irqd_to_hwirq(d);
 
        irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
@@ -153,35 +140,11 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
-/*
- * This lock class tells lockdep that IRQC irqs are in a different
- * category than their parents, so it won't report false recursion.
- */
-static struct lock_class_key irqc_irq_lock_class;
-
-static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq,
-                              irq_hw_number_t hw)
-{
-       struct irqc_priv *p = h->host_data;
-
-       irqc_dbg(&p->irq[hw], "map");
-       irq_set_chip_data(virq, h->host_data);
-       irq_set_lockdep_class(virq, &irqc_irq_lock_class);
-       irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
-       return 0;
-}
-
-static const struct irq_domain_ops irqc_irq_domain_ops = {
-       .map    = irqc_irq_domain_map,
-       .xlate  = irq_domain_xlate_twocell,
-};
-
 static int irqc_probe(struct platform_device *pdev)
 {
        struct irqc_priv *p;
        struct resource *io;
        struct resource *irq;
-       struct irq_chip *irq_chip;
        const char *name = dev_name(&pdev->dev);
        int ret;
        int k;
@@ -241,40 +204,51 @@ static int irqc_probe(struct platform_device *pdev)
 
        p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */
 
-       irq_chip = &p->irq_chip;
-       irq_chip->name = name;
-       irq_chip->irq_mask = irqc_irq_disable;
-       irq_chip->irq_unmask = irqc_irq_enable;
-       irq_chip->irq_set_type = irqc_irq_set_type;
-       irq_chip->irq_set_wake = irqc_irq_set_wake;
-       irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND;
-
        p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
                                              p->number_of_irqs,
-                                             &irqc_irq_domain_ops, p);
+                                             &irq_generic_chip_ops, p);
        if (!p->irq_domain) {
                ret = -ENXIO;
                dev_err(&pdev->dev, "cannot initialize irq domain\n");
                goto err2;
        }
 
+       ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
+                                            1, name, handle_level_irq,
+                                            0, 0, IRQ_GC_INIT_NESTED_LOCK);
+       if (ret) {
+               dev_err(&pdev->dev, "cannot allocate generic chip\n");
+               goto err3;
+       }
+
+       p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
+       p->gc->reg_base = p->cpu_int_base;
+       p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
+       p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
+       p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+       p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+       p->gc->chip_types[0].chip.irq_set_type  = irqc_irq_set_type;
+       p->gc->chip_types[0].chip.irq_set_wake  = irqc_irq_set_wake;
+       p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
        /* request interrupts one by one */
        for (k = 0; k < p->number_of_irqs; k++) {
                if (request_irq(p->irq[k].requested_irq, irqc_irq_handler,
                                0, name, &p->irq[k])) {
                        dev_err(&pdev->dev, "failed to request IRQ\n");
                        ret = -ENOENT;
-                       goto err3;
+                       goto err4;
                }
        }
 
        dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);
 
        return 0;
-err3:
+err4:
        while (--k >= 0)
                free_irq(p->irq[k].requested_irq, &p->irq[k]);
 
+err3:
        irq_domain_remove(p->irq_domain);
 err2:
        iounmap(p->iomem);
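
Editor's note: the irqc hunk above replaces a hand-rolled irq_chip and .map callback with the generic-chip helpers. A minimal sketch of that setup sequence, assuming one chip type, level-triggered handling, and hypothetical names and register offsets:

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#define EXAMPLE_EN_SET	0x08	/* hypothetical enable-set register */
#define EXAMPLE_EN_CLR	0x0c	/* hypothetical enable-clear register */

static int example_setup_domain(struct device_node *np, void __iomem *base,
				unsigned int nr_irqs)
{
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	int ret;

	domain = irq_domain_add_linear(np, nr_irqs, &irq_generic_chip_ops, NULL);
	if (!domain)
		return -ENOMEM;

	ret = irq_alloc_domain_generic_chips(domain, nr_irqs, 1, "example",
					     handle_level_irq, 0, 0,
					     IRQ_GC_INIT_NESTED_LOCK);
	if (ret) {
		irq_domain_remove(domain);
		return ret;
	}

	/* mask/unmask via dedicated enable/disable registers */
	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = base;
	gc->chip_types[0].regs.enable = EXAMPLE_EN_SET;
	gc->chip_types[0].regs.disable = EXAMPLE_EN_CLR;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
	gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;

	return 0;
}
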
index 7154b011ddd2f65a65f7079bfe88783c50d0d373..c71914e8f596c3700d8c5a4da2448b32acf8c41f 100644 (file)
@@ -311,7 +311,7 @@ static void s3c_irq_demux(struct irq_desc *desc)
         * and one big domain for the dt case where the subintc
         * starts at hwirq number 32.
         */
-       offset = (intc->domain->of_node) ? 32 : 0;
+       offset = irq_domain_get_of_node(intc->domain) ? 32 : 0;
 
        chained_irq_enter(chip, desc);
 
@@ -342,7 +342,7 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc,
                return false;
 
        /* non-dt machines use individual domains */
-       if (!intc->domain->of_node)
+       if (!irq_domain_get_of_node(intc->domain))
                intc_offset = 0;
 
        /* We have a problem that the INTOFFSET register does not always
index c143dd58410c64cc97d5afcc5442a45d5b60ebf9..4ef178078e5bd7ddf388e215eddc1bde3151b0fb 100644 (file)
@@ -8,6 +8,9 @@
  * warranty of any kind, whether express or implied.
  */
 
+#define DRV_NAME       "sunxi-nmi"
+#define pr_fmt(fmt)    DRV_NAME ": " fmt
+
 #include <linux/bitops.h>
 #include <linux/device.h>
 #include <linux/io.h>
@@ -96,8 +99,8 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
                break;
        default:
                irq_gc_unlock(gc);
-               pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
-                       __func__, data->irq);
+               pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
+                       data->irq);
                return -EBADR;
        }
 
@@ -130,30 +133,29 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
 
        domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
        if (!domain) {
-               pr_err("%s: Could not register interrupt domain.\n", node->name);
+               pr_err("Could not register interrupt domain.\n");
                return -ENOMEM;
        }
 
-       ret = irq_alloc_domain_generic_chips(domain, 1, 2, node->name,
+       ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
                                             handle_fasteoi_irq, clr, 0,
                                             IRQ_GC_INIT_MASK_CACHE);
        if (ret) {
-                pr_err("%s: Could not allocate generic interrupt chip.\n",
-                        node->name);
-                goto fail_irqd_remove;
+               pr_err("Could not allocate generic interrupt chip.\n");
+               goto fail_irqd_remove;
        }
 
        irq = irq_of_parse_and_map(node, 0);
        if (irq <= 0) {
-               pr_err("%s: unable to parse irq\n", node->name);
+               pr_err("unable to parse irq\n");
                ret = -EINVAL;
                goto fail_irqd_remove;
        }
 
        gc = irq_get_domain_generic_chip(domain, 0);
-       gc->reg_base = of_iomap(node, 0);
+       gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
        if (!gc->reg_base) {
-               pr_err("%s: unable to map resource\n", node->name);
+               pr_err("unable to map resource\n");
                ret = -ENOMEM;
                goto fail_irqd_remove;
        }
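
Editor's note: the sunxi-nmi hunk above defines DRV_NAME and pr_fmt() so that every pr_err() in the file is automatically prefixed with the driver name, instead of passing node->name or __func__ by hand. A minimal sketch of the idiom; the names are placeholders, and the define must come before any header that pulls in printk.h:

/* must appear before the first #include that drags in <linux/printk.h> */
#define pr_fmt(fmt)	"example-drv: " fmt

#include <linux/printk.h>

static void example_report(int irq)
{
	/* prints: "example-drv: unable to parse irq 5" */
	pr_err("unable to parse irq %d\n", irq);
}
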
index 2fd89eb88f3a29bc716ec1daf76ad4085e6a743d..121ec301372e69cbeca0bff90f41fc0653f0012a 100644 (file)
@@ -214,47 +214,50 @@ static struct irq_chip tegra_ictlr_chip = {
        .irq_unmask             = tegra_unmask,
        .irq_retrigger          = tegra_retrigger,
        .irq_set_wake           = tegra_set_wake,
+       .irq_set_type           = irq_chip_set_type_parent,
        .flags                  = IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
 #endif
 };
 
-static int tegra_ictlr_domain_xlate(struct irq_domain *domain,
-                                   struct device_node *controller,
-                                   const u32 *intspec,
-                                   unsigned int intsize,
-                                   unsigned long *out_hwirq,
-                                   unsigned int *out_type)
+static int tegra_ictlr_domain_translate(struct irq_domain *d,
+                                       struct irq_fwspec *fwspec,
+                                       unsigned long *hwirq,
+                                       unsigned int *type)
 {
-       if (domain->of_node != controller)
-               return -EINVAL; /* Shouldn't happen, really... */
-       if (intsize != 3)
-               return -EINVAL; /* Not GIC compliant */
-       if (intspec[0] != GIC_SPI)
-               return -EINVAL; /* No PPI should point to this domain */
+       if (is_of_node(fwspec->fwnode)) {
+               if (fwspec->param_count != 3)
+                       return -EINVAL;
 
-       *out_hwirq = intspec[1];
-       *out_type = intspec[2];
-       return 0;
+               /* No PPI should point to this domain */
+               if (fwspec->param[0] != 0)
+                       return -EINVAL;
+
+               *hwirq = fwspec->param[1];
+               *type = fwspec->param[2];
+               return 0;
+       }
+
+       return -EINVAL;
 }
 
 static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
                                    unsigned int virq,
                                    unsigned int nr_irqs, void *data)
 {
-       struct of_phandle_args *args = data;
-       struct of_phandle_args parent_args;
+       struct irq_fwspec *fwspec = data;
+       struct irq_fwspec parent_fwspec;
        struct tegra_ictlr_info *info = domain->host_data;
        irq_hw_number_t hwirq;
        unsigned int i;
 
-       if (args->args_count != 3)
+       if (fwspec->param_count != 3)
                return -EINVAL; /* Not GIC compliant */
-       if (args->args[0] != GIC_SPI)
+       if (fwspec->param[0] != GIC_SPI)
                return -EINVAL; /* No PPI should point to this domain */
 
-       hwirq = args->args[1];
+       hwirq = fwspec->param[1];
        if (hwirq >= (num_ictlrs * 32))
                return -EINVAL;
 
@@ -266,9 +269,10 @@ static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
                                              info->base[ictlr]);
        }
 
-       parent_args = *args;
-       parent_args.np = domain->parent->of_node;
-       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args);
+       parent_fwspec = *fwspec;
+       parent_fwspec.fwnode = domain->parent->fwnode;
+       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+                                           &parent_fwspec);
 }
 
 static void tegra_ictlr_domain_free(struct irq_domain *domain,
@@ -284,9 +288,9 @@ static void tegra_ictlr_domain_free(struct irq_domain *domain,
 }
 
 static const struct irq_domain_ops tegra_ictlr_domain_ops = {
-       .xlate  = tegra_ictlr_domain_xlate,
-       .alloc  = tegra_ictlr_domain_alloc,
-       .free   = tegra_ictlr_domain_free,
+       .translate      = tegra_ictlr_domain_translate,
+       .alloc          = tegra_ictlr_domain_alloc,
+       .free           = tegra_ictlr_domain_free,
 };
 
 static int __init tegra_ictlr_init(struct device_node *node,
index 2c22558864012efe6425ab1f058ea0e07a9f0728..56b5e3cb9de2c4f2c050fcad42b9f91419e92b81 100644 (file)
@@ -130,35 +130,51 @@ static int vf610_mscm_ir_domain_alloc(struct irq_domain *domain, unsigned int vi
 {
        int i;
        irq_hw_number_t hwirq;
-       struct of_phandle_args *irq_data = arg;
-       struct of_phandle_args gic_data;
+       struct irq_fwspec *fwspec = arg;
+       struct irq_fwspec parent_fwspec;
 
-       if (irq_data->args_count != 2)
+       if (!irq_domain_get_of_node(domain->parent))
                return -EINVAL;
 
-       hwirq = irq_data->args[0];
+       if (fwspec->param_count != 2)
+               return -EINVAL;
+
+       hwirq = fwspec->param[0];
        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                              &vf610_mscm_ir_irq_chip,
                                              domain->host_data);
 
-       gic_data.np = domain->parent->of_node;
+       parent_fwspec.fwnode = domain->parent->fwnode;
 
        if (mscm_ir_data->is_nvic) {
-               gic_data.args_count = 1;
-               gic_data.args[0] = irq_data->args[0];
+               parent_fwspec.param_count = 1;
+               parent_fwspec.param[0] = fwspec->param[0];
        } else {
-               gic_data.args_count = 3;
-               gic_data.args[0] = GIC_SPI;
-               gic_data.args[1] = irq_data->args[0];
-               gic_data.args[2] = irq_data->args[1];
+               parent_fwspec.param_count = 3;
+               parent_fwspec.param[0] = GIC_SPI;
+               parent_fwspec.param[1] = fwspec->param[0];
+               parent_fwspec.param[2] = fwspec->param[1];
        }
 
-       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data);
+       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+                                           &parent_fwspec);
+}
+
+static int vf610_mscm_ir_domain_translate(struct irq_domain *d,
+                                         struct irq_fwspec *fwspec,
+                                         unsigned long *hwirq,
+                                         unsigned int *type)
+{
+       if (WARN_ON(fwspec->param_count < 2))
+               return -EINVAL;
+       *hwirq = fwspec->param[0];
+       *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+       return 0;
 }
 
 static const struct irq_domain_ops mscm_irq_domain_ops = {
-       .xlate = irq_domain_xlate_twocell,
+       .translate = vf610_mscm_ir_domain_translate,
        .alloc = vf610_mscm_ir_domain_alloc,
        .free = irq_domain_free_irqs_common,
 };
@@ -205,7 +221,8 @@ static int __init vf610_mscm_ir_of_init(struct device_node *node,
                goto out_unmap;
        }
 
-       if (of_device_is_compatible(domain->parent->of_node, "arm,armv7m-nvic"))
+       if (of_device_is_compatible(irq_domain_get_of_node(domain->parent),
+                                   "arm,armv7m-nvic"))
                mscm_ir_data->is_nvic = true;
 
        cpu_pm_register_notifier(&mscm_ir_notifier_block);
index 18accb0a79cc51dea2d1851fc9a27891e8d79d6f..c53a53f6efb6a136ec09075996bd89081e50e536 100644 (file)
@@ -1247,7 +1247,7 @@ static void
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
        struct PStack *st = fi->userdata;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *nskb;
        struct Layer2 *l2 = &st->l2;
        u_char header[MAX_HEADER_LEN];
        int i, hdr_space_needed;
@@ -1262,14 +1262,10 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
                return;
 
        hdr_space_needed = l2headersize(l2, 0);
-       if (hdr_space_needed > skb_headroom(skb)) {
-               struct sk_buff *orig_skb = skb;
-
-               skb = skb_realloc_headroom(skb, hdr_space_needed);
-               if (!skb) {
-                       dev_kfree_skb(orig_skb);
-                       return;
-               }
+       nskb = skb_realloc_headroom(skb, hdr_space_needed);
+       if (!nskb) {
+               skb_queue_head(&l2->i_queue, skb);
+               return;
        }
        spin_lock_irqsave(&l2->lock, flags);
        if (test_bit(FLG_MOD128, &l2->flag))
@@ -1282,7 +1278,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
                       p1);
                dev_kfree_skb(l2->windowar[p1]);
        }
-       l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC);
+       l2->windowar[p1] = skb;
 
        i = sethdraddr(&st->l2, header, CMD);
 
@@ -1295,8 +1291,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
                l2->vs = (l2->vs + 1) % 8;
        }
        spin_unlock_irqrestore(&l2->lock, flags);
-       memcpy(skb_push(skb, i), header, i);
-       st->l2.l2l1(st, PH_PULL | INDICATION, skb);
+       memcpy(skb_push(nskb, i), header, i);
+       st->l2.l2l1(st, PH_PULL | INDICATION, nskb);
        test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
        if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
                FsmDelTimer(&st->l2.t203, 13);
index 949cabb88f1c113c9606d26ff72627f6cf9db108..5eb380a2590394ba087e2f160281e1902112910f 100644 (file)
@@ -1476,7 +1476,7 @@ static void
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
        struct layer2   *l2 = fi->userdata;
-       struct sk_buff  *skb, *nskb, *oskb;
+       struct sk_buff  *skb, *nskb;
        u_char          header[MAX_L2HEADER_LEN];
        u_int           i, p1;
 
@@ -1486,48 +1486,34 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
        skb = skb_dequeue(&l2->i_queue);
        if (!skb)
                return;
-
-       if (test_bit(FLG_MOD128, &l2->flag))
-               p1 = (l2->vs - l2->va) % 128;
-       else
-               p1 = (l2->vs - l2->va) % 8;
-       p1 = (p1 + l2->sow) % l2->window;
-       if (l2->windowar[p1]) {
-               printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
-                      mISDNDevName4ch(&l2->ch), p1);
-               dev_kfree_skb(l2->windowar[p1]);
-       }
-       l2->windowar[p1] = skb;
        i = sethdraddr(l2, header, CMD);
        if (test_bit(FLG_MOD128, &l2->flag)) {
                header[i++] = l2->vs << 1;
                header[i++] = l2->vr << 1;
+       } else
+               header[i++] = (l2->vr << 5) | (l2->vs << 1);
+       nskb = skb_realloc_headroom(skb, i);
+       if (!nskb) {
+               printk(KERN_WARNING "%s: no headroom(%d) copy for IFrame\n",
+                      mISDNDevName4ch(&l2->ch), i);
+               skb_queue_head(&l2->i_queue, skb);
+               return;
+       }
+       if (test_bit(FLG_MOD128, &l2->flag)) {
+               p1 = (l2->vs - l2->va) % 128;
                l2->vs = (l2->vs + 1) % 128;
        } else {
-               header[i++] = (l2->vr << 5) | (l2->vs << 1);
+               p1 = (l2->vs - l2->va) % 8;
                l2->vs = (l2->vs + 1) % 8;
        }
-
-       nskb = skb_clone(skb, GFP_ATOMIC);
-       p1 = skb_headroom(nskb);
-       if (p1 >= i)
-               memcpy(skb_push(nskb, i), header, i);
-       else {
-               printk(KERN_WARNING
-                      "%s: L2 pull_iqueue skb header(%d/%d) too short\n",
-                      mISDNDevName4ch(&l2->ch), i, p1);
-               oskb = nskb;
-               nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
-               if (!nskb) {
-                       dev_kfree_skb(oskb);
-                       printk(KERN_WARNING "%s: no skb mem in %s\n",
-                              mISDNDevName4ch(&l2->ch), __func__);
-                       return;
-               }
-               memcpy(skb_put(nskb, i), header, i);
-               memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
-               dev_kfree_skb(oskb);
+       p1 = (p1 + l2->sow) % l2->window;
+       if (l2->windowar[p1]) {
+               printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
+                      mISDNDevName4ch(&l2->ch), p1);
+               dev_kfree_skb(l2->windowar[p1]);
        }
+       l2->windowar[p1] = skb;
+       memcpy(skb_push(nskb, i), header, i);
        l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
        test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
        if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
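
Editor's note: both layer-2 hunks above (HiSax and mISDN) switch to the same scheme: a private copy with enough headroom is always taken with skb_realloc_headroom(), the original skb is kept in the ack window, and on allocation failure the frame is put back on the i_queue instead of being dropped. A minimal sketch of that error path; example_prepend_header is a hypothetical helper:

#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *example_prepend_header(struct sk_buff_head *queue,
					      struct sk_buff *skb,
					      const u8 *header, int hdr_len)
{
	struct sk_buff *nskb;

	/* always take a private copy with room for the L2 header */
	nskb = skb_realloc_headroom(skb, hdr_len);
	if (!nskb) {
		/* keep the original frame queued for a later retry */
		skb_queue_head(queue, skb);
		return NULL;
	}

	memcpy(skb_push(nskb, hdr_len), header, hdr_len);
	return nskb;
}
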
index 42990f2d03178885d6021bce5ef3a9315d70f447..b1ab8bdf82519b9da91c082198cf7acef6f61692 100644 (file)
@@ -556,6 +556,16 @@ config LEDS_KTD2692
 
          Say Y to enable this driver.
 
+config LEDS_SEAD3
+       tristate "LED support for the MIPS SEAD 3 board"
+       depends on LEDS_CLASS && MIPS_SEAD3
+       help
+         Say Y here to include support for the FLED and PLED LEDs on SEAD3 eval
+         boards.
+
+         This driver can also be built as a module. If so, the module
+         will be called leds-sead3.
+
 comment "LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)"
 
 config LEDS_BLINKM
index b503f92dc2c4a3990d5d435b62dab3120973e97f..e9d53092765d8ca8ae43e970dc63ec1c23e96b78 100644 (file)
@@ -65,6 +65,7 @@ obj-$(CONFIG_LEDS_VERSATILE)          += leds-versatile.o
 obj-$(CONFIG_LEDS_MENF21BMC)           += leds-menf21bmc.o
 obj-$(CONFIG_LEDS_KTD2692)             += leds-ktd2692.o
 obj-$(CONFIG_LEDS_POWERNV)             += leds-powernv.o
+obj-$(CONFIG_LEDS_SEAD3)               += leds-sead3.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)          += leds-dac124s085.o
index ca51d58bed244c488d815003c366d03acf79e418..7385f98dd54b4b42cf7e430b94952150b979827b 100644 (file)
@@ -102,70 +102,6 @@ static const struct attribute_group *led_groups[] = {
        NULL,
 };
 
-static void led_timer_function(unsigned long data)
-{
-       struct led_classdev *led_cdev = (void *)data;
-       unsigned long brightness;
-       unsigned long delay;
-
-       if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
-               led_set_brightness_async(led_cdev, LED_OFF);
-               return;
-       }
-
-       if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
-               led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
-               return;
-       }
-
-       brightness = led_get_brightness(led_cdev);
-       if (!brightness) {
-               /* Time to switch the LED on. */
-               if (led_cdev->delayed_set_value) {
-                       led_cdev->blink_brightness =
-                                       led_cdev->delayed_set_value;
-                       led_cdev->delayed_set_value = 0;
-               }
-               brightness = led_cdev->blink_brightness;
-               delay = led_cdev->blink_delay_on;
-       } else {
-               /* Store the current brightness value to be able
-                * to restore it when the delay_off period is over.
-                */
-               led_cdev->blink_brightness = brightness;
-               brightness = LED_OFF;
-               delay = led_cdev->blink_delay_off;
-       }
-
-       led_set_brightness_async(led_cdev, brightness);
-
-       /* Return in next iteration if led is in one-shot mode and we are in
-        * the final blink state so that the led is toggled each delay_on +
-        * delay_off milliseconds in worst case.
-        */
-       if (led_cdev->flags & LED_BLINK_ONESHOT) {
-               if (led_cdev->flags & LED_BLINK_INVERT) {
-                       if (brightness)
-                               led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
-               } else {
-                       if (!brightness)
-                               led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
-               }
-       }
-
-       mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
-}
-
-static void set_brightness_delayed(struct work_struct *ws)
-{
-       struct led_classdev *led_cdev =
-               container_of(ws, struct led_classdev, set_brightness_work);
-
-       led_stop_software_blink(led_cdev);
-
-       led_set_brightness_async(led_cdev, led_cdev->delayed_set_value);
-}
-
 /**
  * led_classdev_suspend - suspend an led_classdev.
  * @led_cdev: the led_classdev to suspend.
@@ -283,10 +219,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
 
        led_update_brightness(led_cdev);
 
-       INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
-
-       setup_timer(&led_cdev->blink_timer, led_timer_function,
-                   (unsigned long)led_cdev);
+       led_init_core(led_cdev);
 
 #ifdef CONFIG_LEDS_TRIGGERS
        led_trigger_set_default(led_cdev);
index 549de7e24cfdf445f27180ab7c776da21af50a94..c1c3af089634cded4f27fd8e6397a9519025ad70 100644 (file)
@@ -25,6 +25,70 @@ EXPORT_SYMBOL_GPL(leds_list_lock);
 LIST_HEAD(leds_list);
 EXPORT_SYMBOL_GPL(leds_list);
 
+static void led_timer_function(unsigned long data)
+{
+       struct led_classdev *led_cdev = (void *)data;
+       unsigned long brightness;
+       unsigned long delay;
+
+       if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
+               led_set_brightness_async(led_cdev, LED_OFF);
+               return;
+       }
+
+       if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
+               led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+               return;
+       }
+
+       brightness = led_get_brightness(led_cdev);
+       if (!brightness) {
+               /* Time to switch the LED on. */
+               if (led_cdev->delayed_set_value) {
+                       led_cdev->blink_brightness =
+                                       led_cdev->delayed_set_value;
+                       led_cdev->delayed_set_value = 0;
+               }
+               brightness = led_cdev->blink_brightness;
+               delay = led_cdev->blink_delay_on;
+       } else {
+               /* Store the current brightness value to be able
+                * to restore it when the delay_off period is over.
+                */
+               led_cdev->blink_brightness = brightness;
+               brightness = LED_OFF;
+               delay = led_cdev->blink_delay_off;
+       }
+
+       led_set_brightness_async(led_cdev, brightness);
+
+       /* Return in next iteration if led is in one-shot mode and we are in
+        * the final blink state so that the led is toggled each delay_on +
+        * delay_off milliseconds in worst case.
+        */
+       if (led_cdev->flags & LED_BLINK_ONESHOT) {
+               if (led_cdev->flags & LED_BLINK_INVERT) {
+                       if (brightness)
+                               led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
+               } else {
+                       if (!brightness)
+                               led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
+               }
+       }
+
+       mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
+}
+
+static void set_brightness_delayed(struct work_struct *ws)
+{
+       struct led_classdev *led_cdev =
+               container_of(ws, struct led_classdev, set_brightness_work);
+
+       led_stop_software_blink(led_cdev);
+
+       led_set_brightness_async(led_cdev, led_cdev->delayed_set_value);
+}
+
 static void led_set_software_blink(struct led_classdev *led_cdev,
                                   unsigned long delay_on,
                                   unsigned long delay_off)
@@ -72,6 +136,15 @@ static void led_blink_setup(struct led_classdev *led_cdev,
        led_set_software_blink(led_cdev, *delay_on, *delay_off);
 }
 
+void led_init_core(struct led_classdev *led_cdev)
+{
+       INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
+
+       setup_timer(&led_cdev->blink_timer, led_timer_function,
+                   (unsigned long)led_cdev);
+}
+EXPORT_SYMBOL_GPL(led_init_core);
+
 void led_blink_set(struct led_classdev *led_cdev,
                   unsigned long *delay_on,
                   unsigned long *delay_off)
index 1497a09166d6e44fb7e76edf002c0578a10aabf5..7870840e7cc9a43543ab72262082bce05e150c11 100644 (file)
@@ -142,6 +142,7 @@ static int pm860x_led_dt_init(struct platform_device *pdev,
                        of_property_read_u32(np, "marvell,88pm860x-iset",
                                             &iset);
                        data->iset = PM8606_LED_CURRENT(iset);
+                       of_node_put(np);
                        break;
                }
        }
index 1793727bc9ae5f6c64467a68b4646b92e0963e1b..c7ea5c62633186294eff8046dba51c8865d06eab 100644 (file)
 #define BCM6328_SERIAL_LED_SHIFT_DIR   BIT(16)
 #define BCM6328_LED_SHIFT_TEST         BIT(30)
 #define BCM6328_LED_TEST               BIT(31)
+#define BCM6328_INIT_MASK              (BCM6328_SERIAL_LED_EN | \
+                                        BCM6328_SERIAL_LED_MUX  | \
+                                        BCM6328_SERIAL_LED_CLK_NPOL | \
+                                        BCM6328_SERIAL_LED_DATA_PPOL | \
+                                        BCM6328_SERIAL_LED_SHIFT_DIR)
 
 #define BCM6328_LED_MODE_MASK          3
 #define BCM6328_LED_MODE_OFF           0
@@ -281,11 +286,10 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
                                                    "linux,default-trigger",
                                                    NULL);
 
+       spin_lock_irqsave(lock, flags);
        if (!of_property_read_string(nc, "default-state", &state)) {
-               spin_lock_irqsave(lock, flags);
                if (!strcmp(state, "on")) {
                        led->cdev.brightness = LED_FULL;
-                       bcm6328_led_mode(led, BCM6328_LED_MODE_ON);
                } else if (!strcmp(state, "keep")) {
                        void __iomem *mode;
                        unsigned long val, shift;
@@ -296,21 +300,28 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
                        else
                                mode = mem + BCM6328_REG_MODE_LO;
 
-                       val = bcm6328_led_read(mode) >> (shift % 16);
+                       val = bcm6328_led_read(mode) >>
+                             BCM6328_LED_SHIFT(shift % 16);
                        val &= BCM6328_LED_MODE_MASK;
-                       if (val == BCM6328_LED_MODE_ON)
+                       if ((led->active_low && val == BCM6328_LED_MODE_ON) ||
+                           (!led->active_low && val == BCM6328_LED_MODE_OFF))
                                led->cdev.brightness = LED_FULL;
-                       else {
+                       else
                                led->cdev.brightness = LED_OFF;
-                               bcm6328_led_mode(led, BCM6328_LED_MODE_OFF);
-                       }
                } else {
                        led->cdev.brightness = LED_OFF;
-                       bcm6328_led_mode(led, BCM6328_LED_MODE_OFF);
                }
-               spin_unlock_irqrestore(lock, flags);
+       } else {
+               led->cdev.brightness = LED_OFF;
        }
 
+       if ((led->active_low && led->cdev.brightness == LED_FULL) ||
+           (!led->active_low && led->cdev.brightness == LED_OFF))
+               bcm6328_led_mode(led, BCM6328_LED_MODE_ON);
+       else
+               bcm6328_led_mode(led, BCM6328_LED_MODE_OFF);
+       spin_unlock_irqrestore(lock, flags);
+
        led->cdev.brightness_set = bcm6328_led_set;
        led->cdev.blink_set = bcm6328_blink_set;
 
@@ -360,9 +371,17 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
        bcm6328_led_write(mem + BCM6328_REG_LNKACTSEL_LO, 0);
 
        val = bcm6328_led_read(mem + BCM6328_REG_INIT);
-       val &= ~BCM6328_SERIAL_LED_EN;
+       val &= ~(BCM6328_INIT_MASK);
        if (of_property_read_bool(np, "brcm,serial-leds"))
                val |= BCM6328_SERIAL_LED_EN;
+       if (of_property_read_bool(np, "brcm,serial-mux"))
+               val |= BCM6328_SERIAL_LED_MUX;
+       if (of_property_read_bool(np, "brcm,serial-clk-low"))
+               val |= BCM6328_SERIAL_LED_CLK_NPOL;
+       if (!of_property_read_bool(np, "brcm,serial-dat-low"))
+               val |= BCM6328_SERIAL_LED_DATA_PPOL;
+       if (!of_property_read_bool(np, "brcm,serial-shift-inv"))
+               val |= BCM6328_SERIAL_LED_SHIFT_DIR;
        bcm6328_led_write(mem + BCM6328_REG_INIT, val);
 
        for_each_available_child_of_node(np, child) {
@@ -373,7 +392,7 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
                        continue;
 
                if (reg >= BCM6328_LED_MAX_COUNT) {
-                       dev_err(dev, "invalid LED (>= %d)\n",
+                       dev_err(dev, "invalid LED (%u >= %d)\n", reg,
                                BCM6328_LED_MAX_COUNT);
                        continue;
                }
@@ -384,8 +403,10 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
                        rc = bcm6328_led(dev, child, reg, mem, lock,
                                         blink_leds, blink_delay);
 
-               if (rc < 0)
+               if (rc < 0) {
+                       of_node_put(child);
                        return rc;
+               }
        }
 
        return 0;
index 7ea3526702e0c56b2a841e0bf2d2dccbe3f606f6..82b4ee1bc87e03e3df18fe5e044270c0996b18a5 100644 (file)
@@ -215,8 +215,10 @@ static int bcm6358_leds_probe(struct platform_device *pdev)
                }
 
                rc = bcm6358_led(dev, child, reg, mem, lock);
-               if (rc < 0)
+               if (rc < 0) {
+                       of_node_put(child);
                        return rc;
+               }
        }
 
        return 0;
index d975220804916c9f35c734b9b4db212e36389f0a..9be195707b392d0c9dd8ca0da5f4633553342de0 100644 (file)
@@ -36,7 +36,6 @@ static struct led_classdev qube_front_led = {
 static int cobalt_qube_led_probe(struct platform_device *pdev)
 {
        struct resource *res;
-       int retval;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
@@ -49,31 +48,11 @@ static int cobalt_qube_led_probe(struct platform_device *pdev)
        led_value = LED_FRONT_LEFT | LED_FRONT_RIGHT;
        writeb(led_value, led_port);
 
-       retval = led_classdev_register(&pdev->dev, &qube_front_led);
-       if (retval)
-               goto err_null;
-
-       return 0;
-
-err_null:
-       led_port = NULL;
-
-       return retval;
-}
-
-static int cobalt_qube_led_remove(struct platform_device *pdev)
-{
-       led_classdev_unregister(&qube_front_led);
-
-       if (led_port)
-               led_port = NULL;
-
-       return 0;
+       return devm_led_classdev_register(&pdev->dev, &qube_front_led);
 }
 
 static struct platform_driver cobalt_qube_led_driver = {
        .probe  = cobalt_qube_led_probe,
-       .remove = cobalt_qube_led_remove,
        .driver = {
                .name   = "cobalt-qube-leds",
        },
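
Editor's note: the cobalt-qube hunk above, like the hp6xx, ipaq-micro, locomo, menf21bmc and net48xx hunks that follow, drops the explicit unregister/.remove path in favour of devm_led_classdev_register(), which unregisters the classdev automatically when the device is unbound. A minimal sketch of a probe using it; the driver and LED names are hypothetical:

#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static void example_led_set(struct led_classdev *cdev,
			    enum led_brightness value)
{
	/* a real driver would program its hardware here */
}

static struct led_classdev example_led = {
	.name		= "example:green:status",
	.brightness_set	= example_led_set,
};

static int example_led_probe(struct platform_device *pdev)
{
	/* no .remove needed: the classdev goes away with the device */
	return devm_led_classdev_register(&pdev->dev, &example_led);
}

static struct platform_driver example_led_driver = {
	.probe	= example_led_probe,
	.driver	= {
		.name = "example-led",
	},
};
module_platform_driver(example_led_driver);
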
index af1876a3a77c883a630f1c7765864ea28d3f0a03..5db4515a4fd7aaf292958a5a7fa982bcccceb999 100644 (file)
@@ -291,9 +291,22 @@ static int gpio_led_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void gpio_led_shutdown(struct platform_device *pdev)
+{
+       struct gpio_leds_priv *priv = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < priv->num_leds; i++) {
+               struct gpio_led_data *led = &priv->leds[i];
+
+               gpio_led_set(&led->cdev, LED_OFF);
+       }
+}
+
 static struct platform_driver gpio_led_driver = {
        .probe          = gpio_led_probe,
        .remove         = gpio_led_remove,
+       .shutdown       = gpio_led_shutdown,
        .driver         = {
                .name   = "leds-gpio",
                .of_match_table = of_gpio_leds_match,
index 0b84c0113126046ac5af4ca2a0f1d988b4a9b61c..a6b8db0e27f11b8ce030efabf72542e9eb361159 100644 (file)
@@ -59,28 +59,15 @@ static int hp6xxled_probe(struct platform_device *pdev)
 {
        int ret;
 
-       ret = led_classdev_register(&pdev->dev, &hp6xx_red_led);
+       ret = devm_led_classdev_register(&pdev->dev, &hp6xx_red_led);
        if (ret < 0)
                return ret;
 
-       ret = led_classdev_register(&pdev->dev, &hp6xx_green_led);
-       if (ret < 0)
-               led_classdev_unregister(&hp6xx_red_led);
-
-       return ret;
-}
-
-static int hp6xxled_remove(struct platform_device *pdev)
-{
-       led_classdev_unregister(&hp6xx_red_led);
-       led_classdev_unregister(&hp6xx_green_led);
-
-       return 0;
+       return devm_led_classdev_register(&pdev->dev, &hp6xx_green_led);
 }
 
 static struct platform_driver hp6xxled_driver = {
        .probe          = hp6xxled_probe,
-       .remove         = hp6xxled_remove,
        .driver         = {
                .name           = "hp6xx-led",
        },
index 3776f516cd88f80541affad80ce4dcb25963152c..fa262b6b25eb26dfab24fa784200ae6175996f06 100644 (file)
@@ -16,9 +16,9 @@
 #define LED_YELLOW     0x00
 #define LED_GREEN      0x01
 
-#define LED_EN          (1 << 4)        /* LED ON/OFF 0:off, 1:on                       */
-#define LED_AUTOSTOP    (1 << 5)        /* LED ON/OFF auto stop set 0:disable, 1:enable */
-#define LED_ALWAYS      (1 << 6)        /* LED Interrupt Mask 0:No mask, 1:mask         */
+#define LED_EN       (1 << 4) /* LED ON/OFF 0:off, 1:on                       */
+#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop set 0:disable, 1:enable */
+#define LED_ALWAYS   (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask         */
 
 static void micro_leds_brightness_set(struct led_classdev *led_cdev,
                                      enum led_brightness value)
@@ -79,14 +79,14 @@ static int micro_leds_blink_set(struct led_classdev *led_cdev,
        };
 
        msg.tx_data[0] = LED_GREEN;
-        if (*delay_on > IPAQ_LED_MAX_DUTY ||
+       if (*delay_on > IPAQ_LED_MAX_DUTY ||
            *delay_off > IPAQ_LED_MAX_DUTY)
-                return -EINVAL;
+               return -EINVAL;
 
-        if (*delay_on == 0 && *delay_off == 0) {
-                *delay_on = 100;
-                *delay_off = 100;
-        }
+       if (*delay_on == 0 && *delay_off == 0) {
+               *delay_on = 100;
+               *delay_off = 100;
+       }
 
        msg.tx_data[1] = 0;
        if (*delay_on >= IPAQ_LED_MAX_DUTY)
@@ -111,7 +111,7 @@ static int micro_leds_probe(struct platform_device *pdev)
 {
        int ret;
 
-       ret = led_classdev_register(&pdev->dev, &micro_led);
+       ret = devm_led_classdev_register(&pdev->dev, &micro_led);
        if (ret) {
                dev_err(&pdev->dev, "registering led failed: %d\n", ret);
                return ret;
@@ -121,18 +121,11 @@ static int micro_leds_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int micro_leds_remove(struct platform_device *pdev)
-{
-       led_classdev_unregister(&micro_led);
-       return 0;
-}
-
 static struct platform_driver micro_leds_device_driver = {
        .driver = {
                .name    = "ipaq-micro-leds",
        },
        .probe   = micro_leds_probe,
-       .remove  = micro_leds_remove,
 };
 module_platform_driver(micro_leds_device_driver);
 
index 80ba048889d61faa76195805cd622b1dada80d02..24c4b53a6b9387095b464809809ca7e1d44c0cf4 100644 (file)
@@ -59,23 +59,13 @@ static int locomoled_probe(struct locomo_dev *ldev)
 {
        int ret;
 
-       ret = led_classdev_register(&ldev->dev, &locomo_led0);
+       ret = devm_led_classdev_register(&ldev->dev, &locomo_led0);
        if (ret < 0)
                return ret;
 
-       ret = led_classdev_register(&ldev->dev, &locomo_led1);
-       if (ret < 0)
-               led_classdev_unregister(&locomo_led0);
-
-       return ret;
+       return  devm_led_classdev_register(&ldev->dev, &locomo_led1);
 }
 
-static int locomoled_remove(struct locomo_dev *dev)
-{
-       led_classdev_unregister(&locomo_led0);
-       led_classdev_unregister(&locomo_led1);
-       return 0;
-}
 
 static struct locomo_driver locomoled_driver = {
        .drv = {
@@ -83,7 +73,6 @@ static struct locomo_driver locomoled_driver = {
        },
        .devid  = LOCOMO_DEVID_LED,
        .probe  = locomoled_probe,
-       .remove = locomoled_remove,
 };
 
 static int __init locomoled_init(void)
index 4b9eea815b1a420d0b1b97966aa04292a137c4ea..dec2a6e59676e074a9476a5c988c36f9f9c93865 100644 (file)
@@ -87,36 +87,20 @@ static int menf21bmc_led_probe(struct platform_device *pdev)
                leds[i].cdev.name = leds[i].name;
                leds[i].cdev.brightness_set = menf21bmc_led_set;
                leds[i].i2c_client = i2c_client;
-               ret = led_classdev_register(&pdev->dev, &leds[i].cdev);
-               if (ret < 0)
-                       goto err_free_leds;
+               ret = devm_led_classdev_register(&pdev->dev, &leds[i].cdev);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "failed to register LED device\n");
+                       return ret;
+               }
        }
        dev_info(&pdev->dev, "MEN 140F21P00 BMC LED device enabled\n");
 
        return 0;
 
-err_free_leds:
-       dev_err(&pdev->dev, "failed to register LED device\n");
-
-       for (i = i - 1; i >= 0; i--)
-               led_classdev_unregister(&leds[i].cdev);
-
-       return ret;
-}
-
-static int menf21bmc_led_remove(struct platform_device *pdev)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(leds); i++)
-               led_classdev_unregister(&leds[i].cdev);
-
-       return 0;
 }
 
 static struct platform_driver menf21bmc_led = {
        .probe          = menf21bmc_led_probe,
-       .remove         = menf21bmc_led_remove,
        .driver         = {
                .name           = "menf21bmc_led",
        },
index ec3a2e8adcae6840837b18fdeb336082c45331ae..0d214c2e403c2a049ca9287f265312d93390da23 100644 (file)
@@ -39,18 +39,11 @@ static struct led_classdev net48xx_error_led = {
 
 static int net48xx_led_probe(struct platform_device *pdev)
 {
-       return led_classdev_register(&pdev->dev, &net48xx_error_led);
-}
-
-static int net48xx_led_remove(struct platform_device *pdev)
-{
-       led_classdev_unregister(&net48xx_error_led);
-       return 0;
+       return devm_led_classdev_register(&pdev->dev, &net48xx_error_led);
 }
 
 static struct platform_driver net48xx_led_driver = {
        .probe          = net48xx_led_probe,
-       .remove         = net48xx_led_remove,
        .driver         = {
                .name           = DRVNAME,
        },
index 25e419752a7b7c5719baa69bd114d57720a62f92..4b88b93244be52fa711e8c329446c273a04a24fa 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/spinlock.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <linux/leds.h>
 #include <linux/platform_data/leds-kirkwood-netxbig.h>
 
@@ -70,7 +71,8 @@ static void gpio_ext_set_value(struct netxbig_gpio_ext *gpio_ext,
        spin_unlock_irqrestore(&gpio_ext_lock, flags);
 }
 
-static int gpio_ext_init(struct netxbig_gpio_ext *gpio_ext)
+static int gpio_ext_init(struct platform_device *pdev,
+                        struct netxbig_gpio_ext *gpio_ext)
 {
        int err;
        int i;
@@ -80,46 +82,28 @@ static int gpio_ext_init(struct netxbig_gpio_ext *gpio_ext)
 
        /* Configure address GPIOs. */
        for (i = 0; i < gpio_ext->num_addr; i++) {
-               err = gpio_request_one(gpio_ext->addr[i], GPIOF_OUT_INIT_LOW,
-                                      "GPIO extension addr");
+               err = devm_gpio_request_one(&pdev->dev, gpio_ext->addr[i],
+                                           GPIOF_OUT_INIT_LOW,
+                                           "GPIO extension addr");
                if (err)
-                       goto err_free_addr;
+                       return err;
        }
        /* Configure data GPIOs. */
        for (i = 0; i < gpio_ext->num_data; i++) {
-               err = gpio_request_one(gpio_ext->data[i], GPIOF_OUT_INIT_LOW,
-                                  "GPIO extension data");
+               err = devm_gpio_request_one(&pdev->dev, gpio_ext->data[i],
+                                           GPIOF_OUT_INIT_LOW,
+                                           "GPIO extension data");
                if (err)
-                       goto err_free_data;
+                       return err;
        }
        /* Configure "enable select" GPIO. */
-       err = gpio_request_one(gpio_ext->enable, GPIOF_OUT_INIT_LOW,
-                              "GPIO extension enable");
+       err = devm_gpio_request_one(&pdev->dev, gpio_ext->enable,
+                                   GPIOF_OUT_INIT_LOW,
+                                   "GPIO extension enable");
        if (err)
-               goto err_free_data;
+               return err;
 
        return 0;
-
-err_free_data:
-       for (i = i - 1; i >= 0; i--)
-               gpio_free(gpio_ext->data[i]);
-       i = gpio_ext->num_addr;
-err_free_addr:
-       for (i = i - 1; i >= 0; i--)
-               gpio_free(gpio_ext->addr[i]);
-
-       return err;
-}
-
-static void gpio_ext_free(struct netxbig_gpio_ext *gpio_ext)
-{
-       int i;
-
-       gpio_free(gpio_ext->enable);
-       for (i = gpio_ext->num_addr - 1; i >= 0; i--)
-               gpio_free(gpio_ext->addr[i]);
-       for (i = gpio_ext->num_data - 1; i >= 0; i--)
-               gpio_free(gpio_ext->data[i]);
 }
 
 /*
@@ -132,7 +116,6 @@ struct netxbig_led_data {
        int                     mode_addr;
        int                     *mode_val;
        int                     bright_addr;
-       int                     bright_max;
        struct                  netxbig_led_timer *timer;
        int                     num_timer;
        enum netxbig_led_mode   mode;
@@ -194,7 +177,7 @@ static void netxbig_led_set(struct led_classdev *led_cdev,
        struct netxbig_led_data *led_dat =
                container_of(led_cdev, struct netxbig_led_data, cdev);
        enum netxbig_led_mode mode;
-       int mode_val, bright_val;
+       int mode_val;
        int set_brightness = 1;
        unsigned long flags;
 
@@ -220,12 +203,9 @@ static void netxbig_led_set(struct led_classdev *led_cdev,
         * SATA LEDs. So, change the brightness setting for a single
         * SATA LED will affect all the others.
         */
-       if (set_brightness) {
-               bright_val = DIV_ROUND_UP(value * led_dat->bright_max,
-                                         LED_FULL);
+       if (set_brightness)
                gpio_ext_set_value(led_dat->gpio_ext,
-                                  led_dat->bright_addr, bright_val);
-       }
+                                  led_dat->bright_addr, value);
 
        spin_unlock_irqrestore(&led_dat->lock, flags);
 }
@@ -299,18 +279,11 @@ static struct attribute *netxbig_led_attrs[] = {
 };
 ATTRIBUTE_GROUPS(netxbig_led);
 
-static void delete_netxbig_led(struct netxbig_led_data *led_dat)
+static int create_netxbig_led(struct platform_device *pdev,
+                             struct netxbig_led_platform_data *pdata,
+                             struct netxbig_led_data *led_dat,
+                             const struct netxbig_led *template)
 {
-       led_classdev_unregister(&led_dat->cdev);
-}
-
-static int
-create_netxbig_led(struct platform_device *pdev,
-                  struct netxbig_led_data *led_dat,
-                  const struct netxbig_led *template)
-{
-       struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
-
        spin_lock_init(&led_dat->lock);
        led_dat->gpio_ext = pdata->gpio_ext;
        led_dat->cdev.name = template->name;
@@ -329,11 +302,11 @@ create_netxbig_led(struct platform_device *pdev,
         */
        led_dat->sata = 0;
        led_dat->cdev.brightness = LED_OFF;
+       led_dat->cdev.max_brightness = template->bright_max;
        led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
        led_dat->mode_addr = template->mode_addr;
        led_dat->mode_val = template->mode_val;
        led_dat->bright_addr = template->bright_addr;
-       led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
        led_dat->timer = pdata->timer;
        led_dat->num_timer = pdata->num_timer;
        /*
@@ -343,67 +316,274 @@ create_netxbig_led(struct platform_device *pdev,
        if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
                led_dat->cdev.groups = netxbig_led_groups;
 
-       return led_classdev_register(&pdev->dev, &led_dat->cdev);
+       return devm_led_classdev_register(&pdev->dev, &led_dat->cdev);
 }
 
-static int netxbig_led_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF_GPIO
+static int gpio_ext_get_of_pdata(struct device *dev, struct device_node *np,
+                                struct netxbig_gpio_ext *gpio_ext)
 {
-       struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
-       struct netxbig_led_data *leds_data;
-       int i;
+       int *addr, *data;
+       int num_addr, num_data;
        int ret;
+       int i;
 
-       if (!pdata)
-               return -EINVAL;
-
-       leds_data = devm_kzalloc(&pdev->dev,
-               sizeof(struct netxbig_led_data) * pdata->num_leds, GFP_KERNEL);
-       if (!leds_data)
+       ret = of_gpio_named_count(np, "addr-gpios");
+       if (ret < 0) {
+               dev_err(dev,
+                       "Failed to count GPIOs in DT property addr-gpios\n");
+               return ret;
+       }
+       num_addr = ret;
+       addr = devm_kzalloc(dev, num_addr * sizeof(*addr), GFP_KERNEL);
+       if (!addr)
                return -ENOMEM;
 
-       ret = gpio_ext_init(pdata->gpio_ext);
-       if (ret < 0)
+       for (i = 0; i < num_addr; i++) {
+               ret = of_get_named_gpio(np, "addr-gpios", i);
+               if (ret < 0)
+                       return ret;
+               addr[i] = ret;
+       }
+       gpio_ext->addr = addr;
+       gpio_ext->num_addr = num_addr;
+
+       ret = of_gpio_named_count(np, "data-gpios");
+       if (ret < 0) {
+               dev_err(dev,
+                       "Failed to count GPIOs in DT property data-gpios\n");
                return ret;
+       }
+       num_data = ret;
+       data = devm_kzalloc(dev, num_data * sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
-       for (i = 0; i < pdata->num_leds; i++) {
-               ret = create_netxbig_led(pdev, &leds_data[i], &pdata->leds[i]);
+       for (i = 0; i < num_data; i++) {
+               ret = of_get_named_gpio(np, "data-gpios", i);
                if (ret < 0)
-                       goto err_free_leds;
+                       return ret;
+               data[i] = ret;
        }
+       gpio_ext->data = data;
+       gpio_ext->num_data = num_data;
 
-       platform_set_drvdata(pdev, leds_data);
+       ret = of_get_named_gpio(np, "enable-gpio", 0);
+       if (ret < 0) {
+               dev_err(dev,
+                       "Failed to get GPIO from DT property enable-gpio\n");
+               return ret;
+       }
+       gpio_ext->enable = ret;
 
        return 0;
+}
+
+static int netxbig_leds_get_of_pdata(struct device *dev,
+                                    struct netxbig_led_platform_data *pdata)
+{
+       struct device_node *np = dev->of_node;
+       struct device_node *gpio_ext_np;
+       struct device_node *child;
+       struct netxbig_gpio_ext *gpio_ext;
+       struct netxbig_led_timer *timers;
+       struct netxbig_led *leds, *led;
+       int num_timers;
+       int num_leds = 0;
+       int ret;
+       int i;
 
-err_free_leds:
-       for (i = i - 1; i >= 0; i--)
-               delete_netxbig_led(&leds_data[i]);
+       /* GPIO extension */
+       gpio_ext_np = of_parse_phandle(np, "gpio-ext", 0);
+       if (!gpio_ext_np) {
+               dev_err(dev, "Failed to get DT handle gpio-ext\n");
+               return -EINVAL;
+       }
 
-       gpio_ext_free(pdata->gpio_ext);
+       gpio_ext = devm_kzalloc(dev, sizeof(*gpio_ext), GFP_KERNEL);
+       if (!gpio_ext)
+               return -ENOMEM;
+       ret = gpio_ext_get_of_pdata(dev, gpio_ext_np, gpio_ext);
+       if (ret)
+               return ret;
+       of_node_put(gpio_ext_np);
+       pdata->gpio_ext = gpio_ext;
+
+       /* Timers (optional) */
+       ret = of_property_count_u32_elems(np, "timers");
+       if (ret > 0) {
+               if (ret % 3)
+                       return -EINVAL;
+               num_timers = ret / 3;
+               timers = devm_kzalloc(dev, num_timers * sizeof(*timers),
+                                     GFP_KERNEL);
+               if (!timers)
+                       return -ENOMEM;
+               for (i = 0; i < num_timers; i++) {
+                       u32 tmp;
+
+                       of_property_read_u32_index(np, "timers", 3 * i,
+                                                  &timers[i].mode);
+                       if (timers[i].mode >= NETXBIG_LED_MODE_NUM)
+                               return -EINVAL;
+                       of_property_read_u32_index(np, "timers",
+                                                  3 * i + 1, &tmp);
+                       timers[i].delay_on = tmp;
+                       of_property_read_u32_index(np, "timers",
+                                                  3 * i + 2, &tmp);
+                       timers[i].delay_off = tmp;
+               }
+               pdata->timer = timers;
+               pdata->num_timer = num_timers;
+       }
+
+       /* LEDs */
+       num_leds = of_get_child_count(np);
+       if (!num_leds) {
+               dev_err(dev, "No LED subnodes found in DT\n");
+               return -ENODEV;
+       }
+
+       leds = devm_kzalloc(dev, num_leds * sizeof(*leds), GFP_KERNEL);
+       if (!leds)
+               return -ENOMEM;
+
+       led = leds;
+       for_each_child_of_node(np, child) {
+               const char *string;
+               int *mode_val;
+               int num_modes;
+
+               ret = of_property_read_u32(child, "mode-addr",
+                                          &led->mode_addr);
+               if (ret)
+                       goto err_node_put;
+
+               ret = of_property_read_u32(child, "bright-addr",
+                                          &led->bright_addr);
+               if (ret)
+                       goto err_node_put;
+
+               ret = of_property_read_u32(child, "max-brightness",
+                                          &led->bright_max);
+               if (ret)
+                       goto err_node_put;
+
+               mode_val =
+                       devm_kzalloc(dev,
+                                    NETXBIG_LED_MODE_NUM * sizeof(*mode_val),
+                                    GFP_KERNEL);
+               if (!mode_val) {
+                       ret = -ENOMEM;
+                       goto err_node_put;
+               }
+
+               for (i = 0; i < NETXBIG_LED_MODE_NUM; i++)
+                       mode_val[i] = NETXBIG_LED_INVALID_MODE;
+
+               ret = of_property_count_u32_elems(child, "mode-val");
+               if (ret < 0 || ret % 2) {
+                       ret = -EINVAL;
+                       goto err_node_put;
+               }
+               num_modes = ret / 2;
+               if (num_modes > NETXBIG_LED_MODE_NUM) {
+                       ret = -EINVAL;
+                       goto err_node_put;
+               }
+
+               for (i = 0; i < num_modes; i++) {
+                       int mode;
+                       int val;
+
+                       of_property_read_u32_index(child,
+                                                  "mode-val", 2 * i, &mode);
+                       of_property_read_u32_index(child,
+                                                  "mode-val", 2 * i + 1, &val);
+                       if (mode >= NETXBIG_LED_MODE_NUM) {
+                               ret = -EINVAL;
+                               goto err_node_put;
+                       }
+                       mode_val[mode] = val;
+               }
+               led->mode_val = mode_val;
+
+               if (!of_property_read_string(child, "label", &string))
+                       led->name = string;
+               else
+                       led->name = child->name;
+
+               if (!of_property_read_string(child,
+                                            "linux,default-trigger", &string))
+                       led->default_trigger = string;
+
+               led++;
+       }
+
+       pdata->leds = leds;
+       pdata->num_leds = num_leds;
+
+       return 0;
+
+err_node_put:
+       of_node_put(child);
        return ret;
 }
 
-static int netxbig_led_remove(struct platform_device *pdev)
+static const struct of_device_id of_netxbig_leds_match[] = {
+       { .compatible = "lacie,netxbig-leds", },
+       {},
+};
+#else
+static inline int
+netxbig_leds_get_of_pdata(struct device *dev,
+                         struct netxbig_led_platform_data *pdata)
+{
+       return -ENODEV;
+}
+#endif /* CONFIG_OF_GPIO */
+
+static int netxbig_led_probe(struct platform_device *pdev)
 {
        struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct netxbig_led_data *leds_data;
        int i;
+       int ret;
 
-       leds_data = platform_get_drvdata(pdev);
+       if (!pdata) {
+               pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+               if (!pdata)
+                       return -ENOMEM;
+               ret = netxbig_leds_get_of_pdata(&pdev->dev, pdata);
+               if (ret)
+                       return ret;
+       }
+
+       leds_data = devm_kzalloc(&pdev->dev,
+                                pdata->num_leds * sizeof(*leds_data),
+                                GFP_KERNEL);
+       if (!leds_data)
+               return -ENOMEM;
 
-       for (i = 0; i < pdata->num_leds; i++)
-               delete_netxbig_led(&leds_data[i]);
+       ret = gpio_ext_init(pdev, pdata->gpio_ext);
+       if (ret < 0)
+               return ret;
 
-       gpio_ext_free(pdata->gpio_ext);
+       for (i = 0; i < pdata->num_leds; i++) {
+               ret = create_netxbig_led(pdev, pdata,
+                                        &leds_data[i], &pdata->leds[i]);
+               if (ret < 0)
+                       return ret;
+       }
 
        return 0;
 }
 
 static struct platform_driver netxbig_led_driver = {
        .probe          = netxbig_led_probe,
-       .remove         = netxbig_led_remove,
        .driver         = {
-               .name   = "leds-netxbig",
+               .name           = "leds-netxbig",
+               .of_match_table = of_match_ptr(of_netxbig_leds_match),
        },
 };
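The netxbig DT support added above collects each GPIO list with of_gpio_named_count() plus of_get_named_gpio(), one property at a time. A condensed sketch of that idiom for a single property; the "addr-gpios" name mirrors the patch, but the helper itself is illustrative:

	#include <linux/device.h>
	#include <linux/of.h>
	#include <linux/of_gpio.h>
	#include <linux/slab.h>

	static int example_read_gpio_list(struct device *dev,
					  struct device_node *np,
					  int **gpios_out, int *num_out)
	{
		int num, i, ret;
		int *gpios;

		/* How many entries does the "addr-gpios" property carry? */
		num = of_gpio_named_count(np, "addr-gpios");
		if (num < 0)
			return num;

		gpios = devm_kcalloc(dev, num, sizeof(*gpios), GFP_KERNEL);
		if (!gpios)
			return -ENOMEM;

		/* Resolve each specifier to a global GPIO number. */
		for (i = 0; i < num; i++) {
			ret = of_get_named_gpio(np, "addr-gpios", i);
			if (ret < 0)
				return ret;
			gpios[i] = ret;
		}

		*gpios_out = gpios;
		*num_out = num;
		return 0;
	}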
 
index 39870de20a26fff25d6eba7c767d68fb8c12aa8e..12af1127d9b78117086cb2c83cc0fb2c776625b7 100644 (file)
@@ -124,9 +124,9 @@ static int ot200_led_probe(struct platform_device *pdev)
                leds[i].cdev.name = leds[i].name;
                leds[i].cdev.brightness_set = ot200_led_brightness_set;
 
-               ret = led_classdev_register(&pdev->dev, &leds[i].cdev);
+               ret = devm_led_classdev_register(&pdev->dev, &leds[i].cdev);
                if (ret < 0)
-                       goto err;
+                       return ret;
        }
 
        leds_front = 0;         /* turn off all front leds */
@@ -135,27 +135,10 @@ static int ot200_led_probe(struct platform_device *pdev)
        outb(leds_back, 0x5a);
 
        return 0;
-
-err:
-       for (i = i - 1; i >= 0; i--)
-               led_classdev_unregister(&leds[i].cdev);
-
-       return ret;
-}
-
-static int ot200_led_remove(struct platform_device *pdev)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(leds); i++)
-               led_classdev_unregister(&leds[i].cdev);
-
-       return 0;
 }
 
 static struct platform_driver ot200_led_driver = {
        .probe          = ot200_led_probe,
-       .remove         = ot200_led_remove,
        .driver         = {
                .name   = "leds-ot200",
        },
index 2c5c5b12ab6475c1af3a2710bff8a5f78a2525d9..1e75e1fe9b726d83b7cd8a317fce7c4c727bf82c 100644 (file)
@@ -262,15 +262,19 @@ static int powernv_led_classdev(struct platform_device *pdev,
                while ((cur = of_prop_next_string(p, cur)) != NULL) {
                        powernv_led = devm_kzalloc(dev, sizeof(*powernv_led),
                                                   GFP_KERNEL);
-                       if (!powernv_led)
+                       if (!powernv_led) {
+                               of_node_put(np);
                                return -ENOMEM;
+                       }
 
                        powernv_led->common = powernv_led_common;
                        powernv_led->loc_code = (char *)np->name;
 
                        rc = powernv_led_create(dev, powernv_led, cur);
-                       if (rc)
+                       if (rc) {
+                               of_node_put(np);
                                return rc;
+                       }
                } /* while end */
        }
 
similarity index 99%
rename from arch/mips/mti-sead3/leds-sead3.c
rename to drivers/leds/leds-sead3.c
index c938ceeb8848a1b7ef3b2186ddde2fa5ecc1641e..eb97a3271bb37a265d885f68df82f31e13b15c8c 100644 (file)
@@ -59,6 +59,7 @@ static int sead3_led_remove(struct platform_device *pdev)
 {
        led_classdev_unregister(&sead3_pled);
        led_classdev_unregister(&sead3_fled);
+
        return 0;
 }
 
index 1ba3defdd46023ff028969aa46b8a4cd0d42d6f3..473fb6b97ed4f5f5ca86710f9b0afbfbaefa6ff3 100644 (file)
@@ -76,39 +76,19 @@ static int wrap_led_probe(struct platform_device *pdev)
 {
        int ret;
 
-       ret = led_classdev_register(&pdev->dev, &wrap_power_led);
+       ret = devm_led_classdev_register(&pdev->dev, &wrap_power_led);
        if (ret < 0)
                return ret;
 
-       ret = led_classdev_register(&pdev->dev, &wrap_error_led);
+       ret = devm_led_classdev_register(&pdev->dev, &wrap_error_led);
        if (ret < 0)
-               goto err1;
-
-       ret = led_classdev_register(&pdev->dev, &wrap_extra_led);
-       if (ret < 0)
-               goto err2;
-
-       return ret;
-
-err2:
-       led_classdev_unregister(&wrap_error_led);
-err1:
-       led_classdev_unregister(&wrap_power_led);
-
-       return ret;
-}
+               return ret;
 
-static int wrap_led_remove(struct platform_device *pdev)
-{
-       led_classdev_unregister(&wrap_power_led);
-       led_classdev_unregister(&wrap_error_led);
-       led_classdev_unregister(&wrap_extra_led);
-       return 0;
+       return  devm_led_classdev_register(&pdev->dev, &wrap_extra_led);
 }
 
 static struct platform_driver wrap_led_driver = {
        .probe          = wrap_led_probe,
-       .remove         = wrap_led_remove,
        .driver         = {
                .name           = DRVNAME,
        },
index bc89d7ace2c44e48b1e4c318dc53103767eba942..4238fbc31d3587923f95a61f5ba2b33e5cf74d92 100644 (file)
@@ -44,6 +44,7 @@ static inline int led_get_brightness(struct led_classdev *led_cdev)
        return led_cdev->brightness;
 }
 
+void led_init_core(struct led_classdev *led_cdev);
 void led_stop_software_blink(struct led_classdev *led_cdev);
 
 extern struct rw_semaphore leds_list_lock;
index fea6871d2609d405d9f0d0046059a7562bb8152f..8622ce651ae28f1b94ee8debd97d520bf1a9d004 100644 (file)
@@ -27,6 +27,7 @@ struct heartbeat_trig_data {
        unsigned int phase;
        unsigned int period;
        struct timer_list timer;
+       unsigned int invert;
 };
 
 static void led_heartbeat_function(unsigned long data)
@@ -56,21 +57,27 @@ static void led_heartbeat_function(unsigned long data)
                        msecs_to_jiffies(heartbeat_data->period);
                delay = msecs_to_jiffies(70);
                heartbeat_data->phase++;
-               brightness = led_cdev->max_brightness;
+               if (!heartbeat_data->invert)
+                       brightness = led_cdev->max_brightness;
                break;
        case 1:
                delay = heartbeat_data->period / 4 - msecs_to_jiffies(70);
                heartbeat_data->phase++;
+               if (heartbeat_data->invert)
+                       brightness = led_cdev->max_brightness;
                break;
        case 2:
                delay = msecs_to_jiffies(70);
                heartbeat_data->phase++;
-               brightness = led_cdev->max_brightness;
+               if (!heartbeat_data->invert)
+                       brightness = led_cdev->max_brightness;
                break;
        default:
                delay = heartbeat_data->period - heartbeat_data->period / 4 -
                        msecs_to_jiffies(70);
                heartbeat_data->phase = 0;
+               if (heartbeat_data->invert)
+                       brightness = led_cdev->max_brightness;
                break;
        }
 
@@ -78,15 +85,50 @@ static void led_heartbeat_function(unsigned long data)
        mod_timer(&heartbeat_data->timer, jiffies + delay);
 }
 
+static ssize_t led_invert_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
+
+       return sprintf(buf, "%u\n", heartbeat_data->invert);
+}
+
+static ssize_t led_invert_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
+       unsigned long state;
+       int ret;
+
+       ret = kstrtoul(buf, 0, &state);
+       if (ret)
+               return ret;
+
+       heartbeat_data->invert = !!state;
+
+       return size;
+}
+
+static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
+
 static void heartbeat_trig_activate(struct led_classdev *led_cdev)
 {
        struct heartbeat_trig_data *heartbeat_data;
+       int rc;
 
        heartbeat_data = kzalloc(sizeof(*heartbeat_data), GFP_KERNEL);
        if (!heartbeat_data)
                return;
 
        led_cdev->trigger_data = heartbeat_data;
+       rc = device_create_file(led_cdev->dev, &dev_attr_invert);
+       if (rc) {
+               kfree(led_cdev->trigger_data);
+               return;
+       }
+
        setup_timer(&heartbeat_data->timer,
                    led_heartbeat_function, (unsigned long) led_cdev);
        heartbeat_data->phase = 0;
@@ -100,6 +142,7 @@ static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
 
        if (led_cdev->activated) {
                del_timer_sync(&heartbeat_data->timer);
+               device_remove_file(led_cdev->dev, &dev_attr_invert);
                kfree(heartbeat_data);
                led_cdev->activated = false;
        }
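The new heartbeat "invert" option only changes which phases light the LED: normally the two short 70 ms beats (phases 0 and 2) are on, while with invert set the gaps (phases 1 and 3) are lit instead. A tiny illustrative helper, not from the patch, that captures the same truth table:

	#include <linux/types.h>

	/* Illustration only: which heartbeat phases drive the LED. */
	static bool example_heartbeat_phase_lit(unsigned int phase, bool invert)
	{
		/* Phases 0 and 2 are the two 70 ms beats of the normal pattern. */
		bool lit = (phase == 0 || phase == 2);

		return invert ? !lit : lit;
	}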
index de36237d7c6b45de10fca57cc1870fde5a0620e5..051645498b53f8931e6f1db9a11aeb65e61ac2fd 100644 (file)
@@ -74,7 +74,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                ret = -ENOTSUPP;
                dev_err(&pdev->dev,
                        "IO mapped PCI devices are not supported\n");
-               goto out_release;
+               goto out_iounmap;
        }
 
        pci_set_drvdata(pdev, priv);
@@ -89,7 +89,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
        if (ret < 0)
-               goto out_iounmap;
+               goto out_mcb_bus;
        num_cells = ret;
 
        dev_dbg(&pdev->dev, "Found %d cells\n", num_cells);
@@ -98,6 +98,8 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        return 0;
 
+out_mcb_bus:
+       mcb_release_bus(priv->bus);
 out_iounmap:
        iounmap(priv->base);
 out_release:
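The mcb-pci fix above applies the usual rule for goto-based unwinding: release resources in the reverse order they were acquired, and jump to the label that frees everything acquired so far and nothing more (here, a failed chameleon_parse_cells() must tear down the bus as well as the mapping). A generic, self-contained sketch of that shape with made-up stub resources:

	static int acquire_a(void) { return 0; }	/* stand-ins for ioremap/alloc/... */
	static int acquire_b(void) { return 0; }
	static int acquire_c(void) { return 0; }
	static void release_a(void) { }
	static void release_b(void) { }

	static int example_probe(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			return ret;

		ret = acquire_b();
		if (ret)
			goto out_release_a;

		ret = acquire_c();
		if (ret)
			goto out_release_b;	/* must undo both a and b */

		return 0;

	out_release_b:
		release_b();
	out_release_a:
		release_a();
		return ret;
	}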
index e51de52eeb94f71c9d6712a61d31e49f8e6f2f60..48b5890c28e35ad70484d67b29e12a70cb9a4b1b 100644 (file)
@@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
        if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
                ret = bitmap_storage_alloc(&store, chunks,
                                           !bitmap->mddev->bitmap_info.external,
-                                          bitmap->cluster_slot);
+                                          mddev_is_clustered(bitmap->mddev)
+                                          ? bitmap->cluster_slot : 0);
        if (ret)
                goto err;
 
index 20cc36b01b77895625adbe82a14923148d688edb..0a17d1b91a811d712a350c399d94cdc50322f0d0 100644 (file)
@@ -634,10 +634,10 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
 
        disk_super = dm_block_data(sblock);
 
+       disk_super->flags = cpu_to_le32(cmd->flags);
        if (mutator)
                update_flags(disk_super, mutator);
 
-       disk_super->flags = cpu_to_le32(cmd->flags);
        disk_super->mapping_root = cpu_to_le64(cmd->root);
        disk_super->hint_root = cpu_to_le64(cmd->hint_root);
        disk_super->discard_root = cpu_to_le64(cmd->discard_root);
index 240c9f0e85e74e864624f0cb972c5b4394eaf11f..8a096456579bead67b182f27e65956341a7c8d73 100644 (file)
@@ -436,7 +436,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
 static struct dm_cache_policy_type wb_policy_type = {
        .name = "cleaner",
        .version = {1, 0, 0},
-       .hint_size = 0,
+       .hint_size = 4,
        .owner = THIS_MODULE,
        .create = wb_create
 };
index ebaa4f803eec3a08a0cd9fcd9a9a1b933618c50c..192bb8beeb6b59e296d9a2e06c0ef8c0a9be8aeb 100644 (file)
@@ -203,7 +203,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
                return -EINVAL;
        }
 
-       tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL);
+       tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL);
        if (!tmp_store) {
                ti->error = "Exception store allocation failed";
                return -ENOMEM;
@@ -215,7 +215,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
        else if (persistent == 'N')
                type = get_type("N");
        else {
-               ti->error = "Persistent flag is not P or N";
+               ti->error = "Exception store type is not P or N";
                r = -EINVAL;
                goto bad_type;
        }
@@ -233,7 +233,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
        if (r)
                goto bad;
 
-       r = type->ctr(tmp_store, 0, NULL);
+       r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? &argv[0][1] : NULL));
        if (r) {
                ti->error = "Exception store type constructor failed";
                goto bad;
index 0b2536247cf55a3215223b8b0c72ff29a629b87a..fae34e7a0b1e4e4d60b5867eff9422e432fba83b 100644 (file)
@@ -42,8 +42,7 @@ struct dm_exception_store_type {
        const char *name;
        struct module *module;
 
-       int (*ctr) (struct dm_exception_store *store,
-                   unsigned argc, char **argv);
+       int (*ctr) (struct dm_exception_store *store, char *options);
 
        /*
         * Destroys this object when you've finished with it.
@@ -123,6 +122,8 @@ struct dm_exception_store {
        unsigned chunk_shift;
 
        void *context;
+
+       bool userspace_supports_overflow;
 };
 
 /*
index 97e165183e79f2991f8191913e0b44fb91b00310..a0901214aef57de00419a14c573bc128431749c7 100644 (file)
@@ -329,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
                 */
                if (min_region_size > (1 << 13)) {
                        /* If not a power of 2, make it the next power of 2 */
-                       if (min_region_size & (min_region_size - 1))
-                               region_size = 1 << fls(region_size);
+                       region_size = roundup_pow_of_two(min_region_size);
                        DMINFO("Choosing default region size of %lu sectors",
                               region_size);
                } else {
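The dm-raid hunk swaps an open-coded round-up (which also operated on region_size instead of min_region_size) for the generic helper from <linux/log2.h>: roundup_pow_of_two(n) returns the smallest power of two greater than or equal to n, so 8192 stays 8192 and 8193 becomes 16384. A one-line illustrative wrapper:

	#include <linux/log2.h>

	static unsigned long example_default_region_size(unsigned long min_region_size)
	{
		/* 8192 -> 8192, 8193 -> 16384, 13000 -> 16384 */
		return roundup_pow_of_two(min_region_size);
	}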
index bf71583296f732b6b78c71ae67dee5222824b2f8..117a05e40090a9b78829ed415446d906c8268701 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "dm-exception-store.h"
 
+#include <linux/ctype.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
@@ -843,10 +844,10 @@ static void persistent_drop_snapshot(struct dm_exception_store *store)
                DMWARN("write header failed");
 }
 
-static int persistent_ctr(struct dm_exception_store *store,
-                         unsigned argc, char **argv)
+static int persistent_ctr(struct dm_exception_store *store, char *options)
 {
        struct pstore *ps;
+       int r;
 
        /* allocate the pstore */
        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
@@ -868,14 +869,32 @@ static int persistent_ctr(struct dm_exception_store *store,
 
        ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
        if (!ps->metadata_wq) {
-               kfree(ps);
                DMERR("couldn't start header metadata update thread");
-               return -ENOMEM;
+               r = -ENOMEM;
+               goto err_workqueue;
+       }
+
+       if (options) {
+               char overflow = toupper(options[0]);
+               if (overflow == 'O')
+                       store->userspace_supports_overflow = true;
+               else {
+                       DMERR("Unsupported persistent store option: %s", options);
+                       r = -EINVAL;
+                       goto err_options;
+               }
        }
 
        store->context = ps;
 
        return 0;
+
+err_options:
+       destroy_workqueue(ps->metadata_wq);
+err_workqueue:
+       kfree(ps);
+
+       return r;
 }
 
 static unsigned persistent_status(struct dm_exception_store *store,
@@ -888,7 +907,8 @@ static unsigned persistent_status(struct dm_exception_store *store,
        case STATUSTYPE_INFO:
                break;
        case STATUSTYPE_TABLE:
-               DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
+               DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
+                      (unsigned long long)store->chunk_size);
        }
 
        return sz;
index 1ce9a2586e4134a79ec3289808f8229e9aaa2080..9b7c8c8049d6186f54bdfec114c43cb3ce4d77fa 100644 (file)
@@ -70,8 +70,7 @@ static void transient_usage(struct dm_exception_store *store,
        *metadata_sectors = 0;
 }
 
-static int transient_ctr(struct dm_exception_store *store,
-                        unsigned argc, char **argv)
+static int transient_ctr(struct dm_exception_store *store, char *options)
 {
        struct transient_c *tc;
 
index c0bcd6516dfe17f8e7a06ec8c66d1e1d5801f133..c06b74e91cd6aeef00ef4eefae9953d4d8c8f91b 100644 (file)
@@ -1098,7 +1098,7 @@ static void stop_merge(struct dm_snapshot *s)
 }
 
 /*
- * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
+ * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
  */
 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
@@ -1302,6 +1302,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src,
 
        u.store_swap = snap_dest->store;
        snap_dest->store = snap_src->store;
+       snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
        snap_src->store = u.store_swap;
 
        snap_dest->store->snap = snap_dest;
@@ -1739,8 +1740,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
                        pe = __find_pending_exception(s, pe, chunk);
                        if (!pe) {
-                               s->snapshot_overflowed = 1;
-                               DMERR("Snapshot overflowed: Unable to allocate exception.");
+                               if (s->store->userspace_supports_overflow) {
+                                       s->snapshot_overflowed = 1;
+                                       DMERR("Snapshot overflowed: Unable to allocate exception.");
+                               } else
+                                       __invalidate_snapshot(s, -ENOMEM);
                                r = -EIO;
                                goto out_unlock;
                        }
@@ -2365,7 +2369,7 @@ static struct target_type origin_target = {
 
 static struct target_type snapshot_target = {
        .name    = "snapshot",
-       .version = {1, 14, 0},
+       .version = {1, 15, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
@@ -2379,7 +2383,7 @@ static struct target_type snapshot_target = {
 
 static struct target_type merge_target = {
        .name    = dm_snapshot_merge_target_name,
-       .version = {1, 3, 0},
+       .version = {1, 4, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
index 6fcbfb0633665a7c7b91d036b771cd997560e3de..3897b90bd462d852e0aec27a792be14655efa150 100644 (file)
@@ -3201,7 +3201,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                                                metadata_low_callback,
                                                pool);
        if (r)
-               goto out_free_pt;
+               goto out_flags_changed;
 
        pt->callbacks.congested_fn = pool_is_congested;
        dm_table_add_target_callbacks(ti->table, &pt->callbacks);
index 6264781dc69a6066b88d719537c471b7d1cd7b27..1b5c6047e4f19882fbbe9facbc29aeee54dc8723 100644 (file)
@@ -1001,6 +1001,7 @@ static void end_clone_bio(struct bio *clone)
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
+       int error = clone->bi_error;
 
        bio_put(clone);
 
@@ -1011,13 +1012,13 @@ static void end_clone_bio(struct bio *clone)
                 * the remainder.
                 */
                return;
-       else if (bio->bi_error) {
+       else if (error) {
                /*
                 * Don't notice the error to the upper layer yet.
                 * The error handling decision is made by the target driver,
                 * when the request is completed.
                 */
-               tio->error = bio->bi_error;
+               tio->error = error;
                return;
        }
 
@@ -2837,8 +2838,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 
        might_sleep();
 
-       map = dm_get_live_table(md, &srcu_idx);
-
        spin_lock(&_minor_lock);
        idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
        set_bit(DMF_FREEING, &md->flags);
@@ -2852,14 +2851,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
         * do not race with internal suspend.
         */
        mutex_lock(&md->suspend_lock);
+       map = dm_get_live_table(md, &srcu_idx);
        if (!dm_suspended_md(md)) {
                dm_table_presuspend_targets(map);
                dm_table_postsuspend_targets(map);
        }
-       mutex_unlock(&md->suspend_lock);
-
        /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
        dm_put_live_table(md, srcu_idx);
+       mutex_unlock(&md->suspend_lock);
 
        /*
         * Rare, but there may be I/O requests still going to complete,
index 4f5ecbe94ccbf97c562d96930635c6aaff0550d3..3fe3d04a968ad1ae5e148abbc93cde43be576104 100644 (file)
@@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
                 * which will now never happen */
                wake_up_process(mddev->sync_thread->tsk);
 
+       if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+               return -EBUSY;
        mddev_unlock(mddev);
        wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
                                          &mddev->recovery));
+       wait_event(mddev->sb_wait,
+                  !test_bit(MD_CHANGE_PENDING, &mddev->flags));
        mddev_lock_nointr(mddev);
 
        mutex_lock(&mddev->open_mutex);
@@ -8036,8 +8040,7 @@ static int remove_and_add_spares(struct mddev *mddev,
                       !test_bit(Bitmap_sync, &rdev->flags)))
                        continue;
 
-               if (rdev->saved_raid_disk < 0)
-                       rdev->recovery_offset = 0;
+               rdev->recovery_offset = 0;
                if (mddev->pers->
                    hot_add_disk(mddev, rdev) == 0) {
                        if (sysfs_link_rdev(mddev, rdev))
@@ -8160,6 +8163,7 @@ void md_check_recovery(struct mddev *mddev)
                        md_reap_sync_thread(mddev);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
                        goto unlock;
                }
 
index d222522c52e077dcdd012fbf22d929fc6a7cb0ad..d132f06afdd1aa3140922f7965494087cf43eb7a 100644 (file)
@@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev)
        return 0;
 
 out_free_conf:
-       if (conf->pool)
-               mempool_destroy(conf->pool);
+       mempool_destroy(conf->pool);
        kfree(conf->multipaths);
        kfree(conf);
        mddev->private = NULL;
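This hunk, and the matching ones in raid1, raid10 and raid5 below, rely on mempool_destroy() and kmem_cache_destroy() being NULL-safe, so the "if (ptr)" guard before them is redundant. A small illustrative teardown helper (struct example_conf is hypothetical):

	#include <linux/mempool.h>
	#include <linux/slab.h>

	struct example_conf {
		mempool_t *pool;
		struct kmem_cache *cache;
	};

	static void example_teardown(struct example_conf *conf)
	{
		/* All three of these accept NULL and turn it into a no-op. */
		mempool_destroy(conf->pool);
		kmem_cache_destroy(conf->cache);
		kfree(conf);
	}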
index 421a36c593e3e7dedef785a5f78c12914c13d8b3..2e4c4cb79e4d939f0ed2638986d2ccc3ddd8b972 100644 (file)
@@ -301,11 +301,16 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
 {
        int s;
        uint32_t max_entries = le32_to_cpu(left->header.max_entries);
-       unsigned target = (nr_left + nr_center + nr_right) / 3;
-       BUG_ON(target > max_entries);
+       unsigned total = nr_left + nr_center + nr_right;
+       unsigned target_right = total / 3;
+       unsigned remainder = (target_right * 3) != total;
+       unsigned target_left = target_right + remainder;
+
+       BUG_ON(target_left > max_entries);
+       BUG_ON(target_right > max_entries);
 
        if (nr_left < nr_right) {
-               s = nr_left - target;
+               s = nr_left - target_left;
 
                if (s < 0 && nr_center < -s) {
                        /* not enough in central node */
@@ -316,10 +321,10 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
                } else
                        shift(left, center, s);
 
-               shift(center, right, target - nr_right);
+               shift(center, right, target_right - nr_right);
 
        } else {
-               s = target - nr_right;
+               s = target_right - nr_right;
                if (s > 0 && nr_center < s) {
                        /* not enough in central node */
                        shift(center, right, nr_center);
@@ -329,7 +334,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
                } else
                        shift(center, right, s);
 
-               shift(left, center, nr_left - target);
+               shift(left, center, nr_left - target_left);
        }
 
        *key_ptr(parent, c->index) = center->keys[0];
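The redistribute3() change replaces the single target = total / 3 with separate targets for the left and right nodes, letting the left node absorb the rounding remainder so the per-node targets stay consistent with the real entry count. A worked example of just that arithmetic (not the shifting), using hypothetical counts:

	/* Arithmetic only, mirroring the new target computation. */
	static void example_targets(unsigned int nr_left, unsigned int nr_center,
				    unsigned int nr_right)
	{
		unsigned int total = nr_left + nr_center + nr_right;
		unsigned int target_right = total / 3;
		unsigned int remainder = (target_right * 3) != total;
		unsigned int target_left = target_right + remainder;

		/*
		 * e.g. nr_left=4, nr_center=3, nr_right=3: total=10,
		 * target_right=3, target_left=4, and the center node is
		 * left with the remaining 3 entries.
		 */
		(void)target_left;
	}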
index b6cec258cc2138497b3c8875fc4defdb7523701f..0e09aef43998ac250cc296122247875eed23ceb2 100644 (file)
@@ -523,7 +523,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
 
        r = new_block(s->info, &right);
        if (r < 0) {
-               /* FIXME: put left */
+               unlock_block(s->info, left);
                return r;
        }
 
index 63e619b2f44eb3ce51a90eb74980ed8a1f91c639..f8e5db0cb5aaae3038e67ec9f348f1faa4c64508 100644 (file)
@@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev)
                struct md_rdev *rdev;
                bool discard_supported = false;
 
-               rdev_for_each(rdev, mddev) {
-                       disk_stack_limits(mddev->gendisk, rdev->bdev,
-                                         rdev->data_offset << 9);
-                       if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
-                               discard_supported = true;
-               }
                blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
                blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
                blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
@@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev)
                blk_queue_io_opt(mddev->queue,
                                 (mddev->chunk_sectors << 9) * mddev->raid_disks);
 
+               rdev_for_each(rdev, mddev) {
+                       disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                         rdev->data_offset << 9);
+                       if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+                               discard_supported = true;
+               }
                if (!discard_supported)
                        queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
                else
index 4517f06c41bafe0fb2fbe2a5b454b68f012b2455..d9d031ede4bf5d73993a0fc607fab4274627c890 100644 (file)
@@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
        }
 
        if (bio && bio_data_dir(bio) == WRITE) {
-               if (bio->bi_iter.bi_sector >=
-                   conf->mddev->curr_resync_completed) {
+               if (bio->bi_iter.bi_sector >= conf->next_resync) {
                        if (conf->start_next_window == MaxSector)
                                conf->start_next_window =
                                        conf->next_resync +
@@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf)
        conf->r1buf_pool = NULL;
 
        spin_lock_irq(&conf->resync_lock);
-       conf->next_resync = 0;
+       conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
        conf->start_next_window = MaxSector;
        conf->current_window_requests +=
                conf->next_window_requests;
@@ -2196,7 +2195,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
                bio_trim(wbio, sector - r1_bio->sector, sectors);
                wbio->bi_iter.bi_sector += rdev->data_offset;
                wbio->bi_bdev = rdev->bdev;
-               if (submit_bio_wait(WRITE, wbio) == 0)
+               if (submit_bio_wait(WRITE, wbio) < 0)
                        /* failure! */
                        ok = rdev_set_badblocks(rdev, sector,
                                                sectors, 0)
@@ -2259,15 +2258,16 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
                        rdev_dec_pending(conf->mirrors[m].rdev,
                                         conf->mddev);
                }
-       if (test_bit(R1BIO_WriteError, &r1_bio->state))
-               close_write(r1_bio);
        if (fail) {
                spin_lock_irq(&conf->device_lock);
                list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
                spin_unlock_irq(&conf->device_lock);
                md_wakeup_thread(conf->mddev->thread);
-       } else
+       } else {
+               if (test_bit(R1BIO_WriteError, &r1_bio->state))
+                       close_write(r1_bio);
                raid_end_bio_io(r1_bio);
+       }
 }
 
 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
@@ -2383,9 +2383,13 @@ static void raid1d(struct md_thread *thread)
                }
                spin_unlock_irqrestore(&conf->device_lock, flags);
                while (!list_empty(&tmp)) {
-                       r1_bio = list_first_entry(&conf->bio_end_io_list,
-                                                 struct r1bio, retry_list);
+                       r1_bio = list_first_entry(&tmp, struct r1bio,
+                                                 retry_list);
                        list_del(&r1_bio->retry_list);
+                       if (mddev->degraded)
+                               set_bit(R1BIO_Degraded, &r1_bio->state);
+                       if (test_bit(R1BIO_WriteError, &r1_bio->state))
+                               close_write(r1_bio);
                        raid_end_bio_io(r1_bio);
                }
        }
@@ -2843,8 +2847,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 
  abort:
        if (conf) {
-               if (conf->r1bio_pool)
-                       mempool_destroy(conf->r1bio_pool);
+               mempool_destroy(conf->r1bio_pool);
                kfree(conf->mirrors);
                safe_put_page(conf->tmppage);
                kfree(conf->poolinfo);
@@ -2946,8 +2949,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
 {
        struct r1conf *conf = priv;
 
-       if (conf->r1bio_pool)
-               mempool_destroy(conf->r1bio_pool);
+       mempool_destroy(conf->r1bio_pool);
        kfree(conf->mirrors);
        safe_put_page(conf->tmppage);
        kfree(conf->poolinfo);
index 0fc33eb888551292bb37461f08d7e704483f93e6..96f36596830696c2f1ba1bd8bc6fbb24d30a43d8 100644 (file)
@@ -39,6 +39,7 @@
  *    far_copies (stored in second byte of layout)
  *    far_offset (stored in bit 16 of layout )
  *    use_far_sets (stored in bit 17 of layout )
+ *    use_far_sets_bugfixed (stored in bit 18 of layout )
  *
  * The data to be stored is divided into chunks using chunksize.  Each device
  * is divided into far_copies sections.   In each section, chunks are laid out
@@ -1497,6 +1498,8 @@ static void status(struct seq_file *seq, struct mddev *mddev)
                        seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
                else
                        seq_printf(seq, " %d far-copies", conf->geo.far_copies);
+               if (conf->geo.far_set_size != conf->geo.raid_disks)
+                       seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
        }
        seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
                                        conf->geo.raid_disks - mddev->degraded);
@@ -2467,7 +2470,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
                                   choose_data_offset(r10_bio, rdev) +
                                   (sector - r10_bio->sector));
                wbio->bi_bdev = rdev->bdev;
-               if (submit_bio_wait(WRITE, wbio) == 0)
+               if (submit_bio_wait(WRITE, wbio) < 0)
                        /* Failure! */
                        ok = rdev_set_badblocks(rdev, sector,
                                                sectors, 0)
@@ -2654,16 +2657,17 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
                                rdev_dec_pending(rdev, conf->mddev);
                        }
                }
-               if (test_bit(R10BIO_WriteError,
-                            &r10_bio->state))
-                       close_write(r10_bio);
                if (fail) {
                        spin_lock_irq(&conf->device_lock);
                        list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
                        spin_unlock_irq(&conf->device_lock);
                        md_wakeup_thread(conf->mddev->thread);
-               } else
+               } else {
+                       if (test_bit(R10BIO_WriteError,
+                                    &r10_bio->state))
+                               close_write(r10_bio);
                        raid_end_bio_io(r10_bio);
+               }
        }
 }
 
@@ -2688,9 +2692,15 @@ static void raid10d(struct md_thread *thread)
                }
                spin_unlock_irqrestore(&conf->device_lock, flags);
                while (!list_empty(&tmp)) {
-                       r10_bio = list_first_entry(&conf->bio_end_io_list,
-                                                 struct r10bio, retry_list);
+                       r10_bio = list_first_entry(&tmp, struct r10bio,
+                                                  retry_list);
                        list_del(&r10_bio->retry_list);
+                       if (mddev->degraded)
+                               set_bit(R10BIO_Degraded, &r10_bio->state);
+
+                       if (test_bit(R10BIO_WriteError,
+                                    &r10_bio->state))
+                               close_write(r10_bio);
                        raid_end_bio_io(r10_bio);
                }
        }
@@ -3387,7 +3397,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
                disks = mddev->raid_disks + mddev->delta_disks;
                break;
        }
-       if (layout >> 18)
+       if (layout >> 19)
                return -1;
        if (chunk < (PAGE_SIZE >> 9) ||
            !is_power_of_2(chunk))
@@ -3399,7 +3409,22 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
        geo->near_copies = nc;
        geo->far_copies = fc;
        geo->far_offset = fo;
-       geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
+       switch (layout >> 17) {
+       case 0: /* original layout.  simple but not always optimal */
+               geo->far_set_size = disks;
+               break;
+       case 1: /* "improved" layout which was buggy.  Hopefully no-one is
+                * actually using this, but leave code here just in case.*/
+               geo->far_set_size = disks/fc;
+               WARN(geo->far_set_size < fc,
+                    "This RAID10 layout does not provide data safety - please backup and create new array\n");
+               break;
+       case 2: /* "improved" layout fixed to match documentation */
+               geo->far_set_size = fc * nc;
+               break;
+       default: /* Not a valid layout */
+               return -1;
+       }
        geo->chunk_mask = chunk - 1;
        geo->chunk_shift = ffz(~chunk);
        return nc*fc;
@@ -3486,8 +3511,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
                printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
                       mdname(mddev));
        if (conf) {
-               if (conf->r10bio_pool)
-                       mempool_destroy(conf->r10bio_pool);
+               mempool_destroy(conf->r10bio_pool);
                kfree(conf->mirrors);
                safe_put_page(conf->tmppage);
                kfree(conf);
@@ -3682,8 +3706,7 @@ static int run(struct mddev *mddev)
 
 out_free_conf:
        md_unregister_thread(&mddev->thread);
-       if (conf->r10bio_pool)
-               mempool_destroy(conf->r10bio_pool);
+       mempool_destroy(conf->r10bio_pool);
        safe_put_page(conf->tmppage);
        kfree(conf->mirrors);
        kfree(conf);
@@ -3696,8 +3719,7 @@ static void raid10_free(struct mddev *mddev, void *priv)
 {
        struct r10conf *conf = priv;
 
-       if (conf->r10bio_pool)
-               mempool_destroy(conf->r10bio_pool);
+       mempool_destroy(conf->r10bio_pool);
        safe_put_page(conf->tmppage);
        kfree(conf->mirrors);
        kfree(conf->mirrors_old);
index 15ef2c641b2b93e96004d073463fdcfaaaaceab4..45933c1606972c007fee4393b82c05db7b24f7ec 100644 (file)
@@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf)
               drop_one_stripe(conf))
                ;
 
-       if (conf->slab_cache)
-               kmem_cache_destroy(conf->slab_cache);
+       kmem_cache_destroy(conf->slab_cache);
        conf->slab_cache = NULL;
 }
 
@@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        spin_unlock_irq(&sh->stripe_lock);
                        if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                                wake_up(&conf->wait_for_overlap);
+                       if (bi)
+                               s->to_read--;
                        while (bi && bi->bi_iter.bi_sector <
                               sh->dev[i].sector + STRIPE_SECTORS) {
                                struct bio *nextbi =
@@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                 */
                clear_bit(R5_LOCKED, &sh->dev[i].flags);
        }
+       s->to_write = 0;
+       s->written = 0;
 
        if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
                if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
                 */
                return 0;
 
-       for (i = 0; i < s->failed; i++) {
+       for (i = 0; i < s->failed && i < 2; i++) {
                if (fdev[i]->towrite &&
                    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
                    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
@@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
            sh->sector < sh->raid_conf->mddev->recovery_cp)
                /* reconstruct-write isn't being forced */
                return 0;
-       for (i = 0; i < s->failed; i++) {
+       for (i = 0; i < s->failed && i < 2; i++) {
                if (s->failed_num[i] != sh->pd_idx &&
                    s->failed_num[i] != sh->qd_idx &&
                    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
@@ -3496,6 +3499,7 @@ returnbi:
                }
        if (!discard_pending &&
            test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
+               int hash;
                clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
                clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
                if (sh->qd_idx >= 0) {
@@ -3509,16 +3513,17 @@ returnbi:
                 * no updated data, so remove it from hash list and the stripe
                 * will be reinitialized
                 */
-               spin_lock_irq(&conf->device_lock);
 unhash:
+               hash = sh->hash_lock_index;
+               spin_lock_irq(conf->hash_locks + hash);
                remove_hash(sh);
+               spin_unlock_irq(conf->hash_locks + hash);
                if (head_sh->batch_head) {
                        sh = list_first_entry(&sh->batch_list,
                                              struct stripe_head, batch_list);
                        if (sh != head_sh)
                                        goto unhash;
                }
-               spin_unlock_irq(&conf->device_lock);
                sh = head_sh;
 
                if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
index b055319d532edd93ff85f5e68c57e4ae1ac9493a..c1e2d1834b782096ff3e35fefa3db8bb5c2bb786 100644 (file)
@@ -46,8 +46,8 @@ extern struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
                                        const struct horus3a_config *config,
                                        struct i2c_adapter *i2c);
 #else
-static inline struct dvb_frontend *horus3a_attach(
-                                       const struct cxd2820r_config *config,
+static inline struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
+                                       const struct horus3a_config *config,
                                        struct i2c_adapter *i2c)
 {
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
index 69f30e21f6b31f96497c3d6acccc4a0000b7e822..1f329ef05accea6601a093e33078b5104ef2cfa2 100644 (file)
@@ -43,7 +43,7 @@ struct dvb_frontend *lnbh25_attach(
        struct lnbh25_config *cfg,
        struct i2c_adapter *i2c);
 #else
-static inline dvb_frontend *lnbh25_attach(
+static inline struct dvb_frontend *lnbh25_attach(
        struct dvb_frontend *fe,
        struct lnbh25_config *cfg,
        struct i2c_adapter *i2c)
index ff31e7a01ca9aba8f9746e61d5091d2d1f45c255..feeeb70d841ed92a485e4ec630b1133d51c75291 100644 (file)
 
 static struct dvb_frontend_ops m88ds3103_ops;
 
+/* write single register with mask */
+static int m88ds3103_update_bits(struct m88ds3103_dev *dev,
+                               u8 reg, u8 mask, u8 val)
+{
+       int ret;
+       u8 tmp;
+
+       /* no need for read if whole reg is written */
+       if (mask != 0xff) {
+               ret = regmap_bulk_read(dev->regmap, reg, &tmp, 1);
+               if (ret)
+                       return ret;
+
+               val &= mask;
+               tmp &= ~mask;
+               val |= tmp;
+       }
+
+       return regmap_bulk_write(dev->regmap, reg, &val, 1);
+}
+
 /* write reg val table using reg addr auto increment */
 static int m88ds3103_wr_reg_val_tab(struct m88ds3103_dev *dev,
                const struct m88ds3103_reg_val *tab, int tab_len)
@@ -394,10 +415,10 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
                        u8tmp2 = 0x00; /* 0b00 */
                        break;
                }
-               ret = regmap_update_bits(dev->regmap, 0x22, 0xc0, u8tmp1 << 6);
+               ret = m88ds3103_update_bits(dev, 0x22, 0xc0, u8tmp1 << 6);
                if (ret)
                        goto err;
-               ret = regmap_update_bits(dev->regmap, 0x24, 0xc0, u8tmp2 << 6);
+               ret = m88ds3103_update_bits(dev, 0x24, 0xc0, u8tmp2 << 6);
                if (ret)
                        goto err;
        }
@@ -455,13 +476,13 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
                        if (ret)
                                goto err;
                }
-               ret = regmap_update_bits(dev->regmap, 0x9d, 0x08, 0x08);
+               ret = m88ds3103_update_bits(dev, 0x9d, 0x08, 0x08);
                if (ret)
                        goto err;
                ret = regmap_write(dev->regmap, 0xf1, 0x01);
                if (ret)
                        goto err;
-               ret = regmap_update_bits(dev->regmap, 0x30, 0x80, 0x80);
+               ret = m88ds3103_update_bits(dev, 0x30, 0x80, 0x80);
                if (ret)
                        goto err;
        }
@@ -498,7 +519,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
        switch (dev->cfg->ts_mode) {
        case M88DS3103_TS_SERIAL:
        case M88DS3103_TS_SERIAL_D7:
-               ret = regmap_update_bits(dev->regmap, 0x29, 0x20, u8tmp1);
+               ret = m88ds3103_update_bits(dev, 0x29, 0x20, u8tmp1);
                if (ret)
                        goto err;
                u8tmp1 = 0;
@@ -567,11 +588,11 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
        if (ret)
                goto err;
 
-       ret = regmap_update_bits(dev->regmap, 0x4d, 0x02, dev->cfg->spec_inv << 1);
+       ret = m88ds3103_update_bits(dev, 0x4d, 0x02, dev->cfg->spec_inv << 1);
        if (ret)
                goto err;
 
-       ret = regmap_update_bits(dev->regmap, 0x30, 0x10, dev->cfg->agc_inv << 4);
+       ret = m88ds3103_update_bits(dev, 0x30, 0x10, dev->cfg->agc_inv << 4);
        if (ret)
                goto err;
 
@@ -625,13 +646,13 @@ static int m88ds3103_init(struct dvb_frontend *fe)
        dev->warm = false;
 
        /* wake up device from sleep */
-       ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x01);
+       ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x01);
        if (ret)
                goto err;
-       ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x00);
+       ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x00);
        if (ret)
                goto err;
-       ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x00);
+       ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x00);
        if (ret)
                goto err;
 
@@ -749,18 +770,18 @@ static int m88ds3103_sleep(struct dvb_frontend *fe)
                utmp = 0x29;
        else
                utmp = 0x27;
-       ret = regmap_update_bits(dev->regmap, utmp, 0x01, 0x00);
+       ret = m88ds3103_update_bits(dev, utmp, 0x01, 0x00);
        if (ret)
                goto err;
 
        /* sleep */
-       ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x00);
+       ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x00);
        if (ret)
                goto err;
-       ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x01);
+       ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x01);
        if (ret)
                goto err;
-       ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x10);
+       ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x10);
        if (ret)
                goto err;
 
@@ -992,12 +1013,12 @@ static int m88ds3103_set_tone(struct dvb_frontend *fe,
        }
 
        utmp = tone << 7 | dev->cfg->envelope_mode << 5;
-       ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp);
+       ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp);
        if (ret)
                goto err;
 
        utmp = 1 << 2;
-       ret = regmap_update_bits(dev->regmap, 0xa1, reg_a1_mask, utmp);
+       ret = m88ds3103_update_bits(dev, 0xa1, reg_a1_mask, utmp);
        if (ret)
                goto err;
 
@@ -1047,7 +1068,7 @@ static int m88ds3103_set_voltage(struct dvb_frontend *fe,
        voltage_dis ^= dev->cfg->lnb_en_pol;
 
        utmp = voltage_dis << 1 | voltage_sel << 0;
-       ret = regmap_update_bits(dev->regmap, 0xa2, 0x03, utmp);
+       ret = m88ds3103_update_bits(dev, 0xa2, 0x03, utmp);
        if (ret)
                goto err;
 
@@ -1080,7 +1101,7 @@ static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe,
        }
 
        utmp = dev->cfg->envelope_mode << 5;
-       ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp);
+       ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp);
        if (ret)
                goto err;
 
@@ -1115,12 +1136,12 @@ static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe,
        } else {
                dev_dbg(&client->dev, "diseqc tx timeout\n");
 
-               ret = regmap_update_bits(dev->regmap, 0xa1, 0xc0, 0x40);
+               ret = m88ds3103_update_bits(dev, 0xa1, 0xc0, 0x40);
                if (ret)
                        goto err;
        }
 
-       ret = regmap_update_bits(dev->regmap, 0xa2, 0xc0, 0x80);
+       ret = m88ds3103_update_bits(dev, 0xa2, 0xc0, 0x80);
        if (ret)
                goto err;
 
@@ -1152,7 +1173,7 @@ static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe,
        }
 
        utmp = dev->cfg->envelope_mode << 5;
-       ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp);
+       ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp);
        if (ret)
                goto err;
 
@@ -1194,12 +1215,12 @@ static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe,
        } else {
                dev_dbg(&client->dev, "diseqc tx timeout\n");
 
-               ret = regmap_update_bits(dev->regmap, 0xa1, 0xc0, 0x40);
+               ret = m88ds3103_update_bits(dev, 0xa1, 0xc0, 0x40);
                if (ret)
                        goto err;
        }
 
-       ret = regmap_update_bits(dev->regmap, 0xa2, 0xc0, 0x80);
+       ret = m88ds3103_update_bits(dev, 0xa2, 0xc0, 0x80);
        if (ret)
                goto err;
 
@@ -1435,13 +1456,13 @@ static int m88ds3103_probe(struct i2c_client *client,
                goto err_kfree;
 
        /* sleep */
-       ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x00);
+       ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x00);
        if (ret)
                goto err_kfree;
-       ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x01);
+       ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x01);
        if (ret)
                goto err_kfree;
-       ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x10);
+       ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x10);
        if (ret)
                goto err_kfree;
 
index 81788c5a44d838dc1f6e27e7c060db174af56913..821a8f481507a14ec3857c66cd7c4744a6ac3efd 100644 (file)
@@ -502,6 +502,10 @@ static int si2168_init(struct dvb_frontend *fe)
                /* firmware is in the new format */
                for (remaining = fw->size; remaining > 0; remaining -= 17) {
                        len = fw->data[fw->size - remaining];
+                       if (len > SI2168_ARGLEN) {
+                               ret = -EINVAL;
+                               break;
+                       }
                        memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
                        cmd.wlen = len;
                        cmd.rlen = 1;
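
Note on the hunk above: the si2168 "new format" firmware (and the si2157 firmware further below) is consumed front to back in fixed 17-byte records, one length byte followed by up to 16 command bytes, using a remaining-bytes counter. The added check rejects any record whose declared length exceeds the command argument buffer before the memcpy(). A minimal standalone sketch of that validation loop; CMD_ARG_MAX stands in for the driver's SI2168_ARGLEN (value not shown in this hunk) and the send() callback is hypothetical:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define FW_RECORD_SIZE 17   /* 1 length byte + up to 16 payload bytes */
#define CMD_ARG_MAX    16   /* assumed size of the command args buffer */

/* Walk the blob front to back in 17-byte records and refuse any record
 * whose length byte would overrun the args buffer or the blob itself. */
static int fw_send_records(const uint8_t *data, size_t size,
			   int (*send)(const uint8_t *args, uint8_t len))
{
	size_t remaining;

	for (remaining = size; remaining > 0; remaining -= FW_RECORD_SIZE) {
		uint8_t len;

		if (remaining < FW_RECORD_SIZE)
			return -EINVAL;		/* truncated final record */

		len = data[size - remaining];
		if (len > CMD_ARG_MAX)
			return -EINVAL;		/* malformed length byte */

		if (send(&data[size - remaining + 1], len))
			return -EIO;
	}
	return 0;
}
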
index f55b3276f28de0122c3f4e4385cb3a1736d0b430..56773f3893d40e02d6b9cde1a72fcf0108ad350d 100644 (file)
@@ -80,11 +80,9 @@ irqreturn_t netup_spi_interrupt(struct netup_spi *spi)
        u16 reg;
        unsigned long flags;
 
-       if (!spi) {
-               dev_dbg(&spi->master->dev,
-                       "%s(): SPI not initialized\n", __func__);
+       if (!spi)
                return IRQ_NONE;
-       }
+
        spin_lock_irqsave(&spi->lock, flags);
        reg = readw(&spi->regs->control_stat);
        if (!(reg & NETUP_SPI_CTRL_IRQ)) {
@@ -234,11 +232,9 @@ void netup_spi_release(struct netup_unidvb_dev *ndev)
        unsigned long flags;
        struct netup_spi *spi = ndev->spi;
 
-       if (!spi) {
-               dev_dbg(&spi->master->dev,
-                       "%s(): SPI not initialized\n", __func__);
+       if (!spi)
                return;
-       }
+
        spin_lock_irqsave(&spi->lock, flags);
        reg = readw(&spi->regs->control_stat);
        writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat);
index 486aef50d99b23e4753cbd055221cd2dbb4f6097..f922f2e827bcbb2bfe8cd0606fac89d3ec8c5bd6 100644 (file)
@@ -1097,7 +1097,7 @@ static int load_slim_core_fw(const struct firmware *fw, void *context)
        Elf32_Ehdr *ehdr;
        Elf32_Phdr *phdr;
        u8 __iomem *dst;
-       int err, i;
+       int err = 0, i;
 
        if (!fw || !context)
                return -EINVAL;
@@ -1106,7 +1106,7 @@ static int load_slim_core_fw(const struct firmware *fw, void *context)
        phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
 
        /* go through the available ELF segments */
-       for (i = 0; i < ehdr->e_phnum && !err; i++, phdr++) {
+       for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
 
                /* Only consider LOAD segments */
                if (phdr->p_type != PT_LOAD)
@@ -1192,7 +1192,6 @@ err:
 
 static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
 {
-       int ret;
        int err;
 
        dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
@@ -1207,7 +1206,7 @@ static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
        if (err) {
                dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err);
                complete_all(&fei->fw_ack);
-               return ret;
+               return err;
        }
 
        return 0;
index 1c087cb76815a3ab488cb53db85cf98b5b0cac80..d0549fba711c474a1d58e416bbff09dad9a2f328 100644 (file)
@@ -257,7 +257,7 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
                goto clkerr;
 
        if (devm_request_irq(dev, priv->irq, hix5hd2_ir_rx_interrupt,
-                            IRQF_NO_SUSPEND, pdev->name, priv) < 0) {
+                            0, pdev->name, priv) < 0) {
                dev_err(dev, "IRQ %d register failed\n", priv->irq);
                ret = -EINVAL;
                goto regerr;
index 507382160e5e7524022c782240c36f98c1b54871..ce157edd45fa1adb3dd382037dd421f8b8590091 100644 (file)
@@ -166,6 +166,10 @@ static int si2157_init(struct dvb_frontend *fe)
 
        for (remaining = fw->size; remaining > 0; remaining -= 17) {
                len = fw->data[fw->size - remaining];
+               if (len > SI2157_ARGLEN) {
+                       dev_err(&client->dev, "Bad firmware length\n");
+                       goto err_release_firmware;
+               }
                memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
                cmd.wlen = len;
                cmd.rlen = 1;
index c3cac4c12fb3c6c18315ab83941a3454dc5ce9bd..197a4f2e54d2a1c08a3c662b3f983b57bf8190dd 100644 (file)
@@ -34,6 +34,14 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
        unsigned int pipe;
        u8 requesttype;
 
+       mutex_lock(&d->usb_mutex);
+
+       if (req->size > sizeof(dev->buf)) {
+               dev_err(&d->intf->dev, "too large message %u\n", req->size);
+               ret = -EINVAL;
+               goto err_mutex_unlock;
+       }
+
        if (req->index & CMD_WR_FLAG) {
                /* write */
                memcpy(dev->buf, req->data, req->size);
@@ -50,14 +58,17 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
        dvb_usb_dbg_usb_control_msg(d->udev, 0, requesttype, req->value,
                        req->index, dev->buf, req->size);
        if (ret < 0)
-               goto err;
+               goto err_mutex_unlock;
 
        /* read request, copy returned data to return buf */
        if (requesttype == (USB_TYPE_VENDOR | USB_DIR_IN))
                memcpy(req->data, dev->buf, req->size);
 
+       mutex_unlock(&d->usb_mutex);
+
        return 0;
-err:
+err_mutex_unlock:
+       mutex_unlock(&d->usb_mutex);
        dev_dbg(&d->intf->dev, "failed=%d\n", ret);
        return ret;
 }
index 9f6115a2ee0166d509584d5b5c5fd0b62d5bff06..138062960a7367737521659acc607983c921a685 100644 (file)
@@ -71,7 +71,7 @@
 
 
 struct rtl28xxu_dev {
-       u8 buf[28];
+       u8 buf[128];
        u8 chip_id;
        u8 tuner;
        char *tuner_name;
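
Note on the two rtl28xxu hunks above: the control-message path is now serialized on the device's usb_mutex so the shared dev->buf (grown to 128 bytes) cannot be clobbered by concurrent callers, and requests larger than the buffer are rejected before copying into it. A rough userspace model of the same lock / bounds-check / unlock-on-error shape, using pthreads and hypothetical names; do_usb_transfer() is only a stand-in for the real usb_control_msg() call:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <string.h>

#define XFER_BUF_SIZE 128

static pthread_mutex_t xfer_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char xfer_buf[XFER_BUF_SIZE];	/* shared staging buffer */

static int do_usb_transfer(unsigned char *buf, size_t size)
{
	(void)buf; (void)size;
	return 0;				/* stub for the real transfer */
}

static int ctrl_msg(const unsigned char *data, size_t size)
{
	int ret;

	pthread_mutex_lock(&xfer_lock);

	if (size > sizeof(xfer_buf)) {
		ret = -EINVAL;			/* too large for the staging buffer */
		goto err_mutex_unlock;
	}

	memcpy(xfer_buf, data, size);
	ret = do_usb_transfer(xfer_buf, size);
	if (ret < 0)
		goto err_mutex_unlock;

	pthread_mutex_unlock(&xfer_lock);
	return 0;

err_mutex_unlock:
	pthread_mutex_unlock(&xfer_lock);
	return ret;
}
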
index 82876a67f1449b62f02142f4a677aee8880c295a..9beece00869bf0e27a99fc641b8f926c0d3bad8f 100644 (file)
@@ -47,7 +47,7 @@ config V4L2_MEM2MEM_DEV
 # Used by LED subsystem flash drivers
 config V4L2_FLASH_LED_CLASS
        tristate "V4L2 flash API for LED flash class devices"
-       depends on VIDEO_V4L2_SUBDEV_API
+       depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
        depends on LEDS_CLASS_FLASH
        ---help---
          Say Y here to enable V4L2 flash API support for LED flash
index c6a644b22af44e53b5cee18fc596df48ff675835..6f3154613dc7174b04a7e91dff0af1d3aae9b31e 100644 (file)
@@ -58,12 +58,18 @@ config OMAP_GPMC
          memory drives like NOR, NAND, OneNAND, SRAM.
 
 config OMAP_GPMC_DEBUG
-       bool
+       bool "Enable GPMC debug output and skip reset of GPMC during init"
        depends on OMAP_GPMC
        help
          Enables verbose debugging mostly to decode the bootloader provided
-         timings. Enable this during development to configure devices
-         connected to the GPMC bus.
+         timings. To preserve the bootloader provided timings, the reset
+         of GPMC is skipped during init. Enable this during development to
+         configure devices connected to the GPMC bus.
+
+         NOTE: In addition to matching the register setup with the bootloader
+         you also need to match the GPMC FCLK frequency used by the
+         bootloader or else the GPMC timings won't be identical with the
+         bootloader timings.
 
 config MVEBU_DEVBUS
        bool "Marvell EBU Device Bus Controller"
index 32ac049f2bc4dbda4418587cc017cc074d5989c9..6515dfc2b805d6c5198756e17bacd23724689dac 100644 (file)
@@ -696,7 +696,6 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
        int div;
        u32 l;
 
-       gpmc_cs_show_timings(cs, "before gpmc_cs_set_timings");
        div = gpmc_calc_divider(t->sync_clk);
        if (div < 0)
                return div;
@@ -1988,6 +1987,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
        if (ret < 0)
                goto err;
 
+       gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings");
        ret = gpmc_cs_program_settings(cs, &gpmc_s);
        if (ret < 0)
                goto err;
index f28cb28a62f87073c214d47368355be6c29c6f25..2c7f8d7c0595e2d849183dfd0db91d2d945daa10 100644 (file)
@@ -42,6 +42,8 @@ int intel_lpss_resume(struct device *dev);
        .thaw = intel_lpss_resume,              \
        .poweroff = intel_lpss_suspend,         \
        .restore = intel_lpss_resume,
+#else
+#define INTEL_LPSS_SLEEP_PM_OPS
 #endif
 
 #define INTEL_LPSS_RUNTIME_PM_OPS              \
index c52162ea3d0ab1daf8bd375220669f3ba53db15e..586098f1b233a6d19da9e74e1d2dd2e396408635 100644 (file)
@@ -80,7 +80,7 @@ static int max77843_chg_init(struct max77693_dev *max77843)
        if (!max77843->i2c_chg) {
                dev_err(&max77843->i2c->dev,
                                "Cannot allocate I2C device for Charger\n");
-               return PTR_ERR(max77843->i2c_chg);
+               return -ENODEV;
        }
        i2c_set_clientdata(max77843->i2c_chg, max77843);
 
index 8af12c884b04eeb870d21ae2bb4bf15e0cfb76c4..103baf0e0c5bfd9aa23537adf12f6035bb427d86 100644 (file)
@@ -105,6 +105,7 @@ EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
 
 void cxl_free_afu_irqs(struct cxl_context *ctx)
 {
+       afu_irq_name_free(ctx);
        cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 }
 EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
index e762f85ee233a4b510390aa0ce4a5a79266b84c3..2faa1270d085b15f92e185f8f389f5790390fbef 100644 (file)
@@ -275,6 +275,9 @@ static void reclaim_ctx(struct rcu_head *rcu)
        if (ctx->kernelapi)
                kfree(ctx->mapping);
 
+       if (ctx->irq_bitmap)
+               kfree(ctx->irq_bitmap);
+
        kfree(ctx);
 }
 
index 1c30ef77073d607cd250ade57c08b110fffd092f..0cfb9c129f273cbdf0a408c6b5d3008bd308596d 100644 (file)
@@ -677,6 +677,7 @@ int cxl_register_serr_irq(struct cxl_afu *afu);
 void cxl_release_serr_irq(struct cxl_afu *afu);
 int afu_register_irqs(struct cxl_context *ctx, u32 count);
 void afu_release_irqs(struct cxl_context *ctx, void *cookie);
+void afu_irq_name_free(struct cxl_context *ctx);
 irqreturn_t cxl_slice_irq_err(int irq, void *data);
 
 int cxl_debugfs_init(void);
index a30bf285b5bdd75c3f2b357d89dbab251bb98c7c..7ccd2998be92b8b3f7cdca2a0acbf3f9586d0f34 100644 (file)
@@ -120,9 +120,16 @@ int afu_release(struct inode *inode, struct file *file)
                 __func__, ctx->pe);
        cxl_context_detach(ctx);
 
-       mutex_lock(&ctx->mapping_lock);
-       ctx->mapping = NULL;
-       mutex_unlock(&ctx->mapping_lock);
+
+       /*
+        * Delete the context's mapping pointer, unless it was created by the
+        * kernel API, in which case leave it so that it can be freed by reclaim_ctx()
+        */
+       if (!ctx->kernelapi) {
+               mutex_lock(&ctx->mapping_lock);
+               ctx->mapping = NULL;
+               mutex_unlock(&ctx->mapping_lock);
+       }
 
        put_device(&ctx->afu->dev);
 
index 583b42afeda2355da2e606f4fbc33546445a8df6..09a406058c4650ddf71114c26201889620003b9e 100644 (file)
@@ -414,7 +414,7 @@ void cxl_release_psl_irq(struct cxl_afu *afu)
        kfree(afu->psl_irq_name);
 }
 
-static void afu_irq_name_free(struct cxl_context *ctx)
+void afu_irq_name_free(struct cxl_context *ctx)
 {
        struct cxl_irq_name *irq_name, *tmp;
 
@@ -524,7 +524,5 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie)
        afu_irq_name_free(ctx);
        cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 
-       kfree(ctx->irq_bitmap);
-       ctx->irq_bitmap = NULL;
        ctx->irq_count = 0;
 }
index b37f2e8004f5bcd58f970ea274ebd29ef4b1eae3..d2e75c88f4d2165762913c27c57e5d4487e431ad 100644 (file)
@@ -457,6 +457,7 @@ static int activate_afu_directed(struct cxl_afu *afu)
 
        dev_info(&afu->dev, "Activating AFU directed mode\n");
 
+       afu->num_procs = afu->max_procs_virtualised;
        if (afu->spa == NULL) {
                if (cxl_alloc_spa(afu))
                        return -ENOMEM;
@@ -468,7 +469,6 @@ static int activate_afu_directed(struct cxl_afu *afu)
        cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
 
        afu->current_mode = CXL_MODE_DIRECTED;
-       afu->num_procs = afu->max_procs_virtualised;
 
        if ((rc = cxl_chardev_m_afu_add(afu)))
                return rc;
index a5e977192b61f97bfbace09aa93f0b577be67bb7..85761d7eb333173040204a7a5593bf2c7cf06485 100644 (file)
@@ -1035,6 +1035,32 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
        return 0;
 }
 
+/*
+ * Work around a PCIe Host Bridge defect on some cards that can cause
+ * malformed Transaction Layer Packet (TLP) errors to be erroneously
+ * reported. Mask this error in the Uncorrectable Error Mask Register.
+ *
+ * The upper nibble of the PSL revision is used to distinguish between
+ * different cards. The affected ones have it set to 0.
+ */
+static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
+{
+       int aer;
+       u32 data;
+
+       if (adapter->psl_rev & 0xf000)
+               return;
+       if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
+               return;
+       pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
+       if (data & PCI_ERR_UNC_MALF_TLP)
+               if (data & PCI_ERR_UNC_INTN)
+                       return;
+       data |= PCI_ERR_UNC_MALF_TLP;
+       data |= PCI_ERR_UNC_INTN;
+       pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
+}
+
 static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
 {
        if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
@@ -1134,6 +1160,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
        if ((rc = cxl_vsec_looks_ok(adapter, dev)))
                return rc;
 
+       cxl_fixup_malformed_tlp(adapter, dev);
+
        if ((rc = setup_cxl_bars(dev)))
                return rc;
 
index 8eec887c8f701ce732a6278a49b1fbe298ca0635..6d7c188fb65c8ce288817e0ffb5728764e2ac133 100644 (file)
@@ -1209,7 +1209,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
                 * after the host receives the enum_resp
                 * message clients may be added or removed
                 */
-               if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS &&
+               if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS ||
                    dev->hbm_state >= MEI_HBM_STOPPED) {
                        dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
                                dev->dev_state, dev->hbm_state);
index c742cfd7674e0eb699b2e656cff1eba288932b6f..23b6c8e8701ccd6e3ec57c85483276de4b02f51b 100644 (file)
@@ -387,6 +387,24 @@ out:
        return ERR_PTR(err);
 }
 
+static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
+                                     struct mmc_blk_ioc_data *idata)
+{
+       struct mmc_ioc_cmd *ic = &idata->ic;
+
+       if (copy_to_user(&(ic_ptr->response), ic->response,
+                        sizeof(ic->response)))
+               return -EFAULT;
+
+       if (!idata->ic.write_flag) {
+               if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
+                                idata->buf, idata->buf_bytes))
+                       return -EFAULT;
+       }
+
+       return 0;
+}
+
 static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
                                       u32 retries_max)
 {
@@ -447,12 +465,9 @@ out:
        return err;
 }
 
-static int mmc_blk_ioctl_cmd(struct block_device *bdev,
-       struct mmc_ioc_cmd __user *ic_ptr)
+static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+                              struct mmc_blk_ioc_data *idata)
 {
-       struct mmc_blk_ioc_data *idata;
-       struct mmc_blk_data *md;
-       struct mmc_card *card;
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct mmc_request mrq = {NULL};
@@ -461,33 +476,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
        int is_rpmb = false;
        u32 status = 0;
 
-       /*
-        * The caller must have CAP_SYS_RAWIO, and must be calling this on the
-        * whole block device, not on a partition.  This prevents overspray
-        * between sibling partitions.
-        */
-       if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
-               return -EPERM;
-
-       idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
-       if (IS_ERR(idata))
-               return PTR_ERR(idata);
-
-       md = mmc_blk_get(bdev->bd_disk);
-       if (!md) {
-               err = -EINVAL;
-               goto cmd_err;
-       }
+       if (!card || !md || !idata)
+               return -EINVAL;
 
        if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
                is_rpmb = true;
 
-       card = md->queue.card;
-       if (IS_ERR(card)) {
-               err = PTR_ERR(card);
-               goto cmd_done;
-       }
-
        cmd.opcode = idata->ic.opcode;
        cmd.arg = idata->ic.arg;
        cmd.flags = idata->ic.flags;
@@ -530,23 +524,21 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
 
        mrq.cmd = &cmd;
 
-       mmc_get_card(card);
-
        err = mmc_blk_part_switch(card, md);
        if (err)
-               goto cmd_rel_host;
+               return err;
 
        if (idata->ic.is_acmd) {
                err = mmc_app_cmd(card->host, card);
                if (err)
-                       goto cmd_rel_host;
+                       return err;
        }
 
        if (is_rpmb) {
                err = mmc_set_blockcount(card, data.blocks,
                        idata->ic.write_flag & (1 << 31));
                if (err)
-                       goto cmd_rel_host;
+                       return err;
        }
 
        if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
@@ -557,7 +549,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
                        pr_err("%s: ioctl_do_sanitize() failed. err = %d",
                               __func__, err);
 
-               goto cmd_rel_host;
+               return err;
        }
 
        mmc_wait_for_req(card->host, &mrq);
@@ -565,14 +557,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
        if (cmd.error) {
                dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
                                                __func__, cmd.error);
-               err = cmd.error;
-               goto cmd_rel_host;
+               return cmd.error;
        }
        if (data.error) {
                dev_err(mmc_dev(card->host), "%s: data error %d\n",
                                                __func__, data.error);
-               err = data.error;
-               goto cmd_rel_host;
+               return data.error;
        }
 
        /*
@@ -582,18 +572,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
        if (idata->ic.postsleep_min_us)
                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
 
-       if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
-               err = -EFAULT;
-               goto cmd_rel_host;
-       }
-
-       if (!idata->ic.write_flag) {
-               if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
-                                               idata->buf, idata->buf_bytes)) {
-                       err = -EFAULT;
-                       goto cmd_rel_host;
-               }
-       }
+       memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
 
        if (is_rpmb) {
                /*
@@ -607,24 +586,132 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
                                        __func__, status, err);
        }
 
-cmd_rel_host:
+       return err;
+}
+
+static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+                            struct mmc_ioc_cmd __user *ic_ptr)
+{
+       struct mmc_blk_ioc_data *idata;
+       struct mmc_blk_data *md;
+       struct mmc_card *card;
+       int err = 0, ioc_err = 0;
+
+       idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+       if (IS_ERR(idata))
+               return PTR_ERR(idata);
+
+       md = mmc_blk_get(bdev->bd_disk);
+       if (!md) {
+               err = -EINVAL;
+               goto cmd_err;
+       }
+
+       card = md->queue.card;
+       if (IS_ERR(card)) {
+               err = PTR_ERR(card);
+               goto cmd_done;
+       }
+
+       mmc_get_card(card);
+
+       ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
+
        mmc_put_card(card);
 
+       err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+
 cmd_done:
        mmc_blk_put(md);
 cmd_err:
        kfree(idata->buf);
        kfree(idata);
-       return err;
+       return ioc_err ? ioc_err : err;
+}
+
+static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
+                                  struct mmc_ioc_multi_cmd __user *user)
+{
+       struct mmc_blk_ioc_data **idata = NULL;
+       struct mmc_ioc_cmd __user *cmds = user->cmds;
+       struct mmc_card *card;
+       struct mmc_blk_data *md;
+       int i, err = 0, ioc_err = 0;
+       __u64 num_of_cmds;
+
+       if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
+                          sizeof(num_of_cmds)))
+               return -EFAULT;
+
+       if (num_of_cmds > MMC_IOC_MAX_CMDS)
+               return -EINVAL;
+
+       idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
+       if (!idata)
+               return -ENOMEM;
+
+       for (i = 0; i < num_of_cmds; i++) {
+               idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
+               if (IS_ERR(idata[i])) {
+                       err = PTR_ERR(idata[i]);
+                       num_of_cmds = i;
+                       goto cmd_err;
+               }
+       }
+
+       md = mmc_blk_get(bdev->bd_disk);
+       if (!md)
+               goto cmd_err;
+
+       card = md->queue.card;
+       if (IS_ERR(card)) {
+               err = PTR_ERR(card);
+               goto cmd_done;
+       }
+
+       mmc_get_card(card);
+
+       for (i = 0; i < num_of_cmds && !ioc_err; i++)
+               ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+
+       mmc_put_card(card);
+
+       /* copy each command's response (and any read data) back to user space */
+       for (i = 0; i < num_of_cmds && !err; i++)
+               err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
+
+cmd_done:
+       mmc_blk_put(md);
+cmd_err:
+       for (i = 0; i < num_of_cmds; i++) {
+               kfree(idata[i]->buf);
+               kfree(idata[i]);
+       }
+       kfree(idata);
+       return ioc_err ? ioc_err : err;
 }
 
 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
 {
-       int ret = -EINVAL;
-       if (cmd == MMC_IOC_CMD)
-               ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
-       return ret;
+       /*
+        * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+        * whole block device, not on a partition.  This prevents overspray
+        * between sibling partitions.
+        */
+       if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+               return -EPERM;
+
+       switch (cmd) {
+       case MMC_IOC_CMD:
+               return mmc_blk_ioctl_cmd(bdev,
+                               (struct mmc_ioc_cmd __user *)arg);
+       case MMC_IOC_MULTI_CMD:
+               return mmc_blk_ioctl_multi_cmd(bdev,
+                               (struct mmc_ioc_multi_cmd __user *)arg);
+       default:
+               return -EINVAL;
+       }
 }
 
 #ifdef CONFIG_COMPAT
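
Note on the block.c hunks above: the new MMC_IOC_MULTI_CMD path lets user space submit several commands under a single card claim. The argument is a struct mmc_ioc_multi_cmd holding num_of_cmds followed by an array of ordinary struct mmc_ioc_cmd entries (capped at MMC_IOC_MAX_CMDS), and each entry's response and read data are copied back individually. A hedged userspace sketch of issuing two commands this way, assuming the UAPI header added by this series is installed; the opcode, argument and flag values are placeholders, not a meaningful card sequence:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mmc/ioctl.h>	/* struct mmc_ioc_multi_cmd, MMC_IOC_MULTI_CMD */

int main(void)
{
	struct mmc_ioc_multi_cmd *multi;
	int fd, ret, i;

	/* Room for the header plus two command slots. */
	multi = calloc(1, sizeof(*multi) + 2 * sizeof(struct mmc_ioc_cmd));
	if (!multi)
		return 1;

	multi->num_of_cmds = 2;
	for (i = 0; i < 2; i++) {
		multi->cmds[i].opcode = 13;	/* placeholder: CMD13 SEND_STATUS */
		multi->cmds[i].arg = 0;		/* placeholder RCA */
		multi->cmds[i].flags = 0;	/* response flags elided in this sketch */
	}

	/* Must be the whole device node; the caller needs CAP_SYS_RAWIO. */
	fd = open("/dev/mmcblk0", O_RDWR);
	if (fd < 0) {
		free(multi);
		return 1;
	}

	ret = ioctl(fd, MMC_IOC_MULTI_CMD, multi);
	printf("MMC_IOC_MULTI_CMD returned %d\n", ret);

	close(fd);
	free(multi);
	return ret ? 1 : 0;
}
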
index b78cf5d403a33b74244a39245b9df6f9024c4d24..7fc9174d46191a13c77fa4e220bb476a32dfe05e 100644 (file)
@@ -2263,15 +2263,12 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
 /*
  * eMMC hardware reset.
  */
-static int mmc_test_hw_reset(struct mmc_test_card *test)
+static int mmc_test_reset(struct mmc_test_card *test)
 {
        struct mmc_card *card = test->card;
        struct mmc_host *host = card->host;
        int err;
 
-       if (!mmc_card_mmc(card) || !mmc_can_reset(card))
-               return RESULT_UNSUP_CARD;
-
        err = mmc_hw_reset(host);
        if (!err)
                return RESULT_OK;
@@ -2605,8 +2602,8 @@ static const struct mmc_test_case mmc_test_cases[] = {
        },
 
        {
-               .name = "eMMC hardware reset",
-               .run = mmc_test_hw_reset,
+               .name = "Reset test",
+               .run = mmc_test_reset,
        },
 };
 
index 9ebee72d9c3fd9cd42996f446cea6408e7b6e7c0..4c33d7690f2f66c4f8eb244480d06bce58acd3f1 100644 (file)
@@ -1,13 +1,3 @@
 #
 # MMC core configuration
 #
-
-config MMC_CLKGATE
-       bool "MMC host clock gating"
-       help
-         This will attempt to aggressively gate the clock to the MMC card.
-         This is done to save power due to gating off the logic and bus
-         noise when the MMC card is not in use. Your host driver has to
-         support handling this in order for it to be of any use.
-
-         If unsure, say N.
index 0520064dc33beb164aa9d80642c371e227d599b1..5ae89e48fd85b575cf743363b3a298b49872b7db 100644 (file)
@@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
        int err = cmd->error;
 
        /* Flag re-tuning needed on CRC errors */
-       if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
+       if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+           cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
+           (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
            (mrq->data && mrq->data->error == -EILSEQ) ||
-           (mrq->stop && mrq->stop->error == -EILSEQ))
+           (mrq->stop && mrq->stop->error == -EILSEQ)))
                mmc_retune_needed(host);
 
        if (err && cmd->retries && mmc_host_is_spi(host)) {
@@ -185,8 +187,6 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
 
                if (mrq->done)
                        mrq->done(mrq);
-
-               mmc_host_clk_release(host);
        }
 }
 
@@ -204,6 +204,23 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
                return;
        }
 
+       /*
+        * For SDIO rw commands we must wait for card busy, otherwise some
+        * SDIO devices won't work properly.
+        */
+       if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
+               int tries = 500; /* Wait approx. 500ms at maximum */
+
+               while (host->ops->card_busy(host) && --tries)
+                       mmc_delay(1);
+
+               if (tries == 0) {
+                       mrq->cmd->error = -EBUSY;
+                       mmc_request_done(host, mrq);
+                       return;
+               }
+       }
+
        host->ops->request(host, mrq);
 }
 
@@ -273,7 +290,6 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
                        mrq->stop->mrq = mrq;
                }
        }
-       mmc_host_clk_hold(host);
        led_trigger_event(host->led, LED_FULL);
        __mmc_start_request(host, mrq);
 
@@ -523,11 +539,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
                 bool is_first_req)
 {
-       if (host->ops->pre_req) {
-               mmc_host_clk_hold(host);
+       if (host->ops->pre_req)
                host->ops->pre_req(host, mrq, is_first_req);
-               mmc_host_clk_release(host);
-       }
 }
 
 /**
@@ -542,11 +555,8 @@ static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
                         int err)
 {
-       if (host->ops->post_req) {
-               mmc_host_clk_hold(host);
+       if (host->ops->post_req)
                host->ops->post_req(host, mrq, err);
-               mmc_host_clk_release(host);
-       }
 }
 
 /**
@@ -831,9 +841,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
                unsigned int timeout_us, limit_us;
 
                timeout_us = data->timeout_ns / 1000;
-               if (mmc_host_clk_rate(card->host))
+               if (card->host->ios.clock)
                        timeout_us += data->timeout_clks * 1000 /
-                               (mmc_host_clk_rate(card->host) / 1000);
+                               (card->host->ios.clock / 1000);
 
                if (data->flags & MMC_DATA_WRITE)
                        /*
@@ -1031,8 +1041,6 @@ static inline void mmc_set_ios(struct mmc_host *host)
                 ios->power_mode, ios->chip_select, ios->vdd,
                 ios->bus_width, ios->timing);
 
-       if (ios->clock > 0)
-               mmc_set_ungated(host);
        host->ops->set_ios(host, ios);
 }
 
@@ -1041,17 +1049,15 @@ static inline void mmc_set_ios(struct mmc_host *host)
  */
 void mmc_set_chip_select(struct mmc_host *host, int mode)
 {
-       mmc_host_clk_hold(host);
        host->ios.chip_select = mode;
        mmc_set_ios(host);
-       mmc_host_clk_release(host);
 }
 
 /*
  * Sets the host clock to the highest possible frequency that
  * is below "hz".
  */
-static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
 {
        WARN_ON(hz && hz < host->f_min);
 
@@ -1062,68 +1068,6 @@ static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
        mmc_set_ios(host);
 }
 
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
-{
-       mmc_host_clk_hold(host);
-       __mmc_set_clock(host, hz);
-       mmc_host_clk_release(host);
-}
-
-#ifdef CONFIG_MMC_CLKGATE
-/*
- * This gates the clock by setting it to 0 Hz.
- */
-void mmc_gate_clock(struct mmc_host *host)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&host->clk_lock, flags);
-       host->clk_old = host->ios.clock;
-       host->ios.clock = 0;
-       host->clk_gated = true;
-       spin_unlock_irqrestore(&host->clk_lock, flags);
-       mmc_set_ios(host);
-}
-
-/*
- * This restores the clock from gating by using the cached
- * clock value.
- */
-void mmc_ungate_clock(struct mmc_host *host)
-{
-       /*
-        * We should previously have gated the clock, so the clock shall
-        * be 0 here! The clock may however be 0 during initialization,
-        * when some request operations are performed before setting
-        * the frequency. When ungate is requested in that situation
-        * we just ignore the call.
-        */
-       if (host->clk_old) {
-               BUG_ON(host->ios.clock);
-               /* This call will also set host->clk_gated to false */
-               __mmc_set_clock(host, host->clk_old);
-       }
-}
-
-void mmc_set_ungated(struct mmc_host *host)
-{
-       unsigned long flags;
-
-       /*
-        * We've been given a new frequency while the clock is gated,
-        * so make sure we regard this as ungating it.
-        */
-       spin_lock_irqsave(&host->clk_lock, flags);
-       host->clk_gated = false;
-       spin_unlock_irqrestore(&host->clk_lock, flags);
-}
-
-#else
-void mmc_set_ungated(struct mmc_host *host)
-{
-}
-#endif
-
 int mmc_execute_tuning(struct mmc_card *card)
 {
        struct mmc_host *host = card->host;
@@ -1138,9 +1082,7 @@ int mmc_execute_tuning(struct mmc_card *card)
        else
                opcode = MMC_SEND_TUNING_BLOCK;
 
-       mmc_host_clk_hold(host);
        err = host->ops->execute_tuning(host, opcode);
-       mmc_host_clk_release(host);
 
        if (err)
                pr_err("%s: tuning execution failed\n", mmc_hostname(host));
@@ -1155,10 +1097,8 @@ int mmc_execute_tuning(struct mmc_card *card)
  */
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 {
-       mmc_host_clk_hold(host);
        host->ios.bus_mode = mode;
        mmc_set_ios(host);
-       mmc_host_clk_release(host);
 }
 
 /*
@@ -1166,10 +1106,8 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
  */
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 {
-       mmc_host_clk_hold(host);
        host->ios.bus_width = width;
        mmc_set_ios(host);
-       mmc_host_clk_release(host);
 }
 
 /*
@@ -1338,6 +1276,40 @@ struct device_node *mmc_of_find_child_device(struct mmc_host *host,
 
 #ifdef CONFIG_REGULATOR
 
+/**
+ * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
+ * @vdd_bit:   OCR bit number
+ * @min_uV:    minimum voltage value (uV)
+ * @max_uV:    maximum voltage value (uV)
+ *
+ * This function returns the voltage range according to the provided OCR
+ * bit number. If conversion is not possible a negative errno value is returned.
+ */
+static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
+{
+       int             tmp;
+
+       if (!vdd_bit)
+               return -EINVAL;
+
+       /*
+        * REVISIT mmc_vddrange_to_ocrmask() may have set some
+        * bits this regulator doesn't quite support ... don't
+        * be too picky, most cards and regulators are OK with
+        * a 0.1V range goof (it's a small error percentage).
+        */
+       tmp = vdd_bit - ilog2(MMC_VDD_165_195);
+       if (tmp == 0) {
+               *min_uV = 1650 * 1000;
+               *max_uV = 1950 * 1000;
+       } else {
+               *min_uV = 1900 * 1000 + tmp * 100 * 1000;
+               *max_uV = *min_uV + 100 * 1000;
+       }
+
+       return 0;
+}
+
 /**
  * mmc_regulator_get_ocrmask - return mask of supported voltages
  * @supply: regulator to use
@@ -1401,22 +1373,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
        int                     min_uV, max_uV;
 
        if (vdd_bit) {
-               int             tmp;
-
-               /*
-                * REVISIT mmc_vddrange_to_ocrmask() may have set some
-                * bits this regulator doesn't quite support ... don't
-                * be too picky, most cards and regulators are OK with
-                * a 0.1V range goof (it's a small error percentage).
-                */
-               tmp = vdd_bit - ilog2(MMC_VDD_165_195);
-               if (tmp == 0) {
-                       min_uV = 1650 * 1000;
-                       max_uV = 1950 * 1000;
-               } else {
-                       min_uV = 1900 * 1000 + tmp * 100 * 1000;
-                       max_uV = min_uV + 100 * 1000;
-               }
+               mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
 
                result = regulator_set_voltage(supply, min_uV, max_uV);
                if (result == 0 && !mmc->regulator_enabled) {
@@ -1437,6 +1394,84 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
 }
 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
 
+static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
+                                                 int min_uV, int target_uV,
+                                                 int max_uV)
+{
+       /*
+        * Check if supported first to avoid errors since we may try several
+        * signal levels during power up and don't want to show errors.
+        */
+       if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
+               return -EINVAL;
+
+       return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
+                                            max_uV);
+}
+
+/**
+ * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
+ *
+ * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
+ * That will match the behavior of old boards where VQMMC and VMMC were supplied
+ * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
+ * SD card spec also define VQMMC in terms of VMMC.
+ * If this is not possible we'll try the full 2.7-3.6V of the spec.
+ *
+ * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
+ * requested voltage.  This is definitely a good idea for UHS where there's a
+ * separate regulator on the card that's trying to make 1.8V and it's best if
+ * we match.
+ *
+ * This function is expected to be used by a controller's
+ * start_signal_voltage_switch() function.
+ */
+int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       struct device *dev = mmc_dev(mmc);
+       int ret, volt, min_uV, max_uV;
+
+       /* If no vqmmc supply then we can't change the voltage */
+       if (IS_ERR(mmc->supply.vqmmc))
+               return -EINVAL;
+
+       switch (ios->signal_voltage) {
+       case MMC_SIGNAL_VOLTAGE_120:
+               return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+                                               1100000, 1200000, 1300000);
+       case MMC_SIGNAL_VOLTAGE_180:
+               return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+                                               1700000, 1800000, 1950000);
+       case MMC_SIGNAL_VOLTAGE_330:
+               ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
+               if (ret < 0)
+                       return ret;
+
+               dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
+                       __func__, volt, max_uV);
+
+               min_uV = max(volt - 300000, 2700000);
+               max_uV = min(max_uV + 200000, 3600000);
+
+               /*
+                * Due to a limitation in the current implementation of
+                * regulator_set_voltage_triplet() which is taking the lowest
+                * voltage possible if below the target, search for a suitable
+                * voltage in two steps and try to stay close to vmmc
+                * with a 0.3V tolerance at first.
+                */
+               if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+                                               min_uV, volt, max_uV))
+                       return 0;
+
+               return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+                                               2700000, volt, 3600000);
+       default:
+               return -EINVAL;
+       }
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
+
 #endif /* CONFIG_REGULATOR */
 
 int mmc_regulator_get_supply(struct mmc_host *mmc)
@@ -1513,11 +1548,8 @@ int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
        int old_signal_voltage = host->ios.signal_voltage;
 
        host->ios.signal_voltage = signal_voltage;
-       if (host->ops->start_signal_voltage_switch) {
-               mmc_host_clk_hold(host);
+       if (host->ops->start_signal_voltage_switch)
                err = host->ops->start_signal_voltage_switch(host, &host->ios);
-               mmc_host_clk_release(host);
-       }
 
        if (err)
                host->ios.signal_voltage = old_signal_voltage;
@@ -1551,20 +1583,17 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
                pr_warn("%s: cannot verify signal voltage switch\n",
                        mmc_hostname(host));
 
-       mmc_host_clk_hold(host);
-
        cmd.opcode = SD_SWITCH_VOLTAGE;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 
        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (err)
-               goto err_command;
+               return err;
+
+       if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
+               return -EIO;
 
-       if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
-               err = -EIO;
-               goto err_command;
-       }
        /*
         * The card should drive cmd and dat[0:3] low immediately
         * after the response of cmd11, but wait 1 ms to be sure
@@ -1613,9 +1642,6 @@ power_cycle:
                mmc_power_cycle(host, ocr);
        }
 
-err_command:
-       mmc_host_clk_release(host);
-
        return err;
 }
 
@@ -1624,10 +1650,8 @@ err_command:
  */
 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
 {
-       mmc_host_clk_hold(host);
        host->ios.timing = timing;
        mmc_set_ios(host);
-       mmc_host_clk_release(host);
 }
 
 /*
@@ -1635,10 +1659,8 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
  */
 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
 {
-       mmc_host_clk_hold(host);
        host->ios.drv_type = drv_type;
        mmc_set_ios(host);
-       mmc_host_clk_release(host);
 }
 
 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
@@ -1646,7 +1668,6 @@ int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
 {
        struct mmc_host *host = card->host;
        int host_drv_type = SD_DRIVER_TYPE_B;
-       int drive_strength;
 
        *drv_type = 0;
 
@@ -1669,14 +1690,10 @@ int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
         * information and let the hardware specific code
         * return what is possible given the options
         */
-       mmc_host_clk_hold(host);
-       drive_strength = host->ops->select_drive_strength(card, max_dtr,
-                                                         host_drv_type,
-                                                         card_drv_type,
-                                                         drv_type);
-       mmc_host_clk_release(host);
-
-       return drive_strength;
+       return host->ops->select_drive_strength(card, max_dtr,
+                                               host_drv_type,
+                                               card_drv_type,
+                                               drv_type);
 }
 
 /*
@@ -1695,8 +1712,6 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
        if (host->ios.power_mode == MMC_POWER_ON)
                return;
 
-       mmc_host_clk_hold(host);
-
        mmc_pwrseq_pre_power_on(host);
 
        host->ios.vdd = fls(ocr) - 1;
@@ -1730,8 +1745,6 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
         * time required to reach a stable voltage.
         */
        mmc_delay(10);
-
-       mmc_host_clk_release(host);
 }
 
 void mmc_power_off(struct mmc_host *host)
@@ -1739,8 +1752,6 @@ void mmc_power_off(struct mmc_host *host)
        if (host->ios.power_mode == MMC_POWER_OFF)
                return;
 
-       mmc_host_clk_hold(host);
-
        mmc_pwrseq_power_off(host);
 
        host->ios.clock = 0;
@@ -1756,8 +1767,6 @@ void mmc_power_off(struct mmc_host *host)
         * can be successfully turned on again.
         */
        mmc_delay(1);
-
-       mmc_host_clk_release(host);
 }
 
 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
@@ -1973,7 +1982,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
                 */
                timeout_clks <<= 1;
                timeout_us += (timeout_clks * 1000) /
-                             (mmc_host_clk_rate(card->host) / 1000);
+                             (card->host->ios.clock / 1000);
 
                erase_timeout = timeout_us / 1000;
 
@@ -2421,9 +2430,7 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
 {
        if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
                return;
-       mmc_host_clk_hold(host);
        host->ops->hw_reset(host);
-       mmc_host_clk_release(host);
 }
 
 int mmc_hw_reset(struct mmc_host *host)
@@ -2631,10 +2638,14 @@ void mmc_start_host(struct mmc_host *host)
        host->f_init = max(freqs[0], host->f_min);
        host->rescan_disable = 0;
        host->ios.power_mode = MMC_POWER_UNDEFINED;
+
+       mmc_claim_host(host);
        if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
                mmc_power_off(host);
        else
                mmc_power_up(host, host->ocr_avail);
+       mmc_release_host(host);
+
        mmc_gpiod_request_cd_irq(host);
        _mmc_detect_change(host, 0, false);
 }
@@ -2672,7 +2683,9 @@ void mmc_stop_host(struct mmc_host *host)
 
        BUG_ON(host->card);
 
+       mmc_claim_host(host);
        mmc_power_off(host);
+       mmc_release_host(host);
 }
 
 int mmc_power_save_host(struct mmc_host *host)
index 1a22a82209b26a30c261f4affb43f23641926f50..09241e56d62872ec3b4c3b6cc24ef815e29ed3d8 100644 (file)
@@ -40,9 +40,6 @@ void mmc_init_erase(struct mmc_card *card);
 
 void mmc_set_chip_select(struct mmc_host *host, int mode);
 void mmc_set_clock(struct mmc_host *host, unsigned int hz);
-void mmc_gate_clock(struct mmc_host *host);
-void mmc_ungate_clock(struct mmc_host *host);
-void mmc_set_ungated(struct mmc_host *host);
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
index e9142108a6c6d486d3ac15ba4cb8a317d1fd62b2..154aced0b91b63e33cb95a9ba1a82e386395392d 100644 (file)
@@ -126,6 +126,12 @@ static int mmc_ios_show(struct seq_file *s, void *data)
        case MMC_TIMING_SD_HS:
                str = "sd high-speed";
                break;
+       case MMC_TIMING_UHS_SDR12:
+               str = "sd uhs SDR12";
+               break;
+       case MMC_TIMING_UHS_SDR25:
+               str = "sd uhs SDR25";
+               break;
        case MMC_TIMING_UHS_SDR50:
                str = "sd uhs SDR50";
                break;
@@ -166,6 +172,25 @@ static int mmc_ios_show(struct seq_file *s, void *data)
        }
        seq_printf(s, "signal voltage:\t%u (%s)\n", ios->chip_select, str);
 
+       switch (ios->drv_type) {
+       case MMC_SET_DRIVER_TYPE_A:
+               str = "driver type A";
+               break;
+       case MMC_SET_DRIVER_TYPE_B:
+               str = "driver type B";
+               break;
+       case MMC_SET_DRIVER_TYPE_C:
+               str = "driver type C";
+               break;
+       case MMC_SET_DRIVER_TYPE_D:
+               str = "driver type D";
+               break;
+       default:
+               str = "invalid";
+               break;
+       }
+       seq_printf(s, "driver type:\t%u (%s)\n", ios->drv_type, str);
+
        return 0;
 }
 
@@ -230,11 +255,6 @@ void mmc_add_host_debugfs(struct mmc_host *host)
                        &mmc_clock_fops))
                goto err_node;
 
-#ifdef CONFIG_MMC_CLKGATE
-       if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
-                               root, &host->clk_delay))
-               goto err_node;
-#endif
 #ifdef CONFIG_FAIL_MMC_REQUEST
        if (fail_request)
                setup_fault_attr(&fail_default_attr, fail_request);
index abd933b7029bec26b7adebbea2db8fe3be426eb6..da950c44204d27d6db8cd5a2d56b6b3e88e27c09 100644 (file)
@@ -61,246 +61,6 @@ void mmc_unregister_host_class(void)
        class_unregister(&mmc_host_class);
 }
 
-#ifdef CONFIG_MMC_CLKGATE
-static ssize_t clkgate_delay_show(struct device *dev,
-               struct device_attribute *attr, char *buf)
-{
-       struct mmc_host *host = cls_dev_to_mmc_host(dev);
-       return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
-}
-
-static ssize_t clkgate_delay_store(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t count)
-{
-       struct mmc_host *host = cls_dev_to_mmc_host(dev);
-       unsigned long flags, value;
-
-       if (kstrtoul(buf, 0, &value))
-               return -EINVAL;
-
-       spin_lock_irqsave(&host->clk_lock, flags);
-       host->clkgate_delay = value;
-       spin_unlock_irqrestore(&host->clk_lock, flags);
-       return count;
-}
-
-/*
- * Enabling clock gating will make the core call out to the host
- * once up and once down when it performs a request or card operation
- * intermingled in any fashion. The driver will see this through
- * set_ios() operations with ios.clock field set to 0 to gate (disable)
- * the block clock, and to the old frequency to enable it again.
- */
-static void mmc_host_clk_gate_delayed(struct mmc_host *host)
-{
-       unsigned long tick_ns;
-       unsigned long freq = host->ios.clock;
-       unsigned long flags;
-
-       if (!freq) {
-               pr_debug("%s: frequency set to 0 in disable function, "
-                        "this means the clock is already disabled.\n",
-                        mmc_hostname(host));
-               return;
-       }
-       /*
-        * New requests may have appeared while we were scheduling,
-        * then there is no reason to delay the check before
-        * clk_disable().
-        */
-       spin_lock_irqsave(&host->clk_lock, flags);
-
-       /*
-        * Delay n bus cycles (at least 8 from MMC spec) before attempting
-        * to disable the MCI block clock. The reference count may have
-        * gone up again after this delay due to rescheduling!
-        */
-       if (!host->clk_requests) {
-               spin_unlock_irqrestore(&host->clk_lock, flags);
-               tick_ns = DIV_ROUND_UP(1000000000, freq);
-               ndelay(host->clk_delay * tick_ns);
-       } else {
-               /* New users appeared while waiting for this work */
-               spin_unlock_irqrestore(&host->clk_lock, flags);
-               return;
-       }
-       mutex_lock(&host->clk_gate_mutex);
-       spin_lock_irqsave(&host->clk_lock, flags);
-       if (!host->clk_requests) {
-               spin_unlock_irqrestore(&host->clk_lock, flags);
-               /* This will set host->ios.clock to 0 */
-               mmc_gate_clock(host);
-               spin_lock_irqsave(&host->clk_lock, flags);
-               pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
-       }
-       spin_unlock_irqrestore(&host->clk_lock, flags);
-       mutex_unlock(&host->clk_gate_mutex);
-}
-
-/*
- * Internal work. Work to disable the clock at some later point.
- */
-static void mmc_host_clk_gate_work(struct work_struct *work)
-{
-       struct mmc_host *host = container_of(work, struct mmc_host,
-                                             clk_gate_work.work);
-
-       mmc_host_clk_gate_delayed(host);
-}
-
-/**
- *     mmc_host_clk_hold - ungate hardware MCI clocks
- *     @host: host to ungate.
- *
- *     Makes sure the host ios.clock is restored to a non-zero value
- *     past this call. Increase clock reference count and ungate clock
- *     if we're the first user.
- */
-void mmc_host_clk_hold(struct mmc_host *host)
-{
-       unsigned long flags;
-
-       /* cancel any clock gating work scheduled by mmc_host_clk_release() */
-       cancel_delayed_work_sync(&host->clk_gate_work);
-       mutex_lock(&host->clk_gate_mutex);
-       spin_lock_irqsave(&host->clk_lock, flags);
-       if (host->clk_gated) {
-               spin_unlock_irqrestore(&host->clk_lock, flags);
-               mmc_ungate_clock(host);
-               spin_lock_irqsave(&host->clk_lock, flags);
-               pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
-       }
-       host->clk_requests++;
-       spin_unlock_irqrestore(&host->clk_lock, flags);
-       mutex_unlock(&host->clk_gate_mutex);
-}
-
-/**
- *     mmc_host_may_gate_card - check if this card may be gated
- *     @card: card to check.
- */
-static bool mmc_host_may_gate_card(struct mmc_card *card)
-{
-       /* If there is no card we may gate it */
-       if (!card)
-               return true;
-       /*
-        * Don't gate SDIO cards! These need to be clocked at all times
-        * since they may be independent systems generating interrupts
-        * and other events. The clock requests counter from the core will
-        * go down to zero since the core does not need it, but we will not
-        * gate the clock, because there is somebody out there that may still
-        * be using it.
-        */
-       return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
-}
-
-/**
- *     mmc_host_clk_release - gate off hardware MCI clocks
- *     @host: host to gate.
- *
- *     Calls the host driver with ios.clock set to zero as often as possible
- *     in order to gate off hardware MCI clocks. Decrease clock reference
- *     count and schedule disabling of clock.
- */
-void mmc_host_clk_release(struct mmc_host *host)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&host->clk_lock, flags);
-       host->clk_requests--;
-       if (mmc_host_may_gate_card(host->card) &&
-           !host->clk_requests)
-               schedule_delayed_work(&host->clk_gate_work,
-                                     msecs_to_jiffies(host->clkgate_delay));
-       spin_unlock_irqrestore(&host->clk_lock, flags);
-}
-
-/**
- *     mmc_host_clk_rate - get current clock frequency setting
- *     @host: host to get the clock frequency for.
- *
- *     Returns current clock frequency regardless of gating.
- */
-unsigned int mmc_host_clk_rate(struct mmc_host *host)
-{
-       unsigned long freq;
-       unsigned long flags;
-
-       spin_lock_irqsave(&host->clk_lock, flags);
-       if (host->clk_gated)
-               freq = host->clk_old;
-       else
-               freq = host->ios.clock;
-       spin_unlock_irqrestore(&host->clk_lock, flags);
-       return freq;
-}
-
-/**
- *     mmc_host_clk_init - set up clock gating code
- *     @host: host with potential clock to control
- */
-static inline void mmc_host_clk_init(struct mmc_host *host)
-{
-       host->clk_requests = 0;
-       /* Hold MCI clock for 8 cycles by default */
-       host->clk_delay = 8;
-       /*
-        * Default clock gating delay is 0ms to avoid wasting power.
-        * This value can be tuned by writing into sysfs entry.
-        */
-       host->clkgate_delay = 0;
-       host->clk_gated = false;
-       INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
-       spin_lock_init(&host->clk_lock);
-       mutex_init(&host->clk_gate_mutex);
-}
-
-/**
- *     mmc_host_clk_exit - shut down clock gating code
- *     @host: host with potential clock to control
- */
-static inline void mmc_host_clk_exit(struct mmc_host *host)
-{
-       /*
-        * Wait for any outstanding gate and then make sure we're
-        * ungated before exiting.
-        */
-       if (cancel_delayed_work_sync(&host->clk_gate_work))
-               mmc_host_clk_gate_delayed(host);
-       if (host->clk_gated)
-               mmc_host_clk_hold(host);
-       /* There should be only one user now */
-       WARN_ON(host->clk_requests > 1);
-}
-
-static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
-{
-       host->clkgate_delay_attr.show = clkgate_delay_show;
-       host->clkgate_delay_attr.store = clkgate_delay_store;
-       sysfs_attr_init(&host->clkgate_delay_attr.attr);
-       host->clkgate_delay_attr.attr.name = "clkgate_delay";
-       host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
-       if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
-               pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
-                               mmc_hostname(host));
-}
-#else
-
-static inline void mmc_host_clk_init(struct mmc_host *host)
-{
-}
-
-static inline void mmc_host_clk_exit(struct mmc_host *host)
-{
-}
-
-static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
-{
-}
-
-#endif
-
 void mmc_retune_enable(struct mmc_host *host)
 {
        host->can_retune = 1;
@@ -457,7 +217,7 @@ int mmc_of_parse(struct mmc_host *host)
                                           0, &cd_gpio_invert);
                if (!ret)
                        dev_info(host->parent, "Got CD GPIO\n");
-               else if (ret != -ENOENT)
+               else if (ret != -ENOENT && ret != -ENOSYS)
                        return ret;
 
                /*
@@ -481,7 +241,7 @@ int mmc_of_parse(struct mmc_host *host)
        ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
        if (!ret)
                dev_info(host->parent, "Got WP GPIO\n");
-       else if (ret != -ENOENT)
+       else if (ret != -ENOENT && ret != -ENOSYS)
                return ret;
 
        if (of_property_read_bool(np, "disable-wp"))
@@ -507,6 +267,8 @@ int mmc_of_parse(struct mmc_host *host)
                host->caps |= MMC_CAP_UHS_DDR50;
        if (of_property_read_bool(np, "cap-power-off-card"))
                host->caps |= MMC_CAP_POWER_OFF_CARD;
+       if (of_property_read_bool(np, "cap-mmc-hw-reset"))
+               host->caps |= MMC_CAP_HW_RESET;
        if (of_property_read_bool(np, "cap-sdio-irq"))
                host->caps |= MMC_CAP_SDIO_IRQ;
        if (of_property_read_bool(np, "full-pwr-cycle"))
@@ -583,8 +345,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
                return NULL;
        }
 
-       mmc_host_clk_init(host);
-
        spin_lock_init(&host->lock);
        init_waitqueue_head(&host->wq);
        INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -633,7 +393,6 @@ int mmc_add_host(struct mmc_host *host)
 #ifdef CONFIG_DEBUG_FS
        mmc_add_host_debugfs(host);
 #endif
-       mmc_host_clk_sysfs_init(host);
 
        mmc_start_host(host);
        register_pm_notifier(&host->pm_notify);
@@ -663,8 +422,6 @@ void mmc_remove_host(struct mmc_host *host)
        device_del(&host->class_dev);
 
        led_trigger_unregister_simple(host->led);
-
-       mmc_host_clk_exit(host);
 }
 
 EXPORT_SYMBOL(mmc_remove_host);
index e726903170a828cffd69a1cb2e7f80d59c0d5152..c793fda27321da0086bb01d92e33d9aa44480ed0 100644 (file)
@@ -1924,7 +1924,6 @@ EXPORT_SYMBOL(mmc_can_reset);
 static int mmc_reset(struct mmc_host *host)
 {
        struct mmc_card *card = host->card;
-       u32 status;
 
        if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
                return -EOPNOTSUPP;
@@ -1932,20 +1931,12 @@ static int mmc_reset(struct mmc_host *host)
        if (!mmc_can_reset(card))
                return -EOPNOTSUPP;
 
-       mmc_host_clk_hold(host);
        mmc_set_clock(host, host->f_init);
 
        host->ops->hw_reset(host);
 
-       /* If the reset has happened, then a status command will fail */
-       if (!mmc_send_status(card, &status)) {
-               mmc_host_clk_release(host);
-               return -ENOSYS;
-       }
-
        /* Set initial state and call mmc_set_ios */
        mmc_set_initial_state(host);
-       mmc_host_clk_release(host);
 
        return mmc_init_card(host, card->ocr, card);
 }
@@ -2013,14 +2004,13 @@ int mmc_attach_mmc(struct mmc_host *host)
 
        mmc_release_host(host);
        err = mmc_add_card(host->card);
-       mmc_claim_host(host);
        if (err)
                goto remove_card;
 
+       mmc_claim_host(host);
        return 0;
 
 remove_card:
-       mmc_release_host(host);
        mmc_remove_card(host->card);
        mmc_claim_host(host);
        host->card = NULL;
index 0e9ae1c276c800b82228e6c3676e6aacb9ad2755..1f444269ebbe66027831757403d1bdd53d1b6a8d 100644 (file)
@@ -579,7 +579,6 @@ out:
 
        return err;
 }
-EXPORT_SYMBOL_GPL(__mmc_switch);
 
 int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
                unsigned int timeout_ms)
@@ -589,7 +588,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 }
 EXPORT_SYMBOL_GPL(mmc_switch);
 
-int mmc_send_tuning(struct mmc_host *host)
+int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
 {
        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
@@ -599,16 +598,13 @@ int mmc_send_tuning(struct mmc_host *host)
        const u8 *tuning_block_pattern;
        int size, err = 0;
        u8 *data_buf;
-       u32 opcode;
 
        if (ios->bus_width == MMC_BUS_WIDTH_8) {
                tuning_block_pattern = tuning_blk_pattern_8bit;
                size = sizeof(tuning_blk_pattern_8bit);
-               opcode = MMC_SEND_TUNING_BLOCK_HS200;
        } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
                tuning_block_pattern = tuning_blk_pattern_4bit;
                size = sizeof(tuning_blk_pattern_4bit);
-               opcode = MMC_SEND_TUNING_BLOCK;
        } else
                return -EINVAL;
 
@@ -639,6 +635,9 @@ int mmc_send_tuning(struct mmc_host *host)
 
        mmc_wait_for_req(host, &mrq);
 
+       if (cmd_error)
+               *cmd_error = cmd.error;
+
        if (cmd.error) {
                err = cmd.error;
                goto out;
index f498f9ae21f09bd405d687981ae959f061a63d3e..f1b8e81aaa284d1310c4da481a915cf5560e7700 100644 (file)
@@ -28,6 +28,9 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width);
 int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
 int mmc_can_ext_csd(struct mmc_card *card);
 int mmc_switch_status_error(struct mmc_host *host, u32 status);
+int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+               unsigned int timeout_ms, bool use_busy_signal, bool send_status,
+               bool ignore_crc);
 
 #endif
 
index 9d6d2fb217967d069ff2f18db0b403dff05483e0..ad4f94ec7e8d465a4e4e45ba9388fb7d97231f4a 100644 (file)
@@ -76,7 +76,7 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
        if (!pwrseq)
                return ERR_PTR(-ENOMEM);
 
-       pwrseq->reset_gpio = gpiod_get_index(dev, "reset", 0, GPIOD_OUT_LOW);
+       pwrseq->reset_gpio = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(pwrseq->reset_gpio)) {
                ret = PTR_ERR(pwrseq->reset_gpio);
                goto free;
@@ -84,11 +84,11 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
 
        /*
         * register reset handler to ensure emmc reset also from
-        * emergency_reboot(), priority 129 schedules it just before
-        * system reboot
+        * emergency_reboot(), priority 255 is the highest priority
+        * so it will be executed before any system reboot handler.
         */
        pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
-       pwrseq->reset_nb.priority = 129;
+       pwrseq->reset_nb.priority = 255;
        register_restart_handler(&pwrseq->reset_nb);
 
        pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
index 0b14b83a53d6c9614a2ac897893f5cc1690a5b6e..d10538bb5e07ac298fcadad3003bd84c02fa00d5 100644 (file)
@@ -23,18 +23,21 @@ struct mmc_pwrseq_simple {
        struct mmc_pwrseq pwrseq;
        bool clk_enabled;
        struct clk *ext_clk;
-       int nr_gpios;
-       struct gpio_desc *reset_gpios[0];
+       struct gpio_descs *reset_gpios;
 };
 
 static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
                                              int value)
 {
        int i;
+       struct gpio_descs *reset_gpios = pwrseq->reset_gpios;
+       int values[reset_gpios->ndescs];
 
-       for (i = 0; i < pwrseq->nr_gpios; i++)
-               if (!IS_ERR(pwrseq->reset_gpios[i]))
-                       gpiod_set_value_cansleep(pwrseq->reset_gpios[i], value);
+       for (i = 0; i < reset_gpios->ndescs; i++)
+               values[i] = value;
+
+       gpiod_set_array_value_cansleep(reset_gpios->ndescs, reset_gpios->desc,
+                                      values);
 }
 
 static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
@@ -75,11 +78,8 @@ static void mmc_pwrseq_simple_free(struct mmc_host *host)
 {
        struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
                                        struct mmc_pwrseq_simple, pwrseq);
-       int i;
 
-       for (i = 0; i < pwrseq->nr_gpios; i++)
-               if (!IS_ERR(pwrseq->reset_gpios[i]))
-                       gpiod_put(pwrseq->reset_gpios[i]);
+       gpiod_put_array(pwrseq->reset_gpios);
 
        if (!IS_ERR(pwrseq->ext_clk))
                clk_put(pwrseq->ext_clk);
@@ -98,14 +98,9 @@ struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
                                           struct device *dev)
 {
        struct mmc_pwrseq_simple *pwrseq;
-       int i, nr_gpios, ret = 0;
-
-       nr_gpios = of_gpio_named_count(dev->of_node, "reset-gpios");
-       if (nr_gpios < 0)
-               nr_gpios = 0;
+       int ret = 0;
 
-       pwrseq = kzalloc(sizeof(struct mmc_pwrseq_simple) + nr_gpios *
-                        sizeof(struct gpio_desc *), GFP_KERNEL);
+       pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
        if (!pwrseq)
                return ERR_PTR(-ENOMEM);
 
@@ -116,22 +111,12 @@ struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
                goto free;
        }
 
-       for (i = 0; i < nr_gpios; i++) {
-               pwrseq->reset_gpios[i] = gpiod_get_index(dev, "reset", i,
-                                                        GPIOD_OUT_HIGH);
-               if (IS_ERR(pwrseq->reset_gpios[i]) &&
-                   PTR_ERR(pwrseq->reset_gpios[i]) != -ENOENT &&
-                   PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) {
-                       ret = PTR_ERR(pwrseq->reset_gpios[i]);
-
-                       while (i--)
-                               gpiod_put(pwrseq->reset_gpios[i]);
-
-                       goto clk_put;
-               }
+       pwrseq->reset_gpios = gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
+       if (IS_ERR(pwrseq->reset_gpios)) {
+               ret = PTR_ERR(pwrseq->reset_gpios);
+               goto clk_put;
        }
 
-       pwrseq->nr_gpios = nr_gpios;
        pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
 
        return &pwrseq->pwrseq;
index dd1d1e0fe32227edb4a184e3f8947ecbe27991b2..fad660b95809224e3a06abde0709be3d50acf307 100644 (file)
 #define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
 #endif
 
-/*
- * This hook just adds a quirk for all sdio devices
- */
-static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
-{
-       if (mmc_card_sdio(card))
-               card->quirks |= data;
-}
-
 static const struct mmc_fixup mmc_fixup_methods[] = {
-       /* by default sdio devices are considered CLK_GATING broken */
-       /* good cards will be whitelisted as they are tested */
-       SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
-                  add_quirk_for_sdio_devices,
-                  MMC_QUIRK_BROKEN_CLK_GATING),
-
-       SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
-                  remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
-
        SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
                   add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
 
index 4e7366ab187f295faab7674b72bf83fc8fb49ba4..141eaa923e18eecc140039d34604d5f2969c023b 100644 (file)
@@ -357,8 +357,6 @@ int mmc_sd_switch_hs(struct mmc_card *card)
        if (card->sw_caps.hs_max_dtr == 0)
                return 0;
 
-       err = -EIO;
-
        status = kmalloc(64, GFP_KERNEL);
        if (!status) {
                pr_err("%s: could not allocate a buffer for "
@@ -628,9 +626,25 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
         * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
         */
        if (!mmc_host_is_spi(card->host) &&
-           (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
-            card->sd_bus_speed == UHS_SDR104_BUS_SPEED))
+               (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
+                card->sd_bus_speed == UHS_DDR50_BUS_SPEED ||
+                card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) {
                err = mmc_execute_tuning(card);
+
+               /*
+                * As the SD Specifications Part 1 Physical Layer Specification
+                * Version 3.01 says, CMD19 tuning is available for unlocked
+                * cards in the transfer state of 1.8V signaling mode. The small
+                * difference between v3.00 and v3.01 means that CMD19 tuning
+                * is also available for DDR50 mode.
+                */
+               if (err && card->sd_bus_speed == UHS_DDR50_BUS_SPEED) {
+                       pr_warn("%s: ddr50 tuning failed\n",
+                               mmc_hostname(card->host));
+                       err = 0;
+               }
+       }
+
 out:
        kfree(status);
 
@@ -786,9 +800,7 @@ static int mmc_sd_get_ro(struct mmc_host *host)
        if (!host->ops->get_ro)
                return -1;
 
-       mmc_host_clk_hold(host);
        ro = host->ops->get_ro(host);
-       mmc_host_clk_release(host);
 
        return ro;
 }
@@ -1231,14 +1243,13 @@ int mmc_attach_sd(struct mmc_host *host)
 
        mmc_release_host(host);
        err = mmc_add_card(host->card);
-       mmc_claim_host(host);
        if (err)
                goto remove_card;
 
+       mmc_claim_host(host);
        return 0;
 
 remove_card:
-       mmc_release_host(host);
        mmc_remove_card(host->card);
        host->card = NULL;
        mmc_claim_host(host);
index b91abedcfdca7054654a774f22a357d93661293a..16d838e6d623be3968a7a900e85348d17a642fe3 100644 (file)
@@ -897,11 +897,10 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host)
  */
 static int mmc_sdio_suspend(struct mmc_host *host)
 {
-       if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
-               mmc_claim_host(host);
+       mmc_claim_host(host);
+
+       if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
                sdio_disable_wide(host->card);
-               mmc_release_host(host);
-       }
 
        if (!mmc_card_keep_power(host)) {
                mmc_power_off(host);
@@ -910,6 +909,8 @@ static int mmc_sdio_suspend(struct mmc_host *host)
                mmc_retune_needed(host);
        }
 
+       mmc_release_host(host);
+
        return 0;
 }
 
@@ -955,13 +956,10 @@ static int mmc_sdio_resume(struct mmc_host *host)
        }
 
        if (!err && host->sdio_irqs) {
-               if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
+               if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
                        wake_up_process(host->sdio_irq_thread);
-               } else if (host->caps & MMC_CAP_SDIO_IRQ) {
-                       mmc_host_clk_hold(host);
+               else if (host->caps & MMC_CAP_SDIO_IRQ)
                        host->ops->enable_sdio_irq(host, 1);
-                       mmc_host_clk_release(host);
-               }
        }
 
        mmc_release_host(host);
@@ -1018,15 +1016,24 @@ out:
 static int mmc_sdio_runtime_suspend(struct mmc_host *host)
 {
        /* No references to the card, cut the power to it. */
+       mmc_claim_host(host);
        mmc_power_off(host);
+       mmc_release_host(host);
+
        return 0;
 }
 
 static int mmc_sdio_runtime_resume(struct mmc_host *host)
 {
+       int ret;
+
        /* Restore power and re-initialize. */
+       mmc_claim_host(host);
        mmc_power_up(host, host->card->ocr);
-       return mmc_sdio_power_restore(host);
+       ret = mmc_sdio_power_restore(host);
+       mmc_release_host(host);
+
+       return ret;
 }
 
 static int mmc_sdio_reset(struct mmc_host *host)
index 09cc67d028f07b76165414625829bdd22f56b7f0..91bbbfb29f3f9dd1eeed0976f2757589c032ab99 100644 (file)
@@ -168,21 +168,15 @@ static int sdio_irq_thread(void *_host)
                }
 
                set_current_state(TASK_INTERRUPTIBLE);
-               if (host->caps & MMC_CAP_SDIO_IRQ) {
-                       mmc_host_clk_hold(host);
+               if (host->caps & MMC_CAP_SDIO_IRQ)
                        host->ops->enable_sdio_irq(host, 1);
-                       mmc_host_clk_release(host);
-               }
                if (!kthread_should_stop())
                        schedule_timeout(period);
                set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());
 
-       if (host->caps & MMC_CAP_SDIO_IRQ) {
-               mmc_host_clk_hold(host);
+       if (host->caps & MMC_CAP_SDIO_IRQ)
                host->ops->enable_sdio_irq(host, 0);
-               mmc_host_clk_release(host);
-       }
 
        pr_debug("%s: IRQ thread exiting with code %d\n",
                 mmc_hostname(host), ret);
@@ -208,9 +202,7 @@ static int sdio_card_irq_get(struct mmc_card *card)
                                return err;
                        }
                } else if (host->caps & MMC_CAP_SDIO_IRQ) {
-                       mmc_host_clk_hold(host);
                        host->ops->enable_sdio_irq(host, 1);
-                       mmc_host_clk_release(host);
                }
        }
 
@@ -229,9 +221,7 @@ static int sdio_card_irq_put(struct mmc_card *card)
                        atomic_set(&host->sdio_irq_thread_abort, 1);
                        kthread_stop(host->sdio_irq_thread);
                } else if (host->caps & MMC_CAP_SDIO_IRQ) {
-                       mmc_host_clk_hold(host);
                        host->ops->enable_sdio_irq(host, 0);
-                       mmc_host_clk_release(host);
                }
        }
 
index 12a4d3ab174cf9bcc19bff2d33d24cbb82f9c431..5660c7f459e94731b14a5055e9aab3fbb79ec3b1 100644 (file)
@@ -12,6 +12,8 @@
 #ifndef _MMC_SDIO_OPS_H
 #define _MMC_SDIO_OPS_H
 
+#include <linux/mmc/sdio.h>
+
 int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
 int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
        unsigned addr, u8 in, u8* out);
@@ -19,5 +21,10 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
        unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
 int sdio_reset(struct mmc_host *host);
 
+static inline bool mmc_is_io_op(u32 opcode)
+{
+       return opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED;
+}
+
 #endif
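The mmc_is_io_op() helper added above simply classifies SDIO register-access opcodes (CMD52/CMD53). A minimal sketch of a caller, purely hypothetical and not taken from this series:

	/* Hypothetical helper: true when a request is plain SDIO register I/O,
	 * e.g. so core code can treat CMD52/CMD53 traffic specially.
	 */
	static bool mrq_is_sdio_io(struct mmc_request *mrq)
	{
		return mrq->cmd && mmc_is_io_op(mrq->cmd->opcode);
	}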
 
index 8a1e3498261e9301cffad18889c5392e6751b7a6..af71de5fda3b48c99fefd122a0cb6df9af2234ac 100644 (file)
@@ -67,7 +67,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
          has the effect of scrambling the addresses and formats of data
          accessed in sizes other than the datum size.
 
-         This is the case for the Freescale eSDHC and Nintendo Wii SDHCI.
+         This is the case for the Nintendo Wii SDHCI.
 
 config MMC_SDHCI_PCI
        tristate "SDHCI support on PCI bus"
@@ -140,8 +140,8 @@ config MMC_SDHCI_OF_AT91
 config MMC_SDHCI_OF_ESDHC
        tristate "SDHCI OF support for the Freescale eSDHC controller"
        depends on MMC_SDHCI_PLTFM
-       depends on PPC
-       select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+       depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE
+       select MMC_SDHCI_IO_ACCESSORS
        help
          This selects the Freescale eSDHC controller support.
 
@@ -366,7 +366,7 @@ config MMC_OMAP
 config MMC_OMAP_HS
        tristate "TI OMAP High Speed Multimedia Card Interface support"
        depends on HAS_DMA
-       depends on ARCH_OMAP2PLUS || COMPILE_TEST
+       depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
        help
          This selects the TI OMAP High Speed Multimedia card Interface.
          If you have an omap2plus board with a Multimedia Card slot,
@@ -473,7 +473,7 @@ config MMC_DAVINCI
 
 config MMC_GOLDFISH
        tristate "goldfish qemu Multimedia Card Interface support"
-       depends on GOLDFISH
+       depends on GOLDFISH || COMPILE_TEST
        help
          This selects the Goldfish Multimedia card Interface emulation
          found on the Goldfish Android virtual device emulation.
@@ -615,15 +615,7 @@ config MMC_DW
        help
          This selects support for the Synopsys DesignWare Mobile Storage IP
          block, this provides host support for SD and MMC interfaces, in both
-         PIO and external DMA modes.
-
-config MMC_DW_IDMAC
-       bool "Internal DMAC interface"
-       depends on MMC_DW
-       help
-         This selects support for the internal DMAC block within the Synopsys
-         Designware Mobile Storage IP block. This disables the external DMA
-         interface.
+         PIO, internal DMA mode and external DMA mode.
 
 config MMC_DW_PLTFM
        tristate "Synopsys Designware MCI Support as platform device"
@@ -652,7 +644,6 @@ config MMC_DW_K3
        tristate "K3 specific extensions for Synopsys DW Memory Card Interface"
        depends on MMC_DW
        select MMC_DW_PLTFM
-       select MMC_DW_IDMAC
        help
          This selects support for Hisilicon K3 SoC specific extensions to the
          Synopsys DesignWare Memory Card Interface driver. Select this option
index 4f3452afa6ca3d0340cb67b74a874900f61bf54d..3595f83e89dd2caf9d56cdf6ceb875ef85ece719 100644 (file)
@@ -9,8 +9,8 @@ obj-$(CONFIG_MMC_MXC)           += mxcmmc.o
 obj-$(CONFIG_MMC_MXS)          += mxs-mmc.o
 obj-$(CONFIG_MMC_SDHCI)                += sdhci.o
 obj-$(CONFIG_MMC_SDHCI_PCI)    += sdhci-pci.o
+sdhci-pci-y                    += sdhci-pci-core.o sdhci-pci-o2micro.o
 obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI))       += sdhci-pci-data.o
-obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI))       += sdhci-pci-o2micro.o
 obj-$(CONFIG_MMC_SDHCI_ACPI)   += sdhci-acpi.o
 obj-$(CONFIG_MMC_SDHCI_PXAV3)  += sdhci-pxav3.o
 obj-$(CONFIG_MMC_SDHCI_PXAV2)  += sdhci-pxav2.o
index 1e75309898b76b33f7f2d91a05425269fb941ffb..3a7e835a00339a3f1b4165ebc47c28c37d57fc85 100644 (file)
@@ -446,7 +446,7 @@ out:
        return loc;
 }
 
-static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot)
+static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
 {
        struct dw_mci *host = slot->host;
        struct dw_mci_exynos_priv_data *priv = host->priv;
@@ -461,7 +461,7 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot)
                mci_writel(host, TMOUT, ~0);
                smpl = dw_mci_exynos_move_next_clksmpl(host);
 
-               if (!mmc_send_tuning(mmc))
+               if (!mmc_send_tuning(mmc, opcode, NULL))
                        candiates |= (1 << smpl);
 
        } while (start_smpl != smpl);
index ec6dbcdec693beacc6fef823be15c515645e83a6..7e1d13b68b062b4199e65d914c7cac464e2b0daf 100644 (file)
@@ -59,6 +59,8 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
        host->pdata = pdev->dev.platform_data;
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       /* Get registers' physical base address */
+       host->phy_regs = (void *)(regs->start);
        host->regs = devm_ioremap_resource(&pdev->dev, regs);
        if (IS_ERR(host->regs))
                return PTR_ERR(host->regs);
index bc76aa22473ea2f6e75f8b98d96e90b60ed99538..9becebeeccd17c925d933aa4bff36954a217ff9c 100644 (file)
 #include <linux/mmc/host.h>
 #include <linux/mmc/dw_mmc.h>
 #include <linux/of_address.h>
+#include <linux/slab.h>
 
 #include "dw_mmc.h"
 #include "dw_mmc-pltfm.h"
 
 #define RK3288_CLKGEN_DIV       2
 
+struct dw_mci_rockchip_priv_data {
+       struct clk              *drv_clk;
+       struct clk              *sample_clk;
+       int                     default_sample_phase;
+};
+
 static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
 {
        *cmdr |= SDMMC_CMD_USE_HOLD_REG;
@@ -33,6 +40,7 @@ static int dw_mci_rk3288_setup_clock(struct dw_mci *host)
 
 static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
 {
+       struct dw_mci_rockchip_priv_data *priv = host->priv;
        int ret;
        unsigned int cclkin;
        u32 bus_hz;
@@ -66,6 +74,158 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
                /* force dw_mci_setup_bus() */
                host->current_speed = 0;
        }
+
+       /* Make sure we use phases which we can enumerate with */
+       if (!IS_ERR(priv->sample_clk))
+               clk_set_phase(priv->sample_clk, priv->default_sample_phase);
+}
+
+#define NUM_PHASES                     360
+#define TUNING_ITERATION_TO_PHASE(i)   (DIV_ROUND_UP((i) * 360, NUM_PHASES))
+
+static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
+{
+       struct dw_mci *host = slot->host;
+       struct dw_mci_rockchip_priv_data *priv = host->priv;
+       struct mmc_host *mmc = slot->mmc;
+       int ret = 0;
+       int i;
+       bool v, prev_v = 0, first_v;
+       struct range_t {
+               int start;
+               int end; /* inclusive */
+       };
+       struct range_t *ranges;
+       unsigned int range_count = 0;
+       int longest_range_len = -1;
+       int longest_range = -1;
+       int middle_phase;
+
+       if (IS_ERR(priv->sample_clk)) {
+               dev_err(host->dev, "Tuning clock (sample_clk) not defined.\n");
+               return -EIO;
+       }
+
+       ranges = kmalloc_array(NUM_PHASES / 2 + 1, sizeof(*ranges), GFP_KERNEL);
+       if (!ranges)
+               return -ENOMEM;
+
+       /* Try each phase and extract good ranges */
+       for (i = 0; i < NUM_PHASES; ) {
+               clk_set_phase(priv->sample_clk, TUNING_ITERATION_TO_PHASE(i));
+
+               v = !mmc_send_tuning(mmc, opcode, NULL);
+
+               if (i == 0)
+                       first_v = v;
+
+               if ((!prev_v) && v) {
+                       range_count++;
+                       ranges[range_count-1].start = i;
+               }
+               if (v) {
+                       ranges[range_count-1].end = i;
+                       i++;
+               } else if (i == NUM_PHASES - 1) {
+                       /* No extra skipping rules if we're at the end */
+                       i++;
+               } else {
+                       /*
+                        * No need to check too close to an invalid
+                        * one since testing bad phases is slow.  Skip
+                        * 20 degrees.
+                        */
+                       i += DIV_ROUND_UP(20 * NUM_PHASES, 360);
+
+                       /* Always test the last one */
+                       if (i >= NUM_PHASES)
+                               i = NUM_PHASES - 1;
+               }
+
+               prev_v = v;
+       }
+
+       if (range_count == 0) {
+               dev_warn(host->dev, "All phases bad!");
+               ret = -EIO;
+               goto free;
+       }
+
+       /* wrap around case, merge the end points */
+       if ((range_count > 1) && first_v && v) {
+               ranges[0].start = ranges[range_count-1].start;
+               range_count--;
+       }
+
+       if (ranges[0].start == 0 && ranges[0].end == NUM_PHASES - 1) {
+               clk_set_phase(priv->sample_clk, priv->default_sample_phase);
+               dev_info(host->dev, "All phases work, using default phase %d.",
+                        priv->default_sample_phase);
+               goto free;
+       }
+
+       /* Find the longest range */
+       for (i = 0; i < range_count; i++) {
+               int len = (ranges[i].end - ranges[i].start + 1);
+
+               if (len < 0)
+                       len += NUM_PHASES;
+
+               if (longest_range_len < len) {
+                       longest_range_len = len;
+                       longest_range = i;
+               }
+
+               dev_dbg(host->dev, "Good phase range %d-%d (%d len)\n",
+                       TUNING_ITERATION_TO_PHASE(ranges[i].start),
+                       TUNING_ITERATION_TO_PHASE(ranges[i].end),
+                       len
+               );
+       }
+
+       dev_dbg(host->dev, "Best phase range %d-%d (%d len)\n",
+               TUNING_ITERATION_TO_PHASE(ranges[longest_range].start),
+               TUNING_ITERATION_TO_PHASE(ranges[longest_range].end),
+               longest_range_len
+       );
+
+       middle_phase = ranges[longest_range].start + longest_range_len / 2;
+       middle_phase %= NUM_PHASES;
+       dev_info(host->dev, "Successfully tuned phase to %d\n",
+                TUNING_ITERATION_TO_PHASE(middle_phase));
+
+       clk_set_phase(priv->sample_clk,
+                     TUNING_ITERATION_TO_PHASE(middle_phase));
+
+free:
+       kfree(ranges);
+       return ret;
+}
+
+static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
+{
+       struct device_node *np = host->dev->of_node;
+       struct dw_mci_rockchip_priv_data *priv;
+
+       priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       if (of_property_read_u32(np, "rockchip,default-sample-phase",
+                                       &priv->default_sample_phase))
+               priv->default_sample_phase = 0;
+
+       priv->drv_clk = devm_clk_get(host->dev, "ciu-drive");
+       if (IS_ERR(priv->drv_clk))
+               dev_dbg(host->dev, "ciu_drv not available\n");
+
+       priv->sample_clk = devm_clk_get(host->dev, "ciu-sample");
+       if (IS_ERR(priv->sample_clk))
+               dev_dbg(host->dev, "ciu_sample not available\n");
+
+       host->priv = priv;
+
+       return 0;
 }
 
 static int dw_mci_rockchip_init(struct dw_mci *host)
@@ -95,6 +255,8 @@ static const struct dw_mci_drv_data rk3288_drv_data = {
        .caps                   = dw_mci_rk3288_dwmmc_caps,
        .prepare_command        = dw_mci_rockchip_prepare_command,
        .set_ios                = dw_mci_rk3288_set_ios,
+       .execute_tuning         = dw_mci_rk3288_execute_tuning,
+       .parse_dt               = dw_mci_rk3288_parse_dt,
        .setup_clock    = dw_mci_rk3288_setup_clock,
        .init                   = dw_mci_rockchip_init,
 };
index fcbf5524fd3136f6242036ae206239f83c90db71..7a6cedbe48a837e7fd5800c9fe1da131d569df51 100644 (file)
@@ -56,7 +56,6 @@
 #define DW_MCI_FREQ_MAX        200000000       /* unit: HZ */
 #define DW_MCI_FREQ_MIN        400000          /* unit: HZ */
 
-#ifdef CONFIG_MMC_DW_IDMAC
 #define IDMAC_INT_CLR          (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
                                 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
                                 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
@@ -102,7 +101,6 @@ struct idmac_desc {
 
 /* Each descriptor can transfer up to 4KB of data in chained mode */
 #define DW_MCI_DESC_DATA_LENGTH        0x1000
-#endif /* CONFIG_MMC_DW_IDMAC */
 
 static bool dw_mci_reset(struct dw_mci *host);
 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
@@ -407,7 +405,6 @@ static int dw_mci_get_dma_dir(struct mmc_data *data)
                return DMA_FROM_DEVICE;
 }
 
-#ifdef CONFIG_MMC_DW_IDMAC
 static void dw_mci_dma_cleanup(struct dw_mci *host)
 {
        struct mmc_data *data = host->data;
@@ -445,12 +442,21 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host)
        mci_writel(host, BMOD, temp);
 }
 
-static void dw_mci_idmac_complete_dma(struct dw_mci *host)
+static void dw_mci_dmac_complete_dma(void *arg)
 {
+       struct dw_mci *host = arg;
        struct mmc_data *data = host->data;
 
        dev_vdbg(host->dev, "DMA complete\n");
 
+       if ((host->use_dma == TRANS_MODE_EDMAC) &&
+           data && (data->flags & MMC_DATA_READ))
+               /* Invalidate cache after read */
+               dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
+                                   data->sg,
+                                   data->sg_len,
+                                   DMA_FROM_DEVICE);
+
        host->dma_ops->cleanup(host);
 
        /*
@@ -564,7 +570,7 @@ static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
        wmb(); /* drain writebuffer */
 }
 
-static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
+static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
 {
        u32 temp;
 
@@ -589,6 +595,8 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
 
        /* Start it running */
        mci_writel(host, PLDMND, 1);
+
+       return 0;
 }
 
 static int dw_mci_idmac_init(struct dw_mci *host)
@@ -669,10 +677,110 @@ static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
        .init = dw_mci_idmac_init,
        .start = dw_mci_idmac_start_dma,
        .stop = dw_mci_idmac_stop_dma,
-       .complete = dw_mci_idmac_complete_dma,
+       .complete = dw_mci_dmac_complete_dma,
+       .cleanup = dw_mci_dma_cleanup,
+};
+
+static void dw_mci_edmac_stop_dma(struct dw_mci *host)
+{
+       dmaengine_terminate_all(host->dms->ch);
+}
+
+static int dw_mci_edmac_start_dma(struct dw_mci *host,
+                                           unsigned int sg_len)
+{
+       struct dma_slave_config cfg;
+       struct dma_async_tx_descriptor *desc = NULL;
+       struct scatterlist *sgl = host->data->sg;
+       const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+       u32 sg_elems = host->data->sg_len;
+       u32 fifoth_val;
+       u32 fifo_offset = host->fifo_reg - host->regs;
+       int ret = 0;
+
+       /* Set external dma config: burst size, burst width */
+       cfg.dst_addr = (dma_addr_t)(host->phy_regs + fifo_offset);
+       cfg.src_addr = cfg.dst_addr;
+       cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+       /* Match burst msize with external dma config */
+       fifoth_val = mci_readl(host, FIFOTH);
+       cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
+       cfg.src_maxburst = cfg.dst_maxburst;
+
+       if (host->data->flags & MMC_DATA_WRITE)
+               cfg.direction = DMA_MEM_TO_DEV;
+       else
+               cfg.direction = DMA_DEV_TO_MEM;
+
+       ret = dmaengine_slave_config(host->dms->ch, &cfg);
+       if (ret) {
+               dev_err(host->dev, "Failed to config edmac.\n");
+               return -EBUSY;
+       }
+
+       desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
+                                      sg_len, cfg.direction,
+                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!desc) {
+               dev_err(host->dev, "Can't prepare slave sg.\n");
+               return -EBUSY;
+       }
+
+       /* Set dw_mci_dmac_complete_dma as callback */
+       desc->callback = dw_mci_dmac_complete_dma;
+       desc->callback_param = (void *)host;
+       dmaengine_submit(desc);
+
+       /* Flush cache before write */
+       if (host->data->flags & MMC_DATA_WRITE)
+               dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
+                                      sg_elems, DMA_TO_DEVICE);
+
+       dma_async_issue_pending(host->dms->ch);
+
+       return 0;
+}
+
+static int dw_mci_edmac_init(struct dw_mci *host)
+{
+       /* Request external dma channel */
+       host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
+       if (!host->dms)
+               return -ENOMEM;
+
+       host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
+       if (!host->dms->ch) {
+               dev_err(host->dev, "Failed to get external DMA channel.\n");
+               kfree(host->dms);
+               host->dms = NULL;
+               return -ENXIO;
+       }
+
+       return 0;
+}
+
+static void dw_mci_edmac_exit(struct dw_mci *host)
+{
+       if (host->dms) {
+               if (host->dms->ch) {
+                       dma_release_channel(host->dms->ch);
+                       host->dms->ch = NULL;
+               }
+               kfree(host->dms);
+               host->dms = NULL;
+       }
+}
+
+static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
+       .init = dw_mci_edmac_init,
+       .exit = dw_mci_edmac_exit,
+       .start = dw_mci_edmac_start_dma,
+       .stop = dw_mci_edmac_stop_dma,
+       .complete = dw_mci_dmac_complete_dma,
        .cleanup = dw_mci_dma_cleanup,
 };
-#endif /* CONFIG_MMC_DW_IDMAC */
 
 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
                                   struct mmc_data *data,
@@ -752,7 +860,6 @@ static void dw_mci_post_req(struct mmc_host *mmc,
 
 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
 {
-#ifdef CONFIG_MMC_DW_IDMAC
        unsigned int blksz = data->blksz;
        const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
        u32 fifo_width = 1 << host->data_shift;
@@ -760,6 +867,10 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
        u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
        int idx = ARRAY_SIZE(mszs) - 1;
 
+       /* pio should skip this scenario */
+       if (!host->use_dma)
+               return;
+
        tx_wmark = (host->fifo_depth) / 2;
        tx_wmark_invers = host->fifo_depth - tx_wmark;
 
@@ -788,7 +899,6 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
 done:
        fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
        mci_writel(host, FIFOTH, fifoth_val);
-#endif
 }
 
 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
@@ -850,10 +960,12 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
 
        host->using_dma = 1;
 
-       dev_vdbg(host->dev,
-                "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
-                (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
-                sg_len);
+       if (host->use_dma == TRANS_MODE_IDMAC)
+               dev_vdbg(host->dev,
+                        "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
+                        (unsigned long)host->sg_cpu,
+                        (unsigned long)host->sg_dma,
+                        sg_len);
 
        /*
         * Decide the MSIZE and RX/TX Watermark.
@@ -875,7 +987,11 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
        mci_writel(host, INTMASK, temp);
        spin_unlock_irqrestore(&host->irq_lock, irqflags);
 
-       host->dma_ops->start(host, sg_len);
+       if (host->dma_ops->start(host, sg_len)) {
+               /* We can't do DMA */
+               dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
+               return -ENODEV;
+       }
 
        return 0;
 }
@@ -1177,6 +1293,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
        /* DDR mode set */
        if (ios->timing == MMC_TIMING_MMC_DDR52 ||
+           ios->timing == MMC_TIMING_UHS_DDR50 ||
            ios->timing == MMC_TIMING_MMC_HS400)
                regs |= ((0x1 << slot->id) << 16);
        else
@@ -1279,7 +1396,6 @@ static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
        const struct dw_mci_drv_data *drv_data = host->drv_data;
        u32 uhs;
        u32 v18 = SDMMC_UHS_18V << slot->id;
-       int min_uv, max_uv;
        int ret;
 
        if (drv_data && drv_data->switch_voltage)
@@ -1291,22 +1407,18 @@ static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
         * does no harm but you need to set the regulator directly.  Try both.
         */
        uhs = mci_readl(host, UHS_REG);
-       if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
-               min_uv = 2700000;
-               max_uv = 3600000;
+       if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
                uhs &= ~v18;
-       } else {
-               min_uv = 1700000;
-               max_uv = 1950000;
+       else
                uhs |= v18;
-       }
+
        if (!IS_ERR(mmc->supply.vqmmc)) {
-               ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
+               ret = mmc_regulator_set_vqmmc(mmc, ios);
 
                if (ret) {
                        dev_dbg(&mmc->class_dev,
-                                        "Regulator set error %d: %d - %d\n",
-                                        ret, min_uv, max_uv);
+                                        "Regulator set error %d - %s V\n",
+                                        ret, uhs & v18 ? "1.8" : "3.3");
                        return ret;
                }
        }
@@ -1427,7 +1539,7 @@ static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
        int err = -EINVAL;
 
        if (drv_data && drv_data->execute_tuning)
-               err = drv_data->execute_tuning(slot);
+               err = drv_data->execute_tuning(slot, opcode);
        return err;
 }
 
@@ -2343,15 +2455,17 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
 
        }
 
-#ifdef CONFIG_MMC_DW_IDMAC
-       /* Handle DMA interrupts */
+       if (host->use_dma != TRANS_MODE_IDMAC)
+               return IRQ_HANDLED;
+
+       /* Handle IDMA interrupts */
        if (host->dma_64bit_address == 1) {
                pending = mci_readl(host, IDSTS64);
                if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
                        mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
                                                        SDMMC_IDMAC_INT_RI);
                        mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
-                       host->dma_ops->complete(host);
+                       host->dma_ops->complete((void *)host);
                }
        } else {
                pending = mci_readl(host, IDSTS);
@@ -2359,10 +2473,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
                        mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
                                                        SDMMC_IDMAC_INT_RI);
                        mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
-                       host->dma_ops->complete(host);
+                       host->dma_ops->complete((void *)host);
                }
        }
-#endif
 
        return IRQ_HANDLED;
 }
@@ -2471,13 +2584,21 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
                goto err_host_allocated;
 
        /* Useful defaults if platform data is unset. */
-       if (host->use_dma) {
+       if (host->use_dma == TRANS_MODE_IDMAC) {
                mmc->max_segs = host->ring_size;
                mmc->max_blk_size = 65536;
                mmc->max_seg_size = 0x1000;
                mmc->max_req_size = mmc->max_seg_size * host->ring_size;
                mmc->max_blk_count = mmc->max_req_size / 512;
+       } else if (host->use_dma == TRANS_MODE_EDMAC) {
+               mmc->max_segs = 64;
+               mmc->max_blk_size = 65536;
+               mmc->max_blk_count = 65535;
+               mmc->max_req_size =
+                               mmc->max_blk_size * mmc->max_blk_count;
+               mmc->max_seg_size = mmc->max_req_size;
        } else {
+               /* TRANS_MODE_PIO */
                mmc->max_segs = 64;
                mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
                mmc->max_blk_count = 512;
@@ -2517,38 +2638,74 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
 static void dw_mci_init_dma(struct dw_mci *host)
 {
        int addr_config;
-       /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
-       addr_config = (mci_readl(host, HCON) >> 27) & 0x01;
-
-       if (addr_config == 1) {
-               /* host supports IDMAC in 64-bit address mode */
-               host->dma_64bit_address = 1;
-               dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
-               if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
-                       dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
-       } else {
-               /* host supports IDMAC in 32-bit address mode */
-               host->dma_64bit_address = 0;
-               dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
-       }
+       struct device *dev = host->dev;
+       struct device_node *np = dev->of_node;
 
-       /* Alloc memory for sg translation */
-       host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
-                                         &host->sg_dma, GFP_KERNEL);
-       if (!host->sg_cpu) {
-               dev_err(host->dev, "%s: could not alloc DMA memory\n",
-                       __func__);
+       /*
+       * Check transfer mode from HCON[17:16]
+       * Clear the ambiguous description of dw_mmc databook:
+       * 2b'00: No DMA Interface -> Actually means using Internal DMA block
+       * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
+       * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
+       * 2b'11: Non DW DMA Interface -> pio only
+       * Compared to DesignWare DMA Interface, Generic DMA Interface has a
+       * simpler request/acknowledge handshake mechanism and both of them
+       * are regarded as external DMA masters by dw_mmc.
+       */
+       host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
+       if (host->use_dma == DMA_INTERFACE_IDMA) {
+               host->use_dma = TRANS_MODE_IDMAC;
+       } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
+                  host->use_dma == DMA_INTERFACE_GDMA) {
+               host->use_dma = TRANS_MODE_EDMAC;
+       } else {
                goto no_dma;
        }
 
        /* Determine which DMA interface to use */
-#ifdef CONFIG_MMC_DW_IDMAC
-       host->dma_ops = &dw_mci_idmac_ops;
-       dev_info(host->dev, "Using internal DMA controller.\n");
-#endif
+       if (host->use_dma == TRANS_MODE_IDMAC) {
+               /*
+               * Check ADDR_CONFIG bit in HCON to find
+               * IDMAC address bus width
+               */
+               addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
+
+               if (addr_config == 1) {
+                       /* host supports IDMAC in 64-bit address mode */
+                       host->dma_64bit_address = 1;
+                       dev_info(host->dev,
+                                "IDMAC supports 64-bit address mode.\n");
+                       if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
+                               dma_set_coherent_mask(host->dev,
+                                                     DMA_BIT_MASK(64));
+               } else {
+                       /* host supports IDMAC in 32-bit address mode */
+                       host->dma_64bit_address = 0;
+                       dev_info(host->dev,
+                                "IDMAC supports 32-bit address mode.\n");
+               }
 
-       if (!host->dma_ops)
-               goto no_dma;
+               /* Alloc memory for sg translation */
+               host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
+                                                  &host->sg_dma, GFP_KERNEL);
+               if (!host->sg_cpu) {
+                       dev_err(host->dev,
+                               "%s: could not alloc DMA memory\n",
+                               __func__);
+                       goto no_dma;
+               }
+
+               host->dma_ops = &dw_mci_idmac_ops;
+               dev_info(host->dev, "Using internal DMA controller.\n");
+       } else {
+               /* TRANS_MODE_EDMAC: check dma bindings again */
+               if ((of_property_count_strings(np, "dma-names") < 0) ||
+                   (!of_find_property(np, "dmas", NULL))) {
+                       goto no_dma;
+               }
+               host->dma_ops = &dw_mci_edmac_ops;
+               dev_info(host->dev, "Using external DMA controller.\n");
+       }
 
        if (host->dma_ops->init && host->dma_ops->start &&
            host->dma_ops->stop && host->dma_ops->cleanup) {
@@ -2562,12 +2719,11 @@ static void dw_mci_init_dma(struct dw_mci *host)
                goto no_dma;
        }
 
-       host->use_dma = 1;
        return;
 
 no_dma:
        dev_info(host->dev, "Using PIO mode.\n");
-       host->use_dma = 0;
+       host->use_dma = TRANS_MODE_PIO;
 }
 
 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
@@ -2650,10 +2806,9 @@ static bool dw_mci_reset(struct dw_mci *host)
                }
        }
 
-#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
-       /* It is also recommended that we reset and reprogram idmac */
-       dw_mci_idmac_reset(host);
-#endif
+       if (host->use_dma == TRANS_MODE_IDMAC)
+               /* It is also recommended that we reset and reprogram idmac */
+               dw_mci_idmac_reset(host);
 
        ret = true;
 
@@ -2890,7 +3045,7 @@ int dw_mci_probe(struct dw_mci *host)
         * Get the host data width - this assumes that HCON has been set with
         * the correct values.
         */
-       i = (mci_readl(host, HCON) >> 7) & 0x7;
+       i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
        if (!i) {
                host->push_data = dw_mci_push_data16;
                host->pull_data = dw_mci_pull_data16;
@@ -2972,7 +3127,7 @@ int dw_mci_probe(struct dw_mci *host)
        if (host->pdata->num_slots)
                host->num_slots = host->pdata->num_slots;
        else
-               host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
+               host->num_slots = SDMMC_GET_SLOT_NUM(mci_readl(host, HCON));
 
        /*
         * Enable interrupts for command done, data over, data empty,
@@ -3067,6 +3222,9 @@ EXPORT_SYMBOL(dw_mci_remove);
  */
 int dw_mci_suspend(struct dw_mci *host)
 {
+       if (host->use_dma && host->dma_ops->exit)
+               host->dma_ops->exit(host);
+
        return 0;
 }
 EXPORT_SYMBOL(dw_mci_suspend);
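The probe path above now derives its configuration from HCON via the helper macros added in dw_mmc.h (next file). A minimal sketch of that decoding, with the field meanings inferred from their use in this patch rather than from a databook:

	u32 hcon = mci_readl(host, HCON);
	int trans_mode = SDMMC_GET_TRANS_MODE(hcon);   /* 0: IDMAC, 1/2: external DMA, 3: PIO only */
	int num_slots  = SDMMC_GET_SLOT_NUM(hcon);     /* number of card slots */
	int data_width = SDMMC_GET_HDATA_WIDTH(hcon);  /* 0: 16-bit, 1: 32-bit, 2: 64-bit */
	int idmac_64   = SDMMC_GET_ADDR_CONFIG(hcon);  /* 1: IDMAC uses 64-bit addressing */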
index 8ce4674730a6ff8efde66e456136aa0d282e06c5..f695b58f06135868ce934cae1e322899f3dc6545 100644 (file)
 #define SDMMC_SET_FIFOTH(m, r, t)      (((m) & 0x7) << 28 | \
                                         ((r) & 0xFFF) << 16 | \
                                         ((t) & 0xFFF))
+/* HCON register defines */
+#define DMA_INTERFACE_IDMA             (0x0)
+#define DMA_INTERFACE_DWDMA            (0x1)
+#define DMA_INTERFACE_GDMA             (0x2)
+#define DMA_INTERFACE_NODMA            (0x3)
+#define SDMMC_GET_TRANS_MODE(x)                (((x)>>16) & 0x3)
+#define SDMMC_GET_SLOT_NUM(x)          ((((x)>>1) & 0x1F) + 1)
+#define SDMMC_GET_HDATA_WIDTH(x)       (((x)>>7) & 0x7)
+#define SDMMC_GET_ADDR_CONFIG(x)       (((x)>>27) & 0x1)
 /* Internal DMAC interrupt defines */
 #define SDMMC_IDMAC_INT_AI             BIT(9)
 #define SDMMC_IDMAC_INT_NI             BIT(8)
 /* Version ID register define */
 #define SDMMC_GET_VERID(x)             ((x) & 0xFFFF)
 /* Card read threshold */
-#define SDMMC_SET_RD_THLD(v, x)                (((v) & 0x1FFF) << 16 | (x))
+#define SDMMC_SET_RD_THLD(v, x)                (((v) & 0xFFF) << 16 | (x))
 #define SDMMC_UHS_18V                  BIT(0)
 /* All ctrl reset bits */
 #define SDMMC_CTRL_ALL_RESET_FLAGS \
@@ -281,7 +290,7 @@ struct dw_mci_drv_data {
        void            (*prepare_command)(struct dw_mci *host, u32 *cmdr);
        void            (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
        int             (*parse_dt)(struct dw_mci *host);
-       int             (*execute_tuning)(struct dw_mci_slot *slot);
+       int             (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode);
        int             (*prepare_hs400_tuning)(struct dw_mci *host,
                                                struct mmc_ios *ios);
        int             (*switch_voltage)(struct mmc_host *mmc,
index ae19d83bb9de0d966bb095f73c2f8b900578659a..8ee11f4120fcae295756118c492b9cdef150eadf 100644 (file)
@@ -1511,6 +1511,7 @@ static const struct of_device_id mmc_spi_of_match_table[] = {
        { .compatible = "mmc-spi-slot", },
        {},
 };
+MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);
 
 static struct spi_driver mmc_spi_driver = {
        .driver = {
index 006f1862444b245a8c7564c60cff9a915e13d127..79905ce895adaeffc6f8e07689af768398b272c1 100644 (file)
@@ -711,6 +711,7 @@ static const struct of_device_id moxart_mmc_match[] = {
        { .compatible = "faraday,ftsdc010" },
        { }
 };
+MODULE_DEVICE_TABLE(of, moxart_mmc_match);
 
 static struct platform_driver moxart_mmc_driver = {
        .probe      = moxart_probe,
index 7153500dd0071bc98da0951299463d2b80d26111..39568cc29a2a18cf752141dcf6233746b4d149e1 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 
 #include <linux/mmc/card.h>
@@ -64,6 +65,7 @@
 #define SDC_RESP2        0x48
 #define SDC_RESP3        0x4c
 #define SDC_BLK_NUM      0x50
+#define EMMC_IOCON       0x7c
 #define SDC_ACMD_RESP    0x80
 #define MSDC_DMA_SA      0x90
 #define MSDC_DMA_CTRL    0x98
@@ -71,6 +73,8 @@
 #define MSDC_PATCH_BIT   0xb0
 #define MSDC_PATCH_BIT1  0xb4
 #define MSDC_PAD_TUNE    0xec
+#define PAD_DS_TUNE      0x188
+#define EMMC50_CFG0      0x208
 
 /*--------------------------------------------------------------------------*/
 /* Register Mask                                                            */
@@ -87,6 +91,7 @@
 #define MSDC_CFG_CKSTB          (0x1 << 7)     /* R  */
 #define MSDC_CFG_CKDIV          (0xff << 8)    /* RW */
 #define MSDC_CFG_CKMOD          (0x3 << 16)    /* RW */
+#define MSDC_CFG_HS400_CK_MODE  (0x1 << 18)    /* RW */
 
 /* MSDC_IOCON mask */
 #define MSDC_IOCON_SDR104CKS    (0x1 << 0)     /* RW */
 #define MSDC_PATCH_BIT_SPCPUSH    (0x1 << 29)  /* RW */
 #define MSDC_PATCH_BIT_DECRCTMO   (0x1 << 30)  /* RW */
 
+#define MSDC_PAD_TUNE_DATRRDLY   (0x1f <<  8)  /* RW */
+#define MSDC_PAD_TUNE_CMDRDLY    (0x1f << 16)  /* RW */
+
+#define PAD_DS_TUNE_DLY1         (0x1f << 2)   /* RW */
+#define PAD_DS_TUNE_DLY2         (0x1f << 7)   /* RW */
+#define PAD_DS_TUNE_DLY3         (0x1f << 12)  /* RW */
+
+#define EMMC50_CFG_PADCMD_LATCHCK (0x1 << 0)   /* RW */
+#define EMMC50_CFG_CRCSTS_EDGE    (0x1 << 3)   /* RW */
+#define EMMC50_CFG_CFCSTS_SEL     (0x1 << 4)   /* RW */
+
 #define REQ_CMD_EIO  (0x1 << 0)
 #define REQ_CMD_TMO  (0x1 << 1)
 #define REQ_DAT_ERR  (0x1 << 2)
 #define CMD_TIMEOUT         (HZ/10 * 5)        /* 100ms x5 */
 #define DAT_TIMEOUT         (HZ    * 5)        /* 1000ms x5 */
 
+#define PAD_DELAY_MAX  32 /* PAD delay cells */
 /*--------------------------------------------------------------------------*/
 /* Descriptor Structure                                                     */
 /*--------------------------------------------------------------------------*/
@@ -265,6 +282,14 @@ struct msdc_save_para {
        u32 pad_tune;
        u32 patch_bit0;
        u32 patch_bit1;
+       u32 pad_ds_tune;
+       u32 emmc50_cfg0;
+};
+
+struct msdc_delay_phase {
+       u8 maxlen;
+       u8 start;
+       u8 final_phase;
 };
 
 struct msdc_host {
@@ -297,8 +322,9 @@ struct msdc_host {
        u32 mclk;               /* mmc subsystem clock frequency */
        u32 src_clk_freq;       /* source clock frequency */
        u32 sclk;               /* SD/MS bus clock frequency */
-       bool ddr;
+       unsigned char timing;
        bool vqmmc_enabled;
+       u32 hs400_ds_delay;
        struct msdc_save_para save_para; /* used when gate HCLK */
 };
 
@@ -353,7 +379,10 @@ static void msdc_reset_hw(struct msdc_host *host)
 static void msdc_cmd_next(struct msdc_host *host,
                struct mmc_request *mrq, struct mmc_command *cmd);
 
-static u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO |
+static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
+                       MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
+                       MSDC_INTEN_ACMDCRCERR | MSDC_INTEN_ACMDTMO;
+static const u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO |
                        MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR |
                        MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT;
 
@@ -485,7 +514,7 @@ static void msdc_ungate_clock(struct msdc_host *host)
                cpu_relax();
 }
 
-static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz)
+static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
 {
        u32 mode;
        u32 flags;
@@ -501,8 +530,15 @@ static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz)
 
        flags = readl(host->base + MSDC_INTEN);
        sdr_clr_bits(host->base + MSDC_INTEN, flags);
-       if (ddr) { /* may need to modify later */
-               mode = 0x2; /* ddr mode and use divisor */
+       sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+       if (timing == MMC_TIMING_UHS_DDR50 ||
+           timing == MMC_TIMING_MMC_DDR52 ||
+           timing == MMC_TIMING_MMC_HS400) {
+               if (timing == MMC_TIMING_MMC_HS400)
+                       mode = 0x3;
+               else
+                       mode = 0x2; /* ddr mode and use divisor */
+
                if (hz >= (host->src_clk_freq >> 2)) {
                        div = 0; /* mean div = 1/4 */
                        sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */
@@ -511,6 +547,14 @@ static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz)
                        sclk = (host->src_clk_freq >> 2) / div;
                        div = (div >> 1);
                }
+
+               if (timing == MMC_TIMING_MMC_HS400 &&
+                   hz >= (host->src_clk_freq >> 1)) {
+                       sdr_set_bits(host->base + MSDC_CFG,
+                                    MSDC_CFG_HS400_CK_MODE);
+                       sclk = host->src_clk_freq >> 1;
+                       div = 0; /* div is ignored when bit 18 is set */

+               }
        } else if (hz >= host->src_clk_freq) {
                mode = 0x1; /* no divisor */
                div = 0;
@@ -532,12 +576,12 @@ static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz)
                cpu_relax();
        host->sclk = sclk;
        host->mclk = hz;
-       host->ddr = ddr;
+       host->timing = timing;
        /* need because clk changed. */
        msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
        sdr_set_bits(host->base + MSDC_INTEN, flags);
 
-       dev_dbg(host->dev, "sclk: %d, ddr: %d\n", host->sclk, ddr);
+       dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
 }
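
As a worked example of the new timing handling, assume (purely for illustration) an 800 MHz source clock: a DDR52 request for 200 MHz satisfies hz >= src_clk_freq/4, so mode 0x2 is used with div = 0 and sclk = 200 MHz; an HS400 request for 400 MHz additionally satisfies hz >= src_clk_freq/2, so MSDC_CFG_HS400_CK_MODE is set, sclk becomes 400 MHz, and the divisor is ignored.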
 
 static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
@@ -725,11 +769,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
        if (done)
                return true;
 
-       sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CMDRDY |
-                       MSDC_INTEN_RSPCRCERR | MSDC_INTEN_CMDTMO |
-                       MSDC_INTEN_ACMDRDY | MSDC_INTEN_ACMDCRCERR |
-                       MSDC_INTEN_ACMDTMO);
-       writel(cmd->arg, host->base + SDC_ARG);
+       sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
 
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
@@ -819,10 +859,7 @@ static void msdc_start_command(struct msdc_host *host,
        rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
        mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
 
-       sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CMDRDY |
-                       MSDC_INTEN_RSPCRCERR | MSDC_INTEN_CMDTMO |
-                       MSDC_INTEN_ACMDRDY | MSDC_INTEN_ACMDCRCERR |
-                       MSDC_INTEN_ACMDTMO);
+       sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
        writel(cmd->arg, host->base + SDC_ARG);
        writel(rawcmd, host->base + SDC_CMD);
 }
@@ -896,7 +933,7 @@ static void msdc_data_xfer_next(struct msdc_host *host,
                                struct mmc_request *mrq, struct mmc_data *data)
 {
        if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error &&
-           (!data->bytes_xfered || !mrq->sbc))
+           !mrq->sbc)
                msdc_start_command(host, mrq, mrq->stop);
        else
                msdc_request_done(host, mrq);
@@ -942,6 +979,8 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
 
                        if (events & MSDC_INT_DATTMO)
                                data->error = -ETIMEDOUT;
+                       else if (events & MSDC_INT_DATCRCERR)
+                               data->error = -EILSEQ;
 
                        dev_err(host->dev, "%s: cmd=%d; blocks=%d",
                                __func__, mrq->cmd->opcode, data->blocks);
@@ -1113,10 +1152,12 @@ static void msdc_init_hw(struct msdc_host *host)
 
        writel(0, host->base + MSDC_PAD_TUNE);
        writel(0, host->base + MSDC_IOCON);
-       sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
-       writel(0x403c004f, host->base + MSDC_PATCH_BIT);
+       sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
+       writel(0x403c0046, host->base + MSDC_PATCH_BIT);
        sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
        writel(0xffff0089, host->base + MSDC_PATCH_BIT1);
+       sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
+
        /* Configure to enable SDIO mode.
         * This is mandatory, otherwise SDIO CMD5 fails.
         */
@@ -1148,11 +1189,14 @@ static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
        struct mt_bdma_desc *bd = dma->bd;
        int i;
 
-       memset(gpd, 0, sizeof(struct mt_gpdma_desc));
+       memset(gpd, 0, sizeof(struct mt_gpdma_desc) * 2);
 
        gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */
        gpd->ptr = (u32)dma->bd_addr; /* physical address */
-
+       /* gpd->next must be set for descriptor DMA;
+        * that is why two gpd structures are allocated.
+        */
+       gpd->next = (u32)dma->gpd_addr + sizeof(struct mt_gpdma_desc);
        memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM);
        for (i = 0; i < (MAX_BD_NUM - 1); i++)
                bd[i].next = (u32)dma->bd_addr + sizeof(*bd) * (i + 1);
@@ -1162,20 +1206,16 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
        struct msdc_host *host = mmc_priv(mmc);
        int ret;
-       u32 ddr = 0;
 
        pm_runtime_get_sync(host->dev);
 
-       if (ios->timing == MMC_TIMING_UHS_DDR50 ||
-           ios->timing == MMC_TIMING_MMC_DDR52)
-               ddr = 1;
-
        msdc_set_buswidth(host, ios->bus_width);
 
        /* Suspend/Resume will do power off/on */
        switch (ios->power_mode) {
        case MMC_POWER_UP:
                if (!IS_ERR(mmc->supply.vmmc)) {
+                       msdc_init_hw(host);
                        ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
                                        ios->vdd);
                        if (ret) {
@@ -1206,14 +1246,207 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                break;
        }
 
-       if (host->mclk != ios->clock || host->ddr != ddr)
-               msdc_set_mclk(host, ddr, ios->clock);
+       if (host->mclk != ios->clock || host->timing != ios->timing)
+               msdc_set_mclk(host, ios->timing, ios->clock);
 
 end:
        pm_runtime_mark_last_busy(host->dev);
        pm_runtime_put_autosuspend(host->dev);
 }
 
+static u32 test_delay_bit(u32 delay, u32 bit)
+{
+       bit %= PAD_DELAY_MAX;
+       return delay & (1 << bit);
+}
+
+static int get_delay_len(u32 delay, u32 start_bit)
+{
+       int i;
+
+       for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) {
+               if (test_delay_bit(delay, start_bit + i) == 0)
+                       return i;
+       }
+       return PAD_DELAY_MAX - start_bit;
+}
+
+static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
+{
+       int start = 0, len = 0;
+       int start_final = 0, len_final = 0;
+       u8 final_phase = 0xff;
+       struct msdc_delay_phase delay_phase;
+
+       if (delay == 0) {
+               dev_err(host->dev, "phase error: [map:%x]\n", delay);
+               delay_phase.final_phase = final_phase;
+               return delay_phase;
+       }
+
+       while (start < PAD_DELAY_MAX) {
+               len = get_delay_len(delay, start);
+               if (len_final < len) {
+                       start_final = start;
+                       len_final = len;
+               }
+               start += len ? len : 1;
+               if (len >= 8 && start_final < 4)
+                       break;
+       }
+
+       /* The rule is to find the smallest delay cell */
+       if (start_final == 0)
+               final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX;
+       else
+               final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX;
+       dev_info(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
+                delay, len_final, final_phase);
+
+       delay_phase.maxlen = len_final;
+       delay_phase.start = start_final;
+       delay_phase.final_phase = final_phase;
+       return delay_phase;
+}
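
The tuning helpers below build a 32-bit pass/fail map, one bit per PAD delay setting and per sampling edge, and get_best_delay() then picks a phase inside the longest run of passing settings (with an extra len_final/3 bias when the window starts at 0 and an early exit once a long window is found). A standalone sketch of the core window search, omitting those two heuristics and using a made-up map:

    /* Illustrative sketch, not part of the patch: find the longest run of set
     * bits in a 32-entry pass map and take its midpoint as the final phase,
     * as get_best_delay() does. The map value is made up. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAD_DELAY_MAX 32

    int main(void)
    {
            uint32_t map = 0x000ffe00;   /* hypothetical: delays 9..19 passed */
            int start = 0, len, best_start = 0, best_len = 0;

            while (start < PAD_DELAY_MAX) {
                    len = 0;
                    while (start + len < PAD_DELAY_MAX &&
                           (map & (1u << (start + len))))
                            len++;
                    if (len > best_len) {
                            best_len = len;
                            best_start = start;
                    }
                    start += len ? len : 1;
            }
            printf("window [%d..%d], final phase %d\n",
                   best_start, best_start + best_len - 1,
                   (best_start + best_len / 2) % PAD_DELAY_MAX);
            return 0;
    }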
+
+static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
+{
+       struct msdc_host *host = mmc_priv(mmc);
+       u32 rise_delay = 0, fall_delay = 0;
+       struct msdc_delay_phase final_rise_delay, final_fall_delay;
+       u8 final_delay, final_maxlen;
+       int cmd_err;
+       int i;
+
+       sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+       for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+               sdr_set_field(host->base + MSDC_PAD_TUNE,
+                             MSDC_PAD_TUNE_CMDRDLY, i);
+               mmc_send_tuning(mmc, opcode, &cmd_err);
+               if (!cmd_err)
+                       rise_delay |= (1 << i);
+       }
+
+       sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+       for (i = 0; i < PAD_DELAY_MAX; i++) {
+               sdr_set_field(host->base + MSDC_PAD_TUNE,
+                             MSDC_PAD_TUNE_CMDRDLY, i);
+               mmc_send_tuning(mmc, opcode, &cmd_err);
+               if (!cmd_err)
+                       fall_delay |= (1 << i);
+       }
+
+       final_rise_delay = get_best_delay(host, rise_delay);
+       final_fall_delay = get_best_delay(host, fall_delay);
+
+       final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
+       if (final_maxlen == final_rise_delay.maxlen) {
+               sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+               sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+                             final_rise_delay.final_phase);
+               final_delay = final_rise_delay.final_phase;
+       } else {
+               sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+               sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+                             final_fall_delay.final_phase);
+               final_delay = final_fall_delay.final_phase;
+       }
+
+       return final_delay == 0xff ? -EIO : 0;
+}
+
+static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
+{
+       struct msdc_host *host = mmc_priv(mmc);
+       u32 rise_delay = 0, fall_delay = 0;
+       struct msdc_delay_phase final_rise_delay, final_fall_delay;
+       u8 final_delay, final_maxlen;
+       int i, ret;
+
+       sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
+       sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
+       for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+               sdr_set_field(host->base + MSDC_PAD_TUNE,
+                             MSDC_PAD_TUNE_DATRRDLY, i);
+               ret = mmc_send_tuning(mmc, opcode, NULL);
+               if (!ret)
+                       rise_delay |= (1 << i);
+       }
+
+       sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
+       sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
+       for (i = 0; i < PAD_DELAY_MAX; i++) {
+               sdr_set_field(host->base + MSDC_PAD_TUNE,
+                             MSDC_PAD_TUNE_DATRRDLY, i);
+               ret = mmc_send_tuning(mmc, opcode, NULL);
+               if (!ret)
+                       fall_delay |= (1 << i);
+       }
+
+       final_rise_delay = get_best_delay(host, rise_delay);
+       final_fall_delay = get_best_delay(host, fall_delay);
+
+       final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
+       /* Rising edge is more stable, prefer to use it */
+       if (final_rise_delay.maxlen >= 10)
+               final_maxlen = final_rise_delay.maxlen;
+       if (final_maxlen == final_rise_delay.maxlen) {
+               sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
+               sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
+               sdr_set_field(host->base + MSDC_PAD_TUNE,
+                             MSDC_PAD_TUNE_DATRRDLY,
+                             final_rise_delay.final_phase);
+               final_delay = final_rise_delay.final_phase;
+       } else {
+               sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
+               sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
+               sdr_set_field(host->base + MSDC_PAD_TUNE,
+                             MSDC_PAD_TUNE_DATRRDLY,
+                             final_fall_delay.final_phase);
+               final_delay = final_fall_delay.final_phase;
+       }
+
+       return final_delay == 0xff ? -EIO : 0;
+}
+
+static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+       struct msdc_host *host = mmc_priv(mmc);
+       int ret;
+
+       pm_runtime_get_sync(host->dev);
+       ret = msdc_tune_response(mmc, opcode);
+       if (ret == -EIO) {
+               dev_err(host->dev, "Tune response fail!\n");
+               goto out;
+       }
+       ret = msdc_tune_data(mmc, opcode);
+       if (ret == -EIO)
+               dev_err(host->dev, "Tune data fail!\n");
+
+out:
+       pm_runtime_mark_last_busy(host->dev);
+       pm_runtime_put_autosuspend(host->dev);
+       return ret;
+}
+
+static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       struct msdc_host *host = mmc_priv(mmc);
+
+       writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+       return 0;
+}
+
+static void msdc_hw_reset(struct mmc_host *mmc)
+{
+       struct msdc_host *host = mmc_priv(mmc);
+
+       sdr_set_bits(host->base + EMMC_IOCON, 1);
+       udelay(10); /* 10us is enough */
+       sdr_clr_bits(host->base + EMMC_IOCON, 1);
+}
+
 static struct mmc_host_ops mt_msdc_ops = {
        .post_req = msdc_post_req,
        .pre_req = msdc_pre_req,
@@ -1221,6 +1454,9 @@ static struct mmc_host_ops mt_msdc_ops = {
        .set_ios = msdc_ops_set_ios,
        .start_signal_voltage_switch = msdc_ops_switch_volt,
        .card_busy = msdc_card_busy,
+       .execute_tuning = msdc_execute_tuning,
+       .prepare_hs400_tuning = msdc_prepare_hs400_tuning,
+       .hw_reset = msdc_hw_reset,
 };
 
 static int msdc_drv_probe(struct platform_device *pdev)
@@ -1294,6 +1530,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
                goto host_free;
        }
 
+       if (!of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
+                                 &host->hs400_ds_delay))
+               dev_dbg(&pdev->dev, "hs400-ds-delay: %x\n",
+                       host->hs400_ds_delay);
+
        host->dev = &pdev->dev;
        host->mmc = mmc;
        host->src_clk_freq = clk_get_rate(host->src_clk);
@@ -1302,6 +1543,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
        mmc->f_min = host->src_clk_freq / (4 * 255);
 
        mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+       mmc->caps |= MMC_CAP_RUNTIME_RESUME;
        /* MMC core transfer sizes tunable parameters */
        mmc->max_segs = MAX_BD_NUM;
        mmc->max_seg_size = BDMA_DESC_BUFLEN;
@@ -1313,7 +1555,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
 
        host->timeout_clks = 3 * 1048576;
        host->dma.gpd = dma_alloc_coherent(&pdev->dev,
-                               sizeof(struct mt_gpdma_desc),
+                               2 * sizeof(struct mt_gpdma_desc),
                                &host->dma.gpd_addr, GFP_KERNEL);
        host->dma.bd = dma_alloc_coherent(&pdev->dev,
                                MAX_BD_NUM * sizeof(struct mt_bdma_desc),
@@ -1354,7 +1596,7 @@ release:
 release_mem:
        if (host->dma.gpd)
                dma_free_coherent(&pdev->dev,
-                       sizeof(struct mt_gpdma_desc),
+                       2 * sizeof(struct mt_gpdma_desc),
                        host->dma.gpd, host->dma.gpd_addr);
        if (host->dma.bd)
                dma_free_coherent(&pdev->dev,
@@ -1403,6 +1645,8 @@ static void msdc_save_reg(struct msdc_host *host)
        host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
        host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
        host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
+       host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
+       host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
 }
 
 static void msdc_restore_reg(struct msdc_host *host)
@@ -1413,6 +1657,8 @@ static void msdc_restore_reg(struct msdc_host *host)
        writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE);
        writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
        writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
+       writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
+       writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
 }
 
 static int msdc_runtime_suspend(struct device *dev)
index b763b11ed9e1e72320a2d8e4a8d9e393b229766e..b9958a123594a6fa9d0b0075a61a92b1e0f7916b 100644 (file)
@@ -1490,6 +1490,7 @@ static const struct of_device_id mmc_omap_match[] = {
        { .compatible = "ti,omap2420-mmc", },
        { },
 };
+MODULE_DEVICE_TABLE(of, mmc_omap_match);
 #endif
 
 static struct platform_driver mmc_omap_driver = {
index 781e4db317671ce6146dea121a56f42f90e7c491..7fb0753abe3041bc1814ebc14c1136d103b254e1 100644 (file)
@@ -182,6 +182,7 @@ struct omap_hsmmc_host {
        struct  clk             *fclk;
        struct  clk             *dbclk;
        struct  regulator       *pbias;
+       bool                    pbias_enabled;
        void    __iomem         *base;
        int                     vqmmc_enabled;
        resource_size_t         mapbase;
@@ -328,20 +329,22 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
                        return ret;
                }
 
-               if (!regulator_is_enabled(host->pbias)) {
+               if (host->pbias_enabled == 0) {
                        ret = regulator_enable(host->pbias);
                        if (ret) {
                                dev_err(host->dev, "pbias reg enable fail\n");
                                return ret;
                        }
+                       host->pbias_enabled = 1;
                }
        } else {
-               if (regulator_is_enabled(host->pbias)) {
+               if (host->pbias_enabled == 1) {
                        ret = regulator_disable(host->pbias);
                        if (ret) {
                                dev_err(host->dev, "pbias reg disable fail\n");
                                return ret;
                        }
+                       host->pbias_enabled = 0;
                }
        }
 
@@ -475,7 +478,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
        mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc");
        if (IS_ERR(mmc->supply.vmmc)) {
                ret = PTR_ERR(mmc->supply.vmmc);
-               if (ret != -ENODEV)
+               if ((ret != -ENODEV) && host->dev->of_node)
                        return ret;
                dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
                        PTR_ERR(mmc->supply.vmmc));
@@ -490,7 +493,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
        mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux");
        if (IS_ERR(mmc->supply.vqmmc)) {
                ret = PTR_ERR(mmc->supply.vqmmc);
-               if (ret != -ENODEV)
+               if ((ret != -ENODEV) && host->dev->of_node)
                        return ret;
                dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
                        PTR_ERR(mmc->supply.vqmmc));
@@ -500,7 +503,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
        host->pbias = devm_regulator_get_optional(host->dev, "pbias");
        if (IS_ERR(host->pbias)) {
                ret = PTR_ERR(host->pbias);
-               if (ret != -ENODEV)
+               if ((ret != -ENODEV) && host->dev->of_node)
                        return ret;
                dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
                        PTR_ERR(host->pbias));
@@ -2053,6 +2056,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
        host->base      = base + pdata->reg_offset;
        host->power_mode = MMC_POWER_OFF;
        host->next_data.cookie = 1;
+       host->pbias_enabled = 0;
        host->vqmmc_enabled = 0;
 
        ret = omap_hsmmc_gpio_init(mmc, host, pdata);
index 1420f29628c70d8e8fdedbfa3fe7d77f1ba0ae0b..8cadd74e8407bb08d7e277a82ac5d80d496f77e4 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
 #include <linux/io.h>
 #include <linux/regulator/consumer.h>
 #include <linux/gpio.h>
@@ -454,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc)
 {
        struct pxamci_host *host = mmc_priv(mmc);
 
-       if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
-               if (host->pdata->gpio_card_ro_invert)
-                       return !gpio_get_value(host->pdata->gpio_card_ro);
-               else
-                       return gpio_get_value(host->pdata->gpio_card_ro);
-       }
+       if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro))
+               return mmc_gpio_get_ro(mmc);
        if (host->pdata && host->pdata->get_ro)
                return !!host->pdata->get_ro(mmc_dev(mmc));
        /*
@@ -551,6 +548,7 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
 
 static const struct mmc_host_ops pxamci_ops = {
        .request                = pxamci_request,
+       .get_cd                 = mmc_gpio_get_cd,
        .get_ro                 = pxamci_get_ro,
        .set_ios                = pxamci_set_ios,
        .enable_sdio_irq        = pxamci_enable_sdio_irq,
@@ -790,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev)
                gpio_power = host->pdata->gpio_power;
        }
        if (gpio_is_valid(gpio_power)) {
-               ret = gpio_request(gpio_power, "mmc card power");
+               ret = devm_gpio_request(&pdev->dev, gpio_power,
+                                       "mmc card power");
                if (ret) {
-                       dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
+                       dev_err(&pdev->dev, "Failed requesting gpio_power %d\n",
+                               gpio_power);
                        goto out;
                }
                gpio_direction_output(gpio_power,
                                      host->pdata->gpio_power_invert);
        }
-       if (gpio_is_valid(gpio_ro)) {
-               ret = gpio_request(gpio_ro, "mmc card read only");
-               if (ret) {
-                       dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
-                       goto err_gpio_ro;
-               }
-               gpio_direction_input(gpio_ro);
+       if (gpio_is_valid(gpio_ro))
+               ret = mmc_gpio_request_ro(mmc, gpio_ro);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
+               goto out;
+       } else {
+               mmc->caps |= host->pdata->gpio_card_ro_invert ?
+                       MMC_CAP2_RO_ACTIVE_HIGH : 0;
        }
-       if (gpio_is_valid(gpio_cd)) {
-               ret = gpio_request(gpio_cd, "mmc card detect");
-               if (ret) {
-                       dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
-                       goto err_gpio_cd;
-               }
-               gpio_direction_input(gpio_cd);
 
-               ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
-                                 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-                                 "mmc card detect", mmc);
-               if (ret) {
-                       dev_err(&pdev->dev, "failed to request card detect IRQ\n");
-                       goto err_request_irq;
-               }
+       if (gpio_is_valid(gpio_cd))
+               ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
+               goto out;
        }
 
        if (host->pdata && host->pdata->init)
@@ -835,13 +827,7 @@ static int pxamci_probe(struct platform_device *pdev)
 
        return 0;
 
-err_request_irq:
-       gpio_free(gpio_cd);
-err_gpio_cd:
-       gpio_free(gpio_ro);
-err_gpio_ro:
-       gpio_free(gpio_power);
- out:
+out:
        if (host) {
                if (host->dma_chan_rx)
                        dma_release_channel(host->dma_chan_rx);
@@ -873,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev)
                        gpio_ro = host->pdata->gpio_card_ro;
                        gpio_power = host->pdata->gpio_power;
                }
-               if (gpio_is_valid(gpio_cd)) {
-                       free_irq(gpio_to_irq(gpio_cd), mmc);
-                       gpio_free(gpio_cd);
-               }
-               if (gpio_is_valid(gpio_ro))
-                       gpio_free(gpio_ro);
-               if (gpio_is_valid(gpio_power))
-                       gpio_free(gpio_power);
                if (host->vcc)
                        regulator_put(host->vcc);
 
index 22d929fa3371adbf87c2efa744b0272701d72741..f6047fc9406204d6ee672b74544d0ec6866308cd 100644 (file)
@@ -207,7 +207,9 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
        .caps2   = MMC_CAP2_HC_ERASE_SZ,
        .flags   = SDHCI_ACPI_RUNTIME_PM,
        .quirks  = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
-       .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC,
+       .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+                  SDHCI_QUIRK2_STOP_WITH_TC |
+                  SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
        .probe_slot     = sdhci_acpi_emmc_probe_slot,
 };
 
@@ -239,6 +241,9 @@ struct sdhci_acpi_uid_slot {
 };
 
 static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
+       { "80865ACA", NULL, &sdhci_acpi_slot_int_sd },
+       { "80865ACC", NULL, &sdhci_acpi_slot_int_emmc },
+       { "80865AD0", NULL, &sdhci_acpi_slot_int_sdio },
        { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
        { "80860F14" , "3" , &sdhci_acpi_slot_int_sd   },
        { "80860F16" , NULL, &sdhci_acpi_slot_int_sd   },
@@ -247,11 +252,15 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
        { "INT33C6"  , NULL, &sdhci_acpi_slot_int_sdio },
        { "INT3436"  , NULL, &sdhci_acpi_slot_int_sdio },
        { "INT344D"  , NULL, &sdhci_acpi_slot_int_sdio },
+       { "PNP0FFF"  , "3" , &sdhci_acpi_slot_int_sd   },
        { "PNP0D40"  },
        { },
 };
 
 static const struct acpi_device_id sdhci_acpi_ids[] = {
+       { "80865ACA" },
+       { "80865ACC" },
+       { "80865AD0" },
        { "80860F14" },
        { "80860F16" },
        { "INT33BB"  },
index 2bd90fb35c75e5b5c03f243aabd1edc99dce04d5..00a8a40a372954fdcbbb1d4cd92b3ab1cc953765 100644 (file)
@@ -273,7 +273,7 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
                host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
 
        dev_dbg(dev, "is_8bit=%c\n",
-               (host->mmc->caps | MMC_CAP_8_BIT_DATA) ? 'Y' : 'N');
+               (host->mmc->caps & MMC_CAP_8_BIT_DATA) ? 'Y' : 'N');
 
        ret = sdhci_bcm_kona_sd_reset(host);
        if (ret)
index 886d230f41d07357f9fe624b9a94de428789e962..1f1582f6cccbb3454f0824e9dcf39e170a76f395 100644 (file)
@@ -759,7 +759,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
        min = ESDHC_TUNE_CTRL_MIN;
        while (min < ESDHC_TUNE_CTRL_MAX) {
                esdhc_prepare_tuning(host, min);
-               if (!mmc_send_tuning(host->mmc))
+               if (!mmc_send_tuning(host->mmc, opcode, NULL))
                        break;
                min += ESDHC_TUNE_CTRL_STEP;
        }
@@ -768,7 +768,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
        max = min + ESDHC_TUNE_CTRL_STEP;
        while (max < ESDHC_TUNE_CTRL_MAX) {
                esdhc_prepare_tuning(host, max);
-               if (mmc_send_tuning(host->mmc)) {
+               if (mmc_send_tuning(host->mmc, opcode, NULL)) {
                        max -= ESDHC_TUNE_CTRL_STEP;
                        break;
                }
@@ -778,7 +778,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
        /* use average delay to get the best timing */
        avg = (min + max) / 2;
        esdhc_prepare_tuning(host, avg);
-       ret = mmc_send_tuning(host->mmc);
+       ret = mmc_send_tuning(host->mmc, opcode, NULL);
        esdhc_post_tuning(host);
 
        dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n",
index 163ac9974d9101dcf28220263a88cef8f43b7750..de132e28175342ea5869f1da1c003a0d16c2b6a7 100644 (file)
@@ -24,6 +24,8 @@
                                SDHCI_QUIRK_PIO_NEEDS_DELAY | \
                                SDHCI_QUIRK_NO_HISPD_BIT)
 
+#define ESDHC_PROCTL           0x28
+
 #define ESDHC_SYSTEM_CONTROL   0x2c
 #define ESDHC_CLOCK_MASK       0x0000fff0
 #define ESDHC_PREDIV_SHIFT     8
index 4bcee033fedaf520d95433f03e652f0d7a786924..4695bee203ea9306eff0c1a9d01f69afc9ce7d9b 100644 (file)
@@ -373,7 +373,7 @@ retry:
                if (rc)
                        return rc;
 
-               rc = mmc_send_tuning(mmc);
+               rc = mmc_send_tuning(mmc, opcode, NULL);
                if (!rc) {
                        /* Tuning is successful at this tuning point */
                        tuned_phases[tuned_phase_cnt++] = phase;
index d1556643a41d325abc7b7637ac94c694e778fedc..06d0b50dfe71d2ece8e7d8ee662af4c159860200 100644 (file)
@@ -43,6 +43,7 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
 
 static const struct sdhci_pltfm_data soc_data_sama5d2 = {
        .ops = &sdhci_at91_sama5d2_ops,
+       .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST,
 };
 
 static const struct of_device_id sdhci_at91_dt_match[] = {
@@ -110,7 +111,6 @@ static int sdhci_at91_probe(struct platform_device *pdev)
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to set gck");
                goto hclock_disable_unprepare;
-               return -EINVAL;
        }
        /*
         * We need to check if we have the requested rate for gck because in
index 653f335bef1516ca9aba102e1f3ab21d99a754ce..90e94a028a49a39169b754d43050d4937d4b52eb 100644 (file)
 
 #define VENDOR_V_22    0x12
 #define VENDOR_V_23    0x13
-static u32 esdhc_readl(struct sdhci_host *host, int reg)
+
+struct sdhci_esdhc {
+       u8 vendor_ver;
+       u8 spec_ver;
+};
+
+/**
+ * esdhc_read*_fixup - Fix up the value read from an incompatible eSDHC
+ *                    register to make it compatible with the SD spec.
+ *
+ * @host: pointer to sdhci_host
+ * @spec_reg: SD spec register address
+ * @value: 32bit eSDHC register value on spec_reg address
+ *
+ * In the SD spec there are 8/16/32/64-bit registers, while all eSDHC
+ * registers are 32 bits wide. Register size, address, function and bit
+ * positions differ between the eSDHC spec and the SD spec.
+ *
+ * Return a fixed up register value
+ */
+static u32 esdhc_readl_fixup(struct sdhci_host *host,
+                                    int spec_reg, u32 value)
 {
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_esdhc *esdhc = pltfm_host->priv;
        u32 ret;
 
-       ret = in_be32(host->ioaddr + reg);
        /*
         * The bit of ADMA flag in eSDHC is not compatible with standard
         * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
         * supported by eSDHC.
         * And for many FSL eSDHC controllers, the reset value of field
-        * SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA,
+        * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA,
         * only those with vendor version greater than 2.2/0x12 support ADMA.
-        * For FSL eSDHC, must aligned 4-byte, so use 0xFC to read the
-        * the verdor version number, oxFE is SDHCI_HOST_VERSION.
         */
-       if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) {
-               u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
-               tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
-               if (tmp > VENDOR_V_22)
-                       ret |= SDHCI_CAN_DO_ADMA2;
+       if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
+               if (esdhc->vendor_ver > VENDOR_V_22) {
+                       ret = value | SDHCI_CAN_DO_ADMA2;
+                       return ret;
+               }
        }
-
+       ret = value;
        return ret;
 }
 
-static u16 esdhc_readw(struct sdhci_host *host, int reg)
+static u16 esdhc_readw_fixup(struct sdhci_host *host,
+                                    int spec_reg, u32 value)
 {
        u16 ret;
-       int base = reg & ~0x3;
-       int shift = (reg & 0x2) * 8;
+       int shift = (spec_reg & 0x2) * 8;
 
-       if (unlikely(reg == SDHCI_HOST_VERSION))
-               ret = in_be32(host->ioaddr + base) & 0xffff;
+       if (spec_reg == SDHCI_HOST_VERSION)
+               ret = value & 0xffff;
        else
-               ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff;
+               ret = (value >> shift) & 0xffff;
        return ret;
 }
 
-static u8 esdhc_readb(struct sdhci_host *host, int reg)
+static u8 esdhc_readb_fixup(struct sdhci_host *host,
+                                    int spec_reg, u32 value)
 {
-       int base = reg & ~0x3;
-       int shift = (reg & 0x3) * 8;
-       u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff;
+       u8 ret;
+       u8 dma_bits;
+       int shift = (spec_reg & 0x3) * 8;
+
+       ret = (value >> shift) & 0xff;
 
        /*
         * "DMA select" is located at offset 0x28 in the SD specification, but
         * on P5020 or P3041 it is located at 0x29.
         */
-       if (reg == SDHCI_HOST_CONTROL) {
-               u32 dma_bits;
-
-               dma_bits = in_be32(host->ioaddr + reg);
+       if (spec_reg == SDHCI_HOST_CONTROL) {
                /* DMA select is 22,23 bits in Protocol Control Register */
-               dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK;
-
+               dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
                /* fixup the result */
                ret &= ~SDHCI_CTRL_DMA_MASK;
                ret |= dma_bits;
        }
-
        return ret;
 }
 
-static void esdhc_writel(struct sdhci_host *host, u32 val, int reg)
+/**
+ * esdhc_write*_fixup - Fix up the SD spec register value so that it can be
+ *                     written into the eSDHC register.
+ *
+ * @host: pointer to sdhci_host
+ * @spec_reg: SD spec register address
+ * @value: 8/16/32bit SD spec register value that would be written
+ * @old_value: 32bit eSDHC register value on spec_reg address
+ *
+ * In the SD spec there are 8/16/32/64-bit registers, while all eSDHC
+ * registers are 32 bits wide. Register size, address, function and bit
+ * positions differ between the eSDHC spec and the SD spec.
+ *
+ * Return a fixed up register value
+ */
+static u32 esdhc_writel_fixup(struct sdhci_host *host,
+                                    int spec_reg, u32 value, u32 old_value)
 {
+       u32 ret;
+
        /*
-        * Enable IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
-        * when SYSCTL[RSTD]) is set for some special operations.
-        * No any impact other operation.
+        * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
+        * when SYSCTL[RSTD] is set for some special operations.
+        * It has no impact on other operations.
         */
-       if (reg == SDHCI_INT_ENABLE)
-               val |= SDHCI_INT_BLK_GAP;
-       sdhci_be32bs_writel(host, val, reg);
+       if (spec_reg == SDHCI_INT_ENABLE)
+               ret = value | SDHCI_INT_BLK_GAP;
+       else
+               ret = value;
+
+       return ret;
 }
 
-static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
+static u32 esdhc_writew_fixup(struct sdhci_host *host,
+                                    int spec_reg, u16 value, u32 old_value)
 {
-       if (reg == SDHCI_BLOCK_SIZE) {
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       int shift = (spec_reg & 0x2) * 8;
+       u32 ret;
+
+       switch (spec_reg) {
+       case SDHCI_TRANSFER_MODE:
+               /*
+                * Postpone this write; it must be issued together with the
+                * command write below. Return the old value for now.
+                */
+               pltfm_host->xfer_mode_shadow = value;
+               return old_value;
+       case SDHCI_COMMAND:
+               ret = (value << 16) | pltfm_host->xfer_mode_shadow;
+               return ret;
+       }
+
+       ret = old_value & (~(0xffff << shift));
+       ret |= (value << shift);
+
+       if (spec_reg == SDHCI_BLOCK_SIZE) {
                /*
                 * Two last DMA bits are reserved, and first one is used for
                 * non-standard blksz of 4096 bytes that we don't support
                 * yet. So clear the DMA boundary bits.
                 */
-               val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
+               ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
        }
-       sdhci_be32bs_writew(host, val, reg);
+       return ret;
 }
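
Because every eSDHC access is 32 bits wide, the 16-bit SDHCI_TRANSFER_MODE write cannot be issued on its own: it is parked in pltfm_host->xfer_mode_shadow and only reaches the hardware when the SDHCI_COMMAND write arrives, merged into the upper half of the same 32-bit word. A minimal sketch of that merge, with made-up values:

    /* Illustrative sketch, not part of the patch: defer a 16-bit transfer-mode
     * write and fold it into the later command write as one 32-bit value.
     * The transfer-mode and command values are made up. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t xfer_mode_shadow;
            uint16_t xfer_mode = 0x0023;  /* hypothetical SDHCI_TRANSFER_MODE */
            uint16_t command   = 0x123a;  /* hypothetical SDHCI_COMMAND */
            uint32_t word;

            xfer_mode_shadow = xfer_mode;        /* postponed write */
            word = ((uint32_t)command << 16) | xfer_mode_shadow;
            printf("single 32-bit write: 0x%08x\n", (unsigned)word);
            return 0;
    }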
 
-static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
+static u32 esdhc_writeb_fixup(struct sdhci_host *host,
+                                    int spec_reg, u8 value, u32 old_value)
 {
+       u32 ret;
+       u32 dma_bits;
+       u8 tmp;
+       int shift = (spec_reg & 0x3) * 8;
+
+       /*
+        * eSDHC doesn't have a standard power control register, so we do
+        * nothing here to avoid incorrect operation.
+        */
+       if (spec_reg == SDHCI_POWER_CONTROL)
+               return old_value;
        /*
         * "DMA select" is located at offset 0x28 in the SD specification, but
         * on P5020 or P3041 it is located at 0x29.
         */
-       if (reg == SDHCI_HOST_CONTROL) {
-               u32 dma_bits;
-
+       if (spec_reg == SDHCI_HOST_CONTROL) {
                /*
                 * If host control register is not standard, exit
                 * this function
                 */
                if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
-                       return;
+                       return old_value;
 
                /* DMA select is 22,23 bits in Protocol Control Register */
-               dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5;
-               clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5,
-                       dma_bits);
-               val &= ~SDHCI_CTRL_DMA_MASK;
-               val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK;
+               dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
+               ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
+               tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
+                     (old_value & SDHCI_CTRL_DMA_MASK);
+               ret = (ret & (~0xff)) | tmp;
+
+               /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
+               ret &= ~ESDHC_HOST_CONTROL_RES;
+               return ret;
        }
 
-       /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
-       if (reg == SDHCI_HOST_CONTROL)
-               val &= ~ESDHC_HOST_CONTROL_RES;
-       sdhci_be32bs_writeb(host, val, reg);
+       ret = (old_value & (~(0xff << shift))) | (value << shift);
+       return ret;
+}
+
+static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
+{
+       u32 ret;
+       u32 value;
+
+       value = ioread32be(host->ioaddr + reg);
+       ret = esdhc_readl_fixup(host, reg, value);
+
+       return ret;
+}
+
+static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
+{
+       u32 ret;
+       u32 value;
+
+       value = ioread32(host->ioaddr + reg);
+       ret = esdhc_readl_fixup(host, reg, value);
+
+       return ret;
+}
+
+static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
+{
+       u16 ret;
+       u32 value;
+       int base = reg & ~0x3;
+
+       value = ioread32be(host->ioaddr + base);
+       ret = esdhc_readw_fixup(host, reg, value);
+       return ret;
+}
+
+static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
+{
+       u16 ret;
+       u32 value;
+       int base = reg & ~0x3;
+
+       value = ioread32(host->ioaddr + base);
+       ret = esdhc_readw_fixup(host, reg, value);
+       return ret;
+}
+
+static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
+{
+       u8 ret;
+       u32 value;
+       int base = reg & ~0x3;
+
+       value = ioread32be(host->ioaddr + base);
+       ret = esdhc_readb_fixup(host, reg, value);
+       return ret;
+}
+
+static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
+{
+       u8 ret;
+       u32 value;
+       int base = reg & ~0x3;
+
+       value = ioread32(host->ioaddr + base);
+       ret = esdhc_readb_fixup(host, reg, value);
+       return ret;
+}
+
+static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
+{
+       u32 value;
+
+       value = esdhc_writel_fixup(host, reg, val, 0);
+       iowrite32be(value, host->ioaddr + reg);
+}
+
+static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
+{
+       u32 value;
+
+       value = esdhc_writel_fixup(host, reg, val, 0);
+       iowrite32(value, host->ioaddr + reg);
+}
+
+static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
+{
+       int base = reg & ~0x3;
+       u32 value;
+       u32 ret;
+
+       value = ioread32be(host->ioaddr + base);
+       ret = esdhc_writew_fixup(host, reg, val, value);
+       if (reg != SDHCI_TRANSFER_MODE)
+               iowrite32be(ret, host->ioaddr + base);
+}
+
+static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
+{
+       int base = reg & ~0x3;
+       u32 value;
+       u32 ret;
+
+       value = ioread32(host->ioaddr + base);
+       ret = esdhc_writew_fixup(host, reg, val, value);
+       if (reg != SDHCI_TRANSFER_MODE)
+               iowrite32(ret, host->ioaddr + base);
+}
+
+static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+       int base = reg & ~0x3;
+       u32 value;
+       u32 ret;
+
+       value = ioread32be(host->ioaddr + base);
+       ret = esdhc_writeb_fixup(host, reg, val, value);
+       iowrite32be(ret, host->ioaddr + base);
+}
+
+static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+       int base = reg & ~0x3;
+       u32 value;
+       u32 ret;
+
+       value = ioread32(host->ioaddr + base);
+       ret = esdhc_writeb_fixup(host, reg, val, value);
+       iowrite32(ret, host->ioaddr + base);
 }
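
All of the narrow accessors follow the same read-modify-write pattern: read the full 32-bit word containing the target lane with the matching endianness helper, let the *_fixup routine splice the new byte or halfword in at (reg & 0x3) * 8 (or (reg & 0x2) * 8), and write the whole word back. A standalone sketch of the byte-lane splice, with made-up values:

    /* Illustrative sketch, not part of the patch: splice an 8-bit value into
     * a 32-bit register word at the lane selected by the low address bits,
     * as the writeb fixup path does. Register contents are made up. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t splice_byte(uint32_t old, uint8_t val, int reg)
    {
            int shift = (reg & 0x3) * 8;

            return (old & ~(0xffu << shift)) | ((uint32_t)val << shift);
    }

    int main(void)
    {
            uint32_t old = 0x11223344;   /* hypothetical register word */

            /* 0x29 is the P5020/P3041 "DMA select" byte mentioned above */
            printf("0x%08x\n", (unsigned)splice_byte(old, 0xab, 0x29));
            return 0;
    }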
 
 /*
@@ -149,19 +351,17 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
  * For Continue, apply soft reset for data(SYSCTL[RSTD]);
  * and re-issue the entire read transaction from beginning.
  */
-static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask)
+static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
 {
-       u32 tmp;
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_esdhc *esdhc = pltfm_host->priv;
        bool applicable;
        dma_addr_t dmastart;
        dma_addr_t dmanow;
 
-       tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
-       tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
-
        applicable = (intmask & SDHCI_INT_DATA_END) &&
-               (intmask & SDHCI_INT_BLK_GAP) &&
-               (tmp == VENDOR_V_23);
+                    (intmask & SDHCI_INT_BLK_GAP) &&
+                    (esdhc->vendor_ver == VENDOR_V_23);
        if (!applicable)
                return;
 
@@ -179,7 +379,11 @@ static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask)
 
 static int esdhc_of_enable_dma(struct sdhci_host *host)
 {
-       setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
+       u32 value;
+
+       value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
+       value |= ESDHC_DMA_SNOOP;
+       sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
        return 0;
 }
 
@@ -199,6 +403,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
 
 static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
 {
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_esdhc *esdhc = pltfm_host->priv;
        int pre_div = 1;
        int div = 1;
        u32 temp;
@@ -209,9 +415,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
                return;
 
        /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
-       temp = esdhc_readw(host, SDHCI_HOST_VERSION);
-       temp = (temp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
-       if (temp < VENDOR_V_23)
+       if (esdhc->vendor_ver < VENDOR_V_23)
                pre_div = 2;
 
        /* Workaround to reduce the clock frequency for p1010 esdhc */
@@ -247,39 +451,26 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
        mdelay(1);
 }
 
-static void esdhc_of_platform_init(struct sdhci_host *host)
-{
-       u32 vvn;
-
-       vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
-       vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
-       if (vvn == VENDOR_V_22)
-               host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
-
-       if (vvn > VENDOR_V_22)
-               host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
-}
-
 static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
 {
        u32 ctrl;
 
+       ctrl = sdhci_readl(host, ESDHC_PROCTL);
+       ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
        switch (width) {
        case MMC_BUS_WIDTH_8:
-               ctrl = ESDHC_CTRL_8BITBUS;
+               ctrl |= ESDHC_CTRL_8BITBUS;
                break;
 
        case MMC_BUS_WIDTH_4:
-               ctrl = ESDHC_CTRL_4BITBUS;
+               ctrl |= ESDHC_CTRL_4BITBUS;
                break;
 
        default:
-               ctrl = 0;
                break;
        }
 
-       clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL,
-                       ESDHC_CTRL_BUSWIDTH_MASK, ctrl);
+       sdhci_writel(host, ctrl, ESDHC_PROCTL);
 }
 
 static void esdhc_reset(struct sdhci_host *host, u8 mask)
@@ -290,32 +481,13 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
 }
 
-static const struct sdhci_ops sdhci_esdhc_ops = {
-       .read_l = esdhc_readl,
-       .read_w = esdhc_readw,
-       .read_b = esdhc_readb,
-       .write_l = esdhc_writel,
-       .write_w = esdhc_writew,
-       .write_b = esdhc_writeb,
-       .set_clock = esdhc_of_set_clock,
-       .enable_dma = esdhc_of_enable_dma,
-       .get_max_clock = esdhc_of_get_max_clock,
-       .get_min_clock = esdhc_of_get_min_clock,
-       .platform_init = esdhc_of_platform_init,
-       .adma_workaround = esdhci_of_adma_workaround,
-       .set_bus_width = esdhc_pltfm_set_bus_width,
-       .reset = esdhc_reset,
-       .set_uhs_signaling = sdhci_set_uhs_signaling,
-};
-
 #ifdef CONFIG_PM
-
 static u32 esdhc_proctl;
 static int esdhc_of_suspend(struct device *dev)
 {
        struct sdhci_host *host = dev_get_drvdata(dev);
 
-       esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
+       esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
 
        return sdhci_suspend_host(host);
 }
@@ -328,9 +500,8 @@ static int esdhc_of_resume(struct device *dev)
        if (ret == 0) {
                /* Isn't this already done by sdhci_resume_host() ? --rmk */
                esdhc_of_enable_dma(host);
-               sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
+               sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
        }
-
        return ret;
 }
 
@@ -343,37 +514,103 @@ static const struct dev_pm_ops esdhc_pmops = {
 #define ESDHC_PMOPS NULL
 #endif
 
-static const struct sdhci_pltfm_data sdhci_esdhc_pdata = {
-       /*
-        * card detection could be handled via GPIO
-        * eSDHC cannot support End Attribute in NOP ADMA descriptor
-        */
+static const struct sdhci_ops sdhci_esdhc_be_ops = {
+       .read_l = esdhc_be_readl,
+       .read_w = esdhc_be_readw,
+       .read_b = esdhc_be_readb,
+       .write_l = esdhc_be_writel,
+       .write_w = esdhc_be_writew,
+       .write_b = esdhc_be_writeb,
+       .set_clock = esdhc_of_set_clock,
+       .enable_dma = esdhc_of_enable_dma,
+       .get_max_clock = esdhc_of_get_max_clock,
+       .get_min_clock = esdhc_of_get_min_clock,
+       .adma_workaround = esdhc_of_adma_workaround,
+       .set_bus_width = esdhc_pltfm_set_bus_width,
+       .reset = esdhc_reset,
+       .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_ops sdhci_esdhc_le_ops = {
+       .read_l = esdhc_le_readl,
+       .read_w = esdhc_le_readw,
+       .read_b = esdhc_le_readb,
+       .write_l = esdhc_le_writel,
+       .write_w = esdhc_le_writew,
+       .write_b = esdhc_le_writeb,
+       .set_clock = esdhc_of_set_clock,
+       .enable_dma = esdhc_of_enable_dma,
+       .get_max_clock = esdhc_of_get_max_clock,
+       .get_min_clock = esdhc_of_get_min_clock,
+       .adma_workaround = esdhc_of_adma_workaround,
+       .set_bus_width = esdhc_pltfm_set_bus_width,
+       .reset = esdhc_reset,
+       .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
        .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
                | SDHCI_QUIRK_NO_CARD_NO_RESET
                | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
-       .ops = &sdhci_esdhc_ops,
+       .ops = &sdhci_esdhc_be_ops,
 };
 
+static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
+       .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
+               | SDHCI_QUIRK_NO_CARD_NO_RESET
+               | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+       .ops = &sdhci_esdhc_le_ops,
+};
+
+static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
+{
+       struct sdhci_pltfm_host *pltfm_host;
+       struct sdhci_esdhc *esdhc;
+       u16 host_ver;
+
+       pltfm_host = sdhci_priv(host);
+       esdhc = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_esdhc),
+                            GFP_KERNEL);
+
+       host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
+       esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
+                            SDHCI_VENDOR_VER_SHIFT;
+       esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
+
+       pltfm_host->priv = esdhc;
+}
+
 static int sdhci_esdhc_probe(struct platform_device *pdev)
 {
        struct sdhci_host *host;
        struct device_node *np;
        int ret;
 
-       host = sdhci_pltfm_init(pdev, &sdhci_esdhc_pdata, 0);
+       np = pdev->dev.of_node;
+
+       if (of_get_property(np, "little-endian", NULL))
+               host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, 0);
+       else
+               host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata, 0);
+
        if (IS_ERR(host))
                return PTR_ERR(host);
 
+       esdhc_init(pdev, host);
+
        sdhci_get_of_property(pdev);
 
-       np = pdev->dev.of_node;
        if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
            of_device_is_compatible(np, "fsl,p5020-esdhc") ||
            of_device_is_compatible(np, "fsl,p4080-esdhc") ||
            of_device_is_compatible(np, "fsl,p1020-esdhc") ||
-           of_device_is_compatible(np, "fsl,t1040-esdhc"))
+           of_device_is_compatible(np, "fsl,t1040-esdhc") ||
+           of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
                host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
 
+       if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
+               host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
        if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
                /*
                 * Freescale messed up with P2020 as it has a non-standard
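
The be/le ops split above ultimately selects a byte order for decoding the controller's registers at probe time. Below is a minimal userspace sketch of that idea, assuming invented names (esdhc_ops_sketch, read_l_be, read_l_le); it is not the driver's actual accessor code.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <endian.h>

    /* One ops table per byte order; the same raw register bytes decode
     * differently depending on which table probe selected. */
    struct esdhc_ops_sketch {
        uint32_t (*read_l)(const void *addr);
    };

    static uint32_t read_l_be(const void *addr)
    {
        uint32_t raw;
        memcpy(&raw, addr, sizeof(raw));
        return be32toh(raw);                /* big-endian register block */
    }

    static uint32_t read_l_le(const void *addr)
    {
        uint32_t raw;
        memcpy(&raw, addr, sizeof(raw));
        return le32toh(raw);                /* little-endian register block */
    }

    int main(void)
    {
        const uint8_t reg[4] = { 0x12, 0x34, 0x56, 0x78 };  /* fake register */
        struct esdhc_ops_sketch be = { .read_l = read_l_be };
        struct esdhc_ops_sketch le = { .read_l = read_l_le };

        printf("BE view: 0x%08x\n", be.read_l(reg));   /* 0x12345678 */
        printf("LE view: 0x%08x\n", le.read_l(reg));   /* 0x78563412 */
        return 0;
    }

The probe hunk picks the table from the "little-endian" device-tree property, which is why both ops structures otherwise share the same callbacks.
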
similarity index 96%
rename from drivers/mmc/host/sdhci-pci.c
rename to drivers/mmc/host/sdhci-pci-core.c
index b3b0a3e4fca1652e3ceb1759c0bcbd13bcc1d22e..cf7ad458b4f44fe60beb0a614882fbb985aede37 100644 (file)
@@ -444,11 +444,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
        else
                scratch &= ~0x47;
 
-       ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
-       if (ret)
-               return ret;
-
-       return 0;
+       return pci_write_config_byte(chip->pdev, 0xAE, scratch);
 }
 
 static int jmicron_probe(struct sdhci_pci_chip *chip)
@@ -1112,6 +1108,62 @@ static const struct pci_device_id pci_ids[] = {
                .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
        },
 
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_DNV_EMMC,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BXT_EMMC,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BXT_SDIO,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BXT_SD,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_APL_EMMC,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_APL_SDIO,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_APL_SD,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
+       },
+
        {
                .vendor         = PCI_VENDOR_ID_O2,
                .device         = PCI_DEVICE_ID_O2_8120,
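
The Intel DNV/BXT/APL additions above are plain rows in the driver's PCI ID table: an exact vendor/device match with PCI_ANY_ID wildcards for the subsystem IDs. A self-contained sketch of that matching logic; struct and helper names (pci_id_sketch, match) are invented, and only the two eMMC device IDs are taken from the hunks.

    #include <stdint.h>
    #include <stdio.h>

    #define ANY_ID 0xffff                    /* stand-in for PCI_ANY_ID */

    struct pci_id_sketch {
        uint16_t vendor, device, subvendor, subdevice;
        const char *driver_data;             /* which slot config to use */
    };

    static const struct pci_id_sketch ids[] = {
        { 0x8086, 0x0acc, ANY_ID, ANY_ID, "bxt-emmc" },
        { 0x8086, 0x5acc, ANY_ID, ANY_ID, "apl-emmc" },
        { 0 }                                /* sentinel */
    };

    static const char *match(uint16_t ven, uint16_t dev,
                             uint16_t sven, uint16_t sdev)
    {
        for (const struct pci_id_sketch *p = ids; p->vendor; p++) {
            if (p->vendor == ven && p->device == dev &&
                (p->subvendor == ANY_ID || p->subvendor == sven) &&
                (p->subdevice == ANY_ID || p->subdevice == sdev))
                return p->driver_data;
        }
        return NULL;
    }

    int main(void)
    {
        const char *d = match(0x8086, 0x5acc, 0x1028, 0x0001);
        printf("%s\n", d ? d : "no match");  /* prints apl-emmc */
        return 0;
    }
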
index e2ec108dba0e8220e1d23804f11b80ccfa71414f..d48f03104b5be39a978bdbe9d1ae7484f70beaca 100644 (file)
@@ -60,7 +60,7 @@ static void o2_pci_led_enable(struct sdhci_pci_chip *chip)
 
 }
 
-void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
+static void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
 {
        u32 scratch_32;
        int ret;
@@ -145,7 +145,6 @@ void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
        scratch_32 |= 0x00080000;
        pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32);
 }
-EXPORT_SYMBOL_GPL(sdhci_pci_o2_fujin2_pci_init);
 
 int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
 {
@@ -179,7 +178,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe_slot);
 
 int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
 {
@@ -385,11 +383,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe);
 
 int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
 {
        sdhci_pci_o2_probe(chip);
        return 0;
 }
-EXPORT_SYMBOL_GPL(sdhci_pci_o2_resume);
index f7ffc908d9a0d2f83b13a7a00e09c635aceb5079..770f53857211cc445853359eafc33a88cb94eb82 100644 (file)
@@ -64,8 +64,6 @@
 #define O2_SD_VENDOR_SETTING   0x110
 #define O2_SD_VENDOR_SETTING2  0x1C8
 
-extern void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip);
-
 extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
 
 extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
index 541f1cad5247f0f4ea04d18b9c89367e3f1801ba..d1a0b4db60db41d67ad7d0e34652f2d13d2d3507 100644 (file)
 #define PCI_DEVICE_ID_INTEL_SPT_EMMC   0x9d2b
 #define PCI_DEVICE_ID_INTEL_SPT_SDIO   0x9d2c
 #define PCI_DEVICE_ID_INTEL_SPT_SD     0x9d2d
+#define PCI_DEVICE_ID_INTEL_DNV_EMMC   0x19db
+#define PCI_DEVICE_ID_INTEL_BXT_SD     0x0aca
+#define PCI_DEVICE_ID_INTEL_BXT_EMMC   0x0acc
+#define PCI_DEVICE_ID_INTEL_BXT_SDIO   0x0ad0
+#define PCI_DEVICE_ID_INTEL_APL_SD     0x5aca
+#define PCI_DEVICE_ID_INTEL_APL_EMMC   0x5acc
+#define PCI_DEVICE_ID_INTEL_APL_SDIO   0x5ad0
 
 /*
  * PCI registers
index a207f5aaf62f53d7aa4db3699b4d695f9ce5d625..87fb5ea8ebe7cae575b8281f07c630ce03ce2223 100644 (file)
@@ -71,9 +71,7 @@ void sdhci_get_of_property(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        struct sdhci_host *host = platform_get_drvdata(pdev);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-       const __be32 *clk;
        u32 bus_width;
-       int size;
 
        if (of_get_property(np, "sdhci,auto-cmd12", NULL))
                host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
@@ -101,9 +99,7 @@ void sdhci_get_of_property(struct platform_device *pdev)
            of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
                host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
 
-       clk = of_get_property(np, "clock-frequency", &size);
-       if (clk && size == sizeof(*clk) && *clk)
-               pltfm_host->clock = be32_to_cpup(clk);
+       of_property_read_u32(np, "clock-frequency", &pltfm_host->clock);
 
        if (of_find_property(np, "keep-power-in-suspend", NULL))
                host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
index 946d37f94a31b29e8739304ec71cf7b1468eead6..f5edf9d3a18a2088a2b08705d876c413b1d659b5 100644 (file)
@@ -135,6 +135,7 @@ static int armada_38x_quirks(struct platform_device *pdev,
        struct sdhci_pxa *pxa = pltfm_host->priv;
        struct resource *res;
 
+       host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
        host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           "conf-sdio3");
@@ -290,6 +291,9 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
                    uhs == MMC_TIMING_UHS_DDR50) {
                        reg_val &= ~SDIO3_CONF_CLK_INV;
                        reg_val |= SDIO3_CONF_SD_FB_CLK;
+               } else if (uhs == MMC_TIMING_MMC_HS) {
+                       reg_val &= ~SDIO3_CONF_CLK_INV;
+                       reg_val &= ~SDIO3_CONF_SD_FB_CLK;
                } else {
                        reg_val |= SDIO3_CONF_CLK_INV;
                        reg_val &= ~SDIO3_CONF_SD_FB_CLK;
@@ -398,7 +402,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
        if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
                ret = armada_38x_quirks(pdev, host);
                if (ret < 0)
-                       goto err_clk_get;
+                       goto err_mbus_win;
                ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
                if (ret < 0)
                        goto err_mbus_win;
index 884294576356d4c87e3869162186aadbc24b4f6a..34866f668dd7b7aee1b943d2001355ea9652c9d2 100644 (file)
@@ -50,7 +50,8 @@ static u32 sdhci_sirf_readl_le(struct sdhci_host *host, int reg)
        if (unlikely((reg == SDHCI_CAPABILITIES_1) &&
                        (host->mmc->caps & MMC_CAP_UHS_SDR50))) {
                /* fake CAP_1 register */
-               val = SDHCI_SUPPORT_SDR50 | SDHCI_USE_SDR50_TUNING;
+               val = SDHCI_SUPPORT_DDR50 |
+                       SDHCI_SUPPORT_SDR50 | SDHCI_USE_SDR50_TUNING;
        }
 
        if (unlikely(reg == SDHCI_SLOT_INT_STATUS)) {
@@ -97,7 +98,7 @@ retry:
                        clock_setting | phase,
                        SDHCI_CLK_DELAY_SETTING);
 
-               if (!mmc_send_tuning(mmc)) {
+               if (!mmc_send_tuning(mmc, opcode, NULL)) {
                        /* Tuning is successful at this tuning point */
                        tuned_phase_cnt++;
                        dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
index 64b7fdbd1a9ccab80034e8a38660ef944daf8bae..b48565ed5616c79302008963a05aa7e1e18d742c 100644 (file)
@@ -1160,6 +1160,8 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
        host->mmc->actual_clock = 0;
 
        sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+       if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
+               mdelay(1);
 
        if (clock == 0)
                return;
@@ -1893,9 +1895,9 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
                tuning_count = host->tuning_count;
 
        /*
-        * The Host Controller needs tuning only in case of SDR104 mode
-        * and for SDR50 mode when Use Tuning for SDR50 is set in the
-        * Capabilities register.
+        * The Host Controller needs tuning in case of SDR104 and DDR50
+        * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
+        * the Capabilities register.
         * If the Host Controller supports the HS200 mode then the
         * tuning function has to be executed.
         */
@@ -1915,6 +1917,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
                break;
 
        case MMC_TIMING_UHS_SDR104:
+       case MMC_TIMING_UHS_DDR50:
                break;
 
        case MMC_TIMING_UHS_SDR50:
@@ -2714,17 +2717,6 @@ int sdhci_resume_host(struct sdhci_host *host)
                        host->ops->enable_dma(host);
        }
 
-       if (!device_may_wakeup(mmc_dev(host->mmc))) {
-               ret = request_threaded_irq(host->irq, sdhci_irq,
-                                          sdhci_thread_irq, IRQF_SHARED,
-                                          mmc_hostname(host->mmc), host);
-               if (ret)
-                       return ret;
-       } else {
-               sdhci_disable_irq_wakeups(host);
-               disable_irq_wake(host->irq);
-       }
-
        if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
            (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
                /* Card keeps power but host controller does not */
@@ -2737,6 +2729,17 @@ int sdhci_resume_host(struct sdhci_host *host)
                mmiowb();
        }
 
+       if (!device_may_wakeup(mmc_dev(host->mmc))) {
+               ret = request_threaded_irq(host->irq, sdhci_irq,
+                                          sdhci_thread_irq, IRQF_SHARED,
+                                          mmc_hostname(host->mmc), host);
+               if (ret)
+                       return ret;
+       } else {
+               sdhci_disable_irq_wakeups(host);
+               disable_irq_wake(host->irq);
+       }
+
        sdhci_enable_card_detection(host);
 
        return ret;
index 7c02ff46c8ac3ecdaf37e792fd6bcb43c9bd029e..9d4aa31b683ac2d64e16f31f88d8d0893162a225 100644 (file)
@@ -412,6 +412,11 @@ struct sdhci_host {
 #define SDHCI_QUIRK2_ACMD23_BROKEN                     (1<<14)
 /* Broken Clock divider zero in controller */
 #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN             (1<<15)
+/*
+ * When internal clock is disabled, a delay is needed before modifying the
+ * SD clock frequency or enabling back the internal clock.
+ */
+#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST      (1<<16)
 
        int irq;                /* Device IRQ */
        void __iomem *ioaddr;   /* Mapped address */
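
The new SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST above is just one more bit in the 32-bit quirks2 word: a host driver sets it at probe time and sdhci_set_clock() tests it before inserting the extra 1 ms delay seen in the earlier sdhci.c hunk. A trivial sketch of that set-and-test pattern; only the bit position comes from the hunk, the rest is illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST  (1u << 16)

    int main(void)
    {
        uint32_t quirks2 = 0;

        quirks2 |= QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST;  /* set by a host driver */

        if (quirks2 & QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
            printf("would mdelay(1) after gating the internal clock\n");
        return 0;
    }
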
index a7b7a67715986d748d9f880088cc2ae069bd2283..83de82bceafc91a6dd6326d393a4285a5b9061ad 100644 (file)
 #define SDXC_IDMAC_DES0_CES    BIT(30) /* card error summary */
 #define SDXC_IDMAC_DES0_OWN    BIT(31) /* 1-idma owns it, 0-host owns it */
 
+#define SDXC_CLK_400K          0
+#define SDXC_CLK_25M           1
+#define SDXC_CLK_50M           2
+#define SDXC_CLK_50M_DDR       3
+
+struct sunxi_mmc_clk_delay {
+       u32 output;
+       u32 sample;
+};
+
 struct sunxi_idma_des {
        u32     config;
        u32     buf_size;
@@ -229,6 +239,7 @@ struct sunxi_mmc_host {
        struct clk      *clk_mmc;
        struct clk      *clk_sample;
        struct clk      *clk_output;
+       const struct sunxi_mmc_clk_delay *clk_delays;
 
        /* irq */
        spinlock_t      lock;
@@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
 
        /* determine delays */
        if (rate <= 400000) {
-               oclk_dly = 180;
-               sclk_dly = 42;
+               oclk_dly = host->clk_delays[SDXC_CLK_400K].output;
+               sclk_dly = host->clk_delays[SDXC_CLK_400K].sample;
        } else if (rate <= 25000000) {
-               oclk_dly = 180;
-               sclk_dly = 75;
+               oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
+               sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
        } else if (rate <= 50000000) {
                if (ios->timing == MMC_TIMING_UHS_DDR50) {
-                       oclk_dly = 60;
-                       sclk_dly = 120;
+                       oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
+                       sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
                } else {
-                       oclk_dly = 90;
-                       sclk_dly = 150;
+                       oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
+                       sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
                }
-       } else if (rate <= 100000000) {
-               oclk_dly = 6;
-               sclk_dly = 24;
-       } else if (rate <= 200000000) {
-               oclk_dly = 3;
-               sclk_dly = 12;
        } else {
                return -EINVAL;
        }
@@ -868,9 +873,17 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
        spin_unlock_irqrestore(&host->lock, iflags);
 }
 
+static int sunxi_mmc_card_busy(struct mmc_host *mmc)
+{
+       struct sunxi_mmc_host *host = mmc_priv(mmc);
+
+       return !!(mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY);
+}
+
 static const struct of_device_id sunxi_mmc_of_match[] = {
        { .compatible = "allwinner,sun4i-a10-mmc", },
        { .compatible = "allwinner,sun5i-a13-mmc", },
+       { .compatible = "allwinner,sun9i-a80-mmc", },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
@@ -882,6 +895,21 @@ static struct mmc_host_ops sunxi_mmc_ops = {
        .get_cd          = mmc_gpio_get_cd,
        .enable_sdio_irq = sunxi_mmc_enable_sdio_irq,
        .hw_reset        = sunxi_mmc_hw_reset,
+       .card_busy       = sunxi_mmc_card_busy,
+};
+
+static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
+       [SDXC_CLK_400K]         = { .output = 180, .sample = 180 },
+       [SDXC_CLK_25M]          = { .output = 180, .sample =  75 },
+       [SDXC_CLK_50M]          = { .output =  90, .sample = 120 },
+       [SDXC_CLK_50M_DDR]      = { .output =  60, .sample = 120 },
+};
+
+static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
+       [SDXC_CLK_400K]         = { .output = 180, .sample = 180 },
+       [SDXC_CLK_25M]          = { .output = 180, .sample =  75 },
+       [SDXC_CLK_50M]          = { .output = 150, .sample = 120 },
+       [SDXC_CLK_50M_DDR]      = { .output =  90, .sample = 120 },
 };
 
 static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
@@ -895,6 +923,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
        else
                host->idma_des_size_bits = 16;
 
+       if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc"))
+               host->clk_delays = sun9i_mmc_clk_delays;
+       else
+               host->clk_delays = sunxi_mmc_clk_delays;
+
        ret = mmc_regulator_get_supply(host->mmc);
        if (ret) {
                if (ret != -EPROBE_DEFER)
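
The clock-delay rework above replaces hard-coded numbers with a per-compatible table indexed by the rate bucket the requested frequency falls into. A small sketch of that lookup, reusing the sun4i values from the hunk; rate_to_bucket() and the other names are invented for the example.

    #include <stdint.h>
    #include <stdio.h>

    enum { CLK_400K, CLK_25M, CLK_50M, CLK_50M_DDR, CLK_NR };

    struct clk_delay { uint32_t output, sample; };

    /* One table per SoC family; this mirrors sunxi_mmc_clk_delays[]. */
    static const struct clk_delay sun4i_delays[CLK_NR] = {
        [CLK_400K]    = { 180, 180 },
        [CLK_25M]     = { 180,  75 },
        [CLK_50M]     = {  90, 120 },
        [CLK_50M_DDR] = {  60, 120 },
    };

    static int rate_to_bucket(unsigned long rate, int ddr)
    {
        if (rate <= 400000)
            return CLK_400K;
        if (rate <= 25000000)
            return CLK_25M;
        if (rate <= 50000000)
            return ddr ? CLK_50M_DDR : CLK_50M;
        return -1;                           /* unsupported, like -EINVAL */
    }

    int main(void)
    {
        int b = rate_to_bucket(50000000, 0);

        if (b >= 0)
            printf("oclk_dly=%u sclk_dly=%u\n",
                   sun4i_delays[b].output, sun4i_delays[b].sample);
        return 0;
    }

Dropping the 100 MHz and 200 MHz branches matches the hunk: rates above 50 MHz now fail with -EINVAL instead of using delays that had no table entry.
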
index fbabbb82b35486d599c2718127ad8abaf3dabbbd..1e819f98b94f52fe0a2f289f2e38dd4dbf72c27b 100644 (file)
@@ -563,7 +563,7 @@ static void add_offloaded_reg(struct vub300_mmc_host *vub300,
                        i += 1;
                        continue;
                }
-       };
+       }
        __add_offloaded_reg_to_fifo(vub300, register_access, func);
 }
 
@@ -1372,7 +1372,7 @@ static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
                l += snprintf(vub300->vub_name + l,
                              sizeof(vub300->vub_name) - l, "_%04X%04X",
                              sf->vendor, sf->device);
-       };
+       }
        snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin");
        dev_info(&vub300->udev->dev, "requesting offload firmware %s\n",
                 vub300->vub_name);
@@ -1893,7 +1893,7 @@ static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300,
                        i += 1;
                        continue;
                }
-       };
+       }
        if (vub300->total_offload_count == 0)
                return 0;
        else if (vub300->fn[func].offload_count == 0)
index ca183ea767b32b5bace0ab819b32fdb251bb4314..c3fd16d997caba4d903d6e8d3ea35acd0f350075 100644 (file)
@@ -809,7 +809,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
                        cmd->error = -EINVAL;
 
                        goto done;
-               };
+               }
        }
 
        /*
index 2426db88db36bf95f1f247eeae597ff69238c70d..f04445b992f512c537018b81bf0d685a3ee2f62b 100644 (file)
@@ -879,7 +879,7 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
                                      oob_chunk_size);
 
                /* the last chunk */
-               memcpy16_toio(&s[oob_chunk_size * sparebuf_size],
+               memcpy16_toio(&s[i * sparebuf_size],
                              &d[i * oob_chunk_size],
                              host->used_oobsize - i * oob_chunk_size);
        }
index f97a58d6aae1bbbacdb29ca86ac30c1f21d19e48..e7d333c162befd274f891b8674b5ca8fd905315e 100644 (file)
 #define NFC_ECC_MODE           GENMASK(15, 12)
 #define NFC_RANDOM_SEED                GENMASK(30, 16)
 
+/* NFC_USER_DATA helper macros */
+#define NFC_BUF_TO_USER_DATA(buf)      ((buf)[0] | ((buf)[1] << 8) | \
+                                       ((buf)[2] << 16) | ((buf)[3] << 24))
+
 #define NFC_DEFAULT_TIMEOUT_MS 1000
 
 #define NFC_SRAM_SIZE          1024
@@ -646,15 +650,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
                offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
 
                /* Fill OOB data in */
-               if (oob_required) {
-                       tmp = 0xffffffff;
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
-                                   4);
-               } else {
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
-                                   chip->oob_poi + offset - mtd->writesize,
-                                   4);
-               }
+               writel(NFC_BUF_TO_USER_DATA(chip->oob_poi +
+                                           layout->oobfree[i].offset),
+                      nfc->regs + NFC_REG_USER_DATA_BASE);
 
                chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
 
@@ -784,14 +782,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
                offset += ecc->size;
 
                /* Fill OOB data in */
-               if (oob_required) {
-                       tmp = 0xffffffff;
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
-                                   4);
-               } else {
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
-                                   4);
-               }
+               writel(NFC_BUF_TO_USER_DATA(oob),
+                      nfc->regs + NFC_REG_USER_DATA_BASE);
 
                tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
                      (1 << 30);
@@ -1389,6 +1381,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
                                        node);
                nand_release(&chip->mtd);
                sunxi_nand_ecc_cleanup(&chip->nand.ecc);
+               list_del(&chip->node);
        }
 }
 
index 5bbd1f094f4e33dca9c7ad3dca3edc7736b7e0bb..1fc23e48fe8e49fc947c972cca179399a60a37ec 100644 (file)
@@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
                goto bad;
        }
 
+       if (data_size > ubi->leb_size) {
+               ubi_err(ubi, "bad data_size");
+               goto bad;
+       }
+
        if (vol_type == UBI_VID_STATIC) {
                /*
                 * Although from high-level point of view static volumes may
index 80bdd5b88bac271fbd01f5496be76bf1b79f0d9a..d85c1976216078d2f1647b34ba5873c9c86439a0 100644 (file)
@@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi,
                if (ubi->corr_peb_count)
                        ubi_err(ubi, "%d PEBs are corrupted and not used",
                                ubi->corr_peb_count);
+               return -ENOSPC;
        }
        ubi->rsvd_pebs += reserved_pebs;
        ubi->avail_pebs -= reserved_pebs;
index 275d9fb6fe5c541c7253ece5cacc7ae189110eba..eb4489f9082fe84345d2ec68b0f8723888e5c177 100644 (file)
@@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
                if (ubi->corr_peb_count)
                        ubi_err(ubi, "%d PEBs are corrupted and not used",
                                ubi->corr_peb_count);
+               err = -ENOSPC;
                goto out_free;
        }
        ubi->avail_pebs -= reserved_pebs;
index e5fac368068a2320eb06d207934c4b969356237b..131026fbc2d77cbc3ccb5903daa10f8920f8ae17 100644 (file)
@@ -87,6 +87,7 @@ static const struct pci_device_id peak_pci_tbl[] = {
        {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
        {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
        {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+       {PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
 #ifdef CONFIG_CAN_PEAK_PCIEC
        {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
        {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
index f8baa897d1a0e48b39f2d628ce36bfc2ef73ca08..1f7dd927cc5ea4777530a8ba23daae631f258bf9 100644 (file)
@@ -2051,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                                reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
                        else
                                reg |= PORT_CONTROL_FRAME_MODE_DSA;
+                       reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+                               PORT_CONTROL_FORWARD_UNKNOWN_MC;
                }
 
                if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
index 48ce83e443c2b12bbcbc2e2545c5ac6b1983f3d0..8d50314ac3eb1f308d1cc556270058aba05c7b60 100644 (file)
@@ -847,21 +847,25 @@ static int emac_probe(struct platform_device *pdev)
        if (ndev->irq == -ENXIO) {
                netdev_err(ndev, "No irq resource\n");
                ret = ndev->irq;
-               goto out;
+               goto out_iounmap;
        }
 
        db->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(db->clk)) {
                ret = PTR_ERR(db->clk);
-               goto out;
+               goto out_iounmap;
        }
 
-       clk_prepare_enable(db->clk);
+       ret = clk_prepare_enable(db->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
+               goto out_iounmap;
+       }
 
        ret = sunxi_sram_claim(&pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
-               goto out;
+               goto out_clk_disable_unprepare;
        }
 
        db->phy_node = of_parse_phandle(np, "phy", 0);
@@ -910,6 +914,10 @@ static int emac_probe(struct platform_device *pdev)
 
 out_release_sram:
        sunxi_sram_release(&pdev->dev);
+out_clk_disable_unprepare:
+       clk_disable_unprepare(db->clk);
+out_iounmap:
+       iounmap(db->membase);
 out:
        dev_err(db->dev, "not found (%d).\n", ret);
 
@@ -921,8 +929,12 @@ out:
 static int emac_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
+       struct emac_board_info *db = netdev_priv(ndev);
 
        unregister_netdev(ndev);
+       sunxi_sram_release(&pdev->dev);
+       clk_disable_unprepare(db->clk);
+       iounmap(db->membase);
        free_netdev(ndev);
 
        dev_dbg(&pdev->dev, "released and freed device\n");
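
The emac_probe() fix above adds unwind labels so that each failure point releases only what was already acquired, in reverse order, and emac_remove() now performs the same teardown. A compilable sketch of that goto-ladder shape, with placeholder acquire/release helpers standing in for the ioremap/clock/SRAM steps.

    #include <stdio.h>

    static int acquire_regs(void) { puts("map regs");   return 0; }
    static int acquire_clk(void)  { puts("enable clk"); return 0; }
    static int acquire_sram(void) { puts("claim sram"); return -1; }  /* pretend failure */

    static void release_clk(void)  { puts("clk_disable_unprepare"); }
    static void release_regs(void) { puts("iounmap"); }

    static int probe_sketch(void)
    {
        int ret;

        ret = acquire_regs();
        if (ret)
            goto out;
        ret = acquire_clk();
        if (ret)
            goto out_unmap;
        ret = acquire_sram();
        if (ret)
            goto out_clk_disable;

        return 0;                            /* fully probed */

    out_clk_disable:
        release_clk();
    out_unmap:
        release_regs();
    out:
        return ret;
    }

    int main(void)
    {
        return probe_sketch() ? 1 : 0;
    }
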
index 2c063b60db4b02bc48246887fb5d98f6b3de0394..96f485ab612e679dc7065b1e214cb9d73c690d43 100644 (file)
@@ -327,9 +327,13 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
        pdata->debugfs_xpcs_reg = 0;
 
        buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
+       if (!buf)
+               return;
+
        pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
        if (!pdata->xgbe_debugfs) {
                netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
+               kfree(buf);
                return;
        }
 
index a4473d8ff4fa0e1ec7bbdb511f9edd51f1871d71..f672dba345f7f73b028741ec60befa6ab012fd04 100644 (file)
@@ -1595,7 +1595,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
                                  packet->rdesc_count, 1);
 
        /* Make sure ownership is written to the descriptor */
-       dma_wmb();
+       smp_wmb();
 
        ring->cur = cur_index + 1;
        if (!packet->skb->xmit_more ||
index aae9d5ecd1822b16a2812de3bee503f59113adaa..dde0486667e0cfab87c593283d80a71f873977d3 100644 (file)
@@ -1807,6 +1807,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
        struct netdev_queue *txq;
        int processed = 0;
        unsigned int tx_packets = 0, tx_bytes = 0;
+       unsigned int cur;
 
        DBGPR("-->xgbe_tx_poll\n");
 
@@ -1814,10 +1815,15 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
        if (!ring)
                return 0;
 
+       cur = ring->cur;
+
+       /* Be sure we get ring->cur before accessing descriptor data */
+       smp_rmb();
+
        txq = netdev_get_tx_queue(netdev, channel->queue_index);
 
        while ((processed < XGBE_TX_DESC_MAX_PROC) &&
-              (ring->dirty != ring->cur)) {
+              (ring->dirty != cur)) {
                rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
                rdesc = rdata->rdesc;
 
index a7f2cc3e485eebfae962fe24cfc1142021a74cde..4183c2abeeeb2dc206f2ca4aa90d88279e2eb6d9 100644 (file)
@@ -2049,7 +2049,7 @@ static void swphy_poll_timer(unsigned long data)
 
        for (i = 0; i < priv->num_ports; i++) {
                struct bcm63xx_enetsw_port *port;
-               int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
+               int val, j, up, advertise, lpa, speed, duplex, media;
                int external_phy = bcm_enet_port_is_rgmii(i);
                u8 override;
 
@@ -2092,22 +2092,27 @@ static void swphy_poll_timer(unsigned long data)
                lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
                                           MII_LPA);
 
-               lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
-                                           MII_STAT1000);
-
                /* figure out media and duplex from advertise and LPA values */
                media = mii_nway_result(lpa & advertise);
                duplex = (media & ADVERTISE_FULL) ? 1 : 0;
-               if (lpa2 & LPA_1000FULL)
-                       duplex = 1;
-
-               if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
-                       speed = 1000;
-               else {
-                       if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
-                               speed = 100;
-                       else
-                               speed = 10;
+
+               if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
+                       speed = 100;
+               else
+                       speed = 10;
+
+               if (val & BMSR_ESTATEN) {
+                       advertise = bcmenet_sw_mdio_read(priv, external_phy,
+                                               port->phy_id, MII_CTRL1000);
+
+                       lpa = bcmenet_sw_mdio_read(priv, external_phy,
+                                               port->phy_id, MII_STAT1000);
+
+                       if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+                                       && lpa & (LPA_1000FULL | LPA_1000HALF)) {
+                               speed = 1000;
+                               duplex = (lpa & LPA_1000FULL);
+                       }
                }
 
                dev_info(&priv->pdev->dev,
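
The rework above resolves 10/100 speed and duplex from the base advertise/LPA overlap first, and only claims gigabit when BMSR reports extended status and MII_CTRL1000/MII_STAT1000 agree. A sketch of that decision with the register decoding reduced to booleans; every name here is invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    struct link_state { int speed; bool full_duplex; };

    static struct link_state resolve_link(bool lpa_100, bool lpa_100_full,
                                          bool estaten,
                                          bool adv_1000, bool lpa_1000,
                                          bool lpa_1000_full)
    {
        struct link_state ls = {
            .speed       = lpa_100 ? 100 : 10,
            .full_duplex = lpa_100_full,
        };

        if (estaten && adv_1000 && lpa_1000) {   /* both ends do gigabit */
            ls.speed = 1000;
            ls.full_duplex = lpa_1000_full;
        }
        return ls;
    }

    int main(void)
    {
        struct link_state ls = resolve_link(true, true, true, true, true, true);

        printf("%d Mb/s, %s duplex\n", ls.speed, ls.full_duplex ? "full" : "half");
        return 0;
    }
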
index aeb7ce64452e14cd3cbe49325f63bae2d99e3ef2..be628bd9fb18b6f0116125e5a3f9ea16ad1d3561 100644 (file)
@@ -3351,6 +3351,13 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
                        udp_rss_requested = 0;
                else
                        return -EINVAL;
+
+               if (CHIP_IS_E1x(bp) && udp_rss_requested) {
+                       DP(BNX2X_MSG_ETHTOOL,
+                          "57710, 57711 boards don't support RSS according to UDP 4-tuple\n");
+                       return -EINVAL;
+               }
+
                if ((info->flow_type == UDP_V4_FLOW) &&
                    (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
                        bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
index 3bc701e4c59eb56cc19e11c917dd34c24062754e..1805541b4240e72c79445ea6f50bbc2a74ba13c7 100644 (file)
@@ -1683,6 +1683,24 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
        bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
 }
 
+static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
+{
+       u32 int0_enable = 0;
+
+       /* Monitor cable plug/unplugged event for internal PHY, external PHY
+        * and MoCA PHY
+        */
+       if (priv->internal_phy) {
+               int0_enable |= UMAC_IRQ_LINK_EVENT;
+       } else if (priv->ext_phy) {
+               int0_enable |= UMAC_IRQ_LINK_EVENT;
+       } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+               if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+                       int0_enable |= UMAC_IRQ_LINK_EVENT;
+       }
+       bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+}
+
 static int init_umac(struct bcmgenet_priv *priv)
 {
        struct device *kdev = &priv->pdev->dev;
@@ -1723,15 +1741,8 @@ static int init_umac(struct bcmgenet_priv *priv)
        /* Enable Tx default queue 16 interrupts */
        int0_enable |= UMAC_IRQ_TXDMA_DONE;
 
-       /* Monitor cable plug/unplugged event for internal PHY */
-       if (priv->internal_phy) {
-               int0_enable |= UMAC_IRQ_LINK_EVENT;
-       } else if (priv->ext_phy) {
-               int0_enable |= UMAC_IRQ_LINK_EVENT;
-       } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
-               if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
-                       int0_enable |= UMAC_IRQ_LINK_EVENT;
-
+       /* Configure backpressure vectors for MoCA */
+       if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
                reg = bcmgenet_bp_mc_get(priv);
                reg |= BIT(priv->hw_params->bp_in_en_shift);
 
@@ -2645,6 +2656,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
 
        netif_tx_start_all_queues(dev);
 
+       /* Monitor link interrupts now */
+       bcmgenet_link_intr_enable(priv);
+
        phy_start(priv->phydev);
 }
 
index b7a0f7879de2d3ee4d2b419a3b26c5e7661224c6..9e59663a6eadb012de6f4a4474484800401fce3b 100644 (file)
@@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar)
 }
 
 /* Flush FLI data fifo. */
-static u32
+static int
 bfa_flash_fifo_flush(void __iomem *pci_bar)
 {
        u32 i;
@@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar)
 }
 
 /* Read flash status. */
-static u32
+static int
 bfa_flash_status_read(void __iomem *pci_bar)
 {
        union bfa_flash_dev_status_reg  dev_status;
-       u32                             status;
+       int                             status;
        u32                     ret_status;
        int                             i;
 
@@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar)
 }
 
 /* Start flash read operation. */
-static u32
+static int
 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
                     char *buf)
 {
-       u32 status;
+       int status;
 
        /* len must be multiple of 4 and not exceeding fifo size */
        if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
@@ -1703,7 +1703,8 @@ static enum bfa_status
 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
                   u32 len)
 {
-       u32 n, status;
+       u32 n;
+       int status;
        u32 off, l, s, residue, fifo_sz;
 
        residue = len;
index 9b35d142f47accfbaec039f0d8100fb45d68bbf8..8fb84e69c30ec8ddfdf6be710cbfab0a3cd1aa60 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 config NET_VENDOR_CAVIUM
-       tristate "Cavium ethernet drivers"
+       bool "Cavium ethernet drivers"
        depends on PCI
        default y
        ---help---
index b3a5947a2cc03e1675e5895fef39e35b10ea04d0..c561fdcb79a730aeeb890c5a985b05cb873faa45 100644 (file)
@@ -22,7 +22,6 @@
 
 struct nicpf {
        struct pci_dev          *pdev;
-       u8                      rev_id;
        u8                      node;
        unsigned int            flags;
        u8                      num_vf_en;      /* No of VF enabled */
@@ -44,6 +43,7 @@ struct nicpf {
        u8                      duplex[MAX_LMAC];
        u32                     speed[MAX_LMAC];
        u16                     cpi_base[MAX_NUM_VFS_SUPPORTED];
+       u16                     rssi_base[MAX_NUM_VFS_SUPPORTED];
        u16                     rss_ind_tbl_size;
        bool                    mbx_lock[MAX_NUM_VFS_SUPPORTED];
 
@@ -54,6 +54,11 @@ struct nicpf {
        bool                    irq_allocated[NIC_PF_MSIX_VECTORS];
 };
 
+static inline bool pass1_silicon(struct nicpf *nic)
+{
+       return nic->pdev->revision < 8;
+}
+
 /* Supported devices */
 static const struct pci_device_id nic_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
@@ -117,7 +122,7 @@ static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
         * when PF writes to MBOX(1), in next revisions when
         * PF writes to MBOX(0)
         */
-       if (nic->rev_id == 0) {
+       if (pass1_silicon(nic)) {
                /* see the comment for nic_reg_write()/nic_reg_read()
                 * functions above
                 */
@@ -305,9 +310,6 @@ static void nic_init_hw(struct nicpf *nic)
 {
        int i;
 
-       /* Reset NIC, in case the driver is repeatedly inserted and removed */
-       nic_reg_write(nic, NIC_PF_SOFT_RESET, 1);
-
        /* Enable NIC HW block */
        nic_reg_write(nic, NIC_PF_CFG, 0x3);
 
@@ -395,8 +397,18 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
                        padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
 
                /* Leave RSS_SIZE as '0' to disable RSS */
-               nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
-                             (vnic << 24) | (padd << 16) | (rssi_base + rssi));
+               if (pass1_silicon(nic)) {
+                       nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+                                     (vnic << 24) | (padd << 16) |
+                                     (rssi_base + rssi));
+               } else {
+                       /* Set MPI_ALG to '0' to disable MCAM parsing */
+                       nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+                                     (padd << 16));
+                       /* MPI index is same as CPI if MPI_ALG is not enabled */
+                       nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
+                                     (vnic << 24) | (rssi_base + rssi));
+               }
 
                if ((rssi + 1) >= cfg->rq_cnt)
                        continue;
@@ -409,6 +421,7 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
                        rssi = ((cpi - cpi_base) & 0x38) >> 3;
        }
        nic->cpi_base[cfg->vf_id] = cpi_base;
+       nic->rssi_base[cfg->vf_id] = rssi_base;
 }
 
 /* Responds to VF with its RSS indirection table size */
@@ -434,10 +447,9 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
 {
        u8  qset, idx = 0;
        u64 cpi_cfg, cpi_base, rssi_base, rssi;
+       u64 idx_addr;
 
-       cpi_base = nic->cpi_base[cfg->vf_id];
-       cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3));
-       rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset;
+       rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;
 
        rssi = rssi_base;
        qset = cfg->vf_id;
@@ -454,9 +466,15 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
                idx++;
        }
 
+       cpi_base = nic->cpi_base[cfg->vf_id];
+       if (pass1_silicon(nic))
+               idx_addr = NIC_PF_CPI_0_2047_CFG;
+       else
+               idx_addr = NIC_PF_MPI_0_2047_CFG;
+       cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
        cpi_cfg &= ~(0xFULL << 20);
        cpi_cfg |= (cfg->hash_bits << 20);
-       nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg);
+       nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
 }
 
 /* 4 level transmit side scheduler configuration
@@ -1001,8 +1019,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_release_regions;
        }
 
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id);
-
        nic->node = nic_get_node_id(pdev);
 
        nic_set_lmac_vf_mapping(nic);
index 58197bb2f80528bf06de0daaa2c406b96d4fc0c4..dd536be20193119c3465cd772a4bff6d649e3319 100644 (file)
 #define   NIC_PF_ECC3_DBE_INT_W1S              (0x2708)
 #define   NIC_PF_ECC3_DBE_ENA_W1C              (0x2710)
 #define   NIC_PF_ECC3_DBE_ENA_W1S              (0x2718)
+#define   NIC_PF_MCAM_0_191_ENA                        (0x100000)
+#define   NIC_PF_MCAM_0_191_M_0_5_DATA         (0x110000)
+#define   NIC_PF_MCAM_CTRL                     (0x120000)
 #define   NIC_PF_CPI_0_2047_CFG                        (0x200000)
+#define   NIC_PF_MPI_0_2047_CFG                        (0x210000)
 #define   NIC_PF_RSSI_0_4097_RQ                        (0x220000)
 #define   NIC_PF_LMAC_0_7_CFG                  (0x240000)
 #define   NIC_PF_LMAC_0_7_SW_XOFF              (0x242000)
index b63e579aeb12d09bbe7f6e3de5badced5b2d44ba..a9377727c11c3fdb18a09f16ce8366ae4ef48057 100644 (file)
@@ -29,7 +29,7 @@
 static const struct pci_device_id nicvf_id_table[] = {
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
                         PCI_DEVICE_ID_THUNDER_NIC_VF,
-                        PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+                        PCI_VENDOR_ID_CAVIUM, 0xA134) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
                         PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
                         PCI_VENDOR_ID_CAVIUM, 0xA11E) },
index 574c49278900943e5584fa9658f7eb6adb2d7452..180aa9fabf4820df042f18cba8c39d5ce1668e9d 100644 (file)
@@ -977,8 +977,10 @@ static int bgx_init_of_phy(struct bgx *bgx)
                SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
                bgx->lmac[lmac].lmacid = lmac;
                lmac++;
-               if (lmac == MAX_LMAC_PER_BGX)
+               if (lmac == MAX_LMAC_PER_BGX) {
+                       of_node_put(np_child);
                        break;
+               }
        }
        return 0;
 }
index 821540913343db10f59ae3a03835a084ca82063d..d463563e1f7039ee5176ca36abfdc6bae3f2ed46 100644 (file)
@@ -592,6 +592,7 @@ struct be_adapter {
        int be_get_temp_freq;
        struct be_hwmon hwmon_info;
        u8 pf_number;
+       u8 pci_func_num;
        struct rss_info rss_info;
        /* Filters for packets that need to be sent to BMC */
        u32 bmc_filt_mask;
index eb323913cd39fb981a8c0cc02140c0c7205ee4f8..1795c935ff023fcf795008a49a9a4cd0fce63d9c 100644 (file)
@@ -851,8 +851,10 @@ static int be_cmd_notify_wait(struct be_adapter *adapter,
                return status;
 
        dest_wrb = be_cmd_copy(adapter, wrb);
-       if (!dest_wrb)
-               return -EBUSY;
+       if (!dest_wrb) {
+               status = -EBUSY;
+               goto unlock;
+       }
 
        if (use_mcc(adapter))
                status = be_mcc_notify_wait(adapter);
@@ -862,6 +864,7 @@ static int be_cmd_notify_wait(struct be_adapter *adapter,
        if (!status)
                memcpy(wrb, dest_wrb, sizeof(*wrb));
 
+unlock:
        be_cmd_unlock(adapter);
        return status;
 }
@@ -1984,6 +1987,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
                         be_if_cap_flags(adapter));
        }
        flags &= be_if_cap_flags(adapter);
+       if (!flags)
+               return -ENOTSUPP;
 
        return __be_cmd_rx_filter(adapter, flags, value);
 }
@@ -2887,6 +2892,7 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
        if (!status) {
                attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
                adapter->hba_port_num = attribs->hba_attribs.phy_port;
+               adapter->pci_func_num = attribs->pci_func_num;
                serial_num = attribs->hba_attribs.controller_serial_number;
                for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
                        adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
@@ -3709,7 +3715,6 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
                        status = -EINVAL;
                        goto err;
                }
-
                adapter->pf_number = desc->pf_num;
                be_copy_nic_desc(res, desc);
        }
@@ -3721,7 +3726,10 @@ err:
        return status;
 }
 
-/* Will use MBOX only if MCCQ has not been created */
+/* Will use MBOX only if MCCQ has not been created
+ * non-zero domain => a PF is querying this on behalf of a VF
+ * zero domain => a PF or a VF is querying this for itself
+ */
 int be_cmd_get_profile_config(struct be_adapter *adapter,
                              struct be_resources *res, u8 query, u8 domain)
 {
@@ -3748,10 +3756,15 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
                               OPCODE_COMMON_GET_PROFILE_CONFIG,
                               cmd.size, &wrb, &cmd);
 
-       req->hdr.domain = domain;
        if (!lancer_chip(adapter))
                req->hdr.version = 1;
        req->type = ACTIVE_PROFILE_TYPE;
+       /* When a function is querying profile information relating to
+        * itself, hdr.pf_number must be set to its pci_func_num + 1
+        */
+       req->hdr.domain = domain;
+       if (domain == 0)
+               req->hdr.pf_num = adapter->pci_func_num + 1;
 
        /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
         * descriptors with all bits set to "1" for the fields which can be
@@ -3921,12 +3934,16 @@ static void be_fill_vf_res_template(struct be_adapter *adapter,
                        vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
                                             BE_IF_FLAGS_DEFQ_RSS);
                }
-
-               nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
        } else {
                num_vf_qs = 1;
        }
 
+       if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+               nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+               vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+       }
+
+       nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
        nic_vft->rq_count = cpu_to_le16(num_vf_qs);
        nic_vft->txq_count = cpu_to_le16(num_vf_qs);
        nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
index 7d178bdb112eb7d14d5a62d74ea21ecaa30ba0e9..91155ea74f342e2663f18848c9fe5546635e7c05 100644 (file)
@@ -289,7 +289,9 @@ struct be_cmd_req_hdr {
        u32 timeout;            /* dword 1 */
        u32 request_length;     /* dword 2 */
        u8 version;             /* dword 3 */
-       u8 rsvd[3];             /* dword 3 */
+       u8 rsvd1;               /* dword 3 */
+       u8 pf_num;              /* dword 3 */
+       u8 rsvd2;               /* dword 3 */
 };
 
 #define RESP_HDR_INFO_OPCODE_SHIFT     0       /* bits 0 - 7 */
@@ -1652,7 +1654,11 @@ struct mgmt_hba_attribs {
 
 struct mgmt_controller_attrib {
        struct mgmt_hba_attribs hba_attribs;
-       u32 rsvd0[10];
+       u32 rsvd0[2];
+       u16 rsvd1;
+       u8 pci_func_num;
+       u8 rsvd2;
+       u32 rsvd3[7];
 } __packed;
 
 struct be_cmd_req_cntl_attribs {
index 7bf51a1a0a77e12a4812662767dd21767813dc1b..eb48a977f8daabe78d6d5b6a94f6607bf19287b8 100644 (file)
@@ -1123,11 +1123,12 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           struct be_wrb_params *wrb_params)
 {
-       /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
-        * less may cause a transmit stall on that port. So the work-around is
-        * to pad short packets (<= 32 bytes) to a 36-byte length.
+       /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
+        * packets that are 32b or less may cause a transmit stall
+        * on that port. The workaround is to pad such packets
+        * (len <= 32 bytes) to a minimum length of 36b.
         */
-       if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
+       if (skb->len <= 32) {
                if (skb_put_padto(skb, 36))
                        return NULL;
        }
@@ -4205,10 +4206,6 @@ static int be_get_config(struct be_adapter *adapter)
        int status, level;
        u16 profile_id;
 
-       status = be_cmd_get_cntl_attributes(adapter);
-       if (status)
-               return status;
-
        status = be_cmd_query_fw_cfg(adapter);
        if (status)
                return status;
@@ -4407,6 +4404,11 @@ static int be_setup(struct be_adapter *adapter)
        if (!lancer_chip(adapter))
                be_cmd_req_native_mode(adapter);
 
+       /* Need to invoke this cmd first to get the PCI Function Number */
+       status = be_cmd_get_cntl_attributes(adapter);
+       if (status)
+               return status;
+
        if (!BE2_chip(adapter) && be_physfn(adapter))
                be_alloc_sriov_res(adapter);
 
@@ -4999,7 +5001,15 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter,
                return false;
        }
 
-       return (fhdr->asic_type_rev >= adapter->asic_rev);
+       /* In BE3 FW images the "asic_type_rev" field doesn't track the
+        * asic_rev of the chips it is compatible with.
+        * When asic_type_rev is 0 the image is compatible only with
+        * pre-BE3-R chips (asic_rev < 0x10)
+        */
+       if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
+               return adapter->asic_rev < 0x10;
+       else
+               return (fhdr->asic_type_rev >= adapter->asic_rev);
 }
 
 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
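
The comment added to be_check_ufi_compatibility() above describes a rule that is easy to state as a pure function: a BE3 image with asic_type_rev == 0 pairs only with pre-BE3-R silicon (asic_rev < 0x10), otherwise the image revision must be at least the chip's. A sketch with a hypothetical ufi_compatible() helper; the revision numbers in main() are made up.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool ufi_compatible(bool bex_chip, uint8_t asic_type_rev,
                               uint8_t asic_rev)
    {
        if (bex_chip && asic_type_rev == 0)
            return asic_rev < 0x10;          /* legacy image, pre-BE3-R only */
        return asic_type_rev >= asic_rev;    /* image at least as new as chip */
    }

    int main(void)
    {
        printf("%d\n", ufi_compatible(true, 0, 0x0b));  /* 1: pre-BE3-R chip */
        printf("%d\n", ufi_compatible(true, 0, 0x10));  /* 0: BE3-R chip     */
        return 0;
    }
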
index 3c40f6b9922436a32d255aa627e9d94db45f8477..55c36230e17634c3e063bdb20f4bb6a896bce4a6 100644 (file)
@@ -198,17 +198,28 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
 
 #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 /*
+ * Return the TBIPA address, starting from the address
+ * of the mapped GFAR MDIO registers (struct gfar)
  * This is mildly evil, but so is our hardware for doing this.
  * Also, we have to cast back to struct gfar because of
  * definition weirdness done in gianfar.h.
  */
-static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
+static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
 {
        struct gfar __iomem *enet_regs = p;
 
        return &enet_regs->tbipa;
 }
 
+/*
+ * Return the TBIPA address, starting from the address
+ * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar)
+ */
+static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p)
+{
+       return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs));
+}
+
 /*
  * Return the TBIPAR address for an eTSEC2 node
  */
@@ -220,11 +231,12 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
 
 #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
 /*
- * Return the TBIPAR address for a QE MDIO node
+ * Return the TBIPAR address for a QE MDIO node, starting from the address
+ * of the mapped MII registers (struct fsl_pq_mii)
  */
 static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
 {
-       struct fsl_pq_mdio __iomem *mdio = p;
+       struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii);
 
        return &mdio->utbipar;
 }
@@ -300,14 +312,14 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
                .compatible = "fsl,gianfar-tbi",
                .data = &(struct fsl_pq_mdio_data) {
                        .mii_offset = 0,
-                       .get_tbipa = get_gfar_tbipa,
+                       .get_tbipa = get_gfar_tbipa_from_mii,
                },
        },
        {
                .compatible = "fsl,gianfar-mdio",
                .data = &(struct fsl_pq_mdio_data) {
                        .mii_offset = 0,
-                       .get_tbipa = get_gfar_tbipa,
+                       .get_tbipa = get_gfar_tbipa_from_mii,
                },
        },
        {
@@ -315,7 +327,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
                .compatible = "gianfar",
                .data = &(struct fsl_pq_mdio_data) {
                        .mii_offset = offsetof(struct fsl_pq_mdio, mii),
-                       .get_tbipa = get_gfar_tbipa,
+                       .get_tbipa = get_gfar_tbipa_from_mdio,
                },
        },
        {
@@ -445,6 +457,16 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 
                        tbipa = data->get_tbipa(priv->map);
 
+                       /*
+                        * Add consistency check to make sure TBI is contained
+                        * within the mapped range (not because we would get a
+                        * segfault, rather to catch bugs in computing TBI
+                        * address). Print error message but continue anyway.
+                        */
+                       if ((void *)tbipa > priv->map + resource_size(&res) - 4)
+                               dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n",
+                                       ((void *)tbipa - priv->map) + 4);
+
                        iowrite32be(be32_to_cpup(prop), tbipa);
                }
        }
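
get_gfar_tbipa_from_mii() and get_ucc_tbipa() above lean on container_of(): given a pointer to the mapped MII member, step back to the enclosing register block and take the field that is actually wanted. The same trick in plain C, with an invented gfar_sketch layout rather than the real struct gfar.

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct gfar_sketch {
        unsigned int some_regs[4];
        unsigned int mii_regs[8];            /* stands in for gfar_mii_regs[] */
        unsigned int tbipa;                  /* the register we want          */
    };

    static unsigned int *tbipa_from_mii(unsigned int *mii)
    {
        struct gfar_sketch *gfar =
            container_of(mii, struct gfar_sketch, mii_regs);

        return &gfar->tbipa;
    }

    int main(void)
    {
        struct gfar_sketch g = { .tbipa = 0x1f };

        printf("tbipa = 0x%x\n", *tbipa_from_mii(g.mii_regs));  /* 0x1f */
        return 0;
    }

The added range check in the probe path is a sanity net for exactly this kind of pointer arithmetic: if the computed TBIPA address falls outside the mapped resource, the offset calculation was wrong.
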
index 710715fcb23dea7765ce3fc579c88fcd29ecb0da..ce38d266f931c506503b37749823eb388f1dd380 100644 (file)
@@ -341,7 +341,7 @@ static void gfar_rx_offload_en(struct gfar_private *priv)
        if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
                priv->uses_rxfcb = 1;
 
-       if (priv->hwts_rx_en)
+       if (priv->hwts_rx_en || priv->rx_filer_enable)
                priv->uses_rxfcb = 1;
 }
 
@@ -351,7 +351,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
        u32 rctrl = 0;
 
        if (priv->rx_filer_enable) {
-               rctrl |= RCTRL_FILREN;
+               rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
                /* Program the RIR0 reg with the required distribution */
                if (priv->poll_mode == GFAR_SQ_POLLING)
                        gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
@@ -3462,11 +3462,9 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
                netif_dbg(priv, tx_err, dev, "Transmit Error\n");
        }
        if (events & IEVENT_BSY) {
-               dev->stats.rx_errors++;
+               dev->stats.rx_over_errors++;
                atomic64_inc(&priv->extra_stats.rx_bsy);
 
-               gfar_receive(irq, grp_id);
-
                netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
                          gfar_read(&regs->rstat));
        }
index 6bdc89179b72d6487d031ecda8551453b9268cd4..a33e4a8296015d10906b4b0d02d72e6f5b048277 100644 (file)
@@ -676,14 +676,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
        u32 fcr = 0x0, fpr = FPR_FILER_MASK;
 
        if (ethflow & RXH_L2DA) {
-               fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
+               fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
                      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 
-               fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
+               fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
                      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
index cc2d8b4b18e3e2a99ef303b76809545496089787..253f8ed0537a058778fd08b2c5a0747f53d7eca4 100644 (file)
@@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
        struct net_device *ndev;
        struct hip04_priv *priv;
        struct resource *res;
-       unsigned int irq;
+       int irq;
        int ret;
 
        ndev = alloc_etherdev(sizeof(struct hip04_priv));
index 28df37420da963d5d8f3b3234e4f584442537121..ac02c675c59c4167b4870c892c6afcceb13aec4c 100644 (file)
@@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr {
        u32 index;
 };
 
-#define EMAC_ETHTOOL_REGS_VER          0
-#define EMAC4_ETHTOOL_REGS_VER         1
-#define EMAC4SYNC_ETHTOOL_REGS_VER     2
+#define EMAC_ETHTOOL_REGS_VER          3
+#define EMAC4_ETHTOOL_REGS_VER         4
+#define EMAC4SYNC_ETHTOOL_REGS_VER     5
 
 #endif /* __IBM_NEWEMAC_CORE_H */
index 3e0d20037675e84164ae9da793ddc080737e1d81..c0e943aecd1394262757e790b04a6ce1eccda72a 100644 (file)
@@ -386,7 +386,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
 
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;
-       hw->aq.asq.count = hw->aq.num_asq_entries;
 
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
@@ -404,6 +403,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
                goto init_adminq_free_rings;
 
        /* success! */
+       hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -445,7 +445,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
 
        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;
-       hw->aq.arq.count = hw->aq.num_arq_entries;
 
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
@@ -463,6 +462,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
                goto init_adminq_free_rings;
 
        /* success! */
+       hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
        /* take the lock before we start messing with the ring */
        mutex_lock(&hw->aq.arq_mutex);
 
+       if (hw->aq.arq.count == 0) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQRX: Admin queue not initialized.\n");
+               ret_code = I40E_ERR_QUEUE_EMPTY;
+               goto clean_arq_element_err;
+       }
+
        /* set next_to_use to head */
        ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
        if (ntu == ntc) {
@@ -1007,6 +1014,8 @@ clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
        mutex_unlock(&hw->aq.arq_mutex);
 
        if (i40e_is_nvm_update_op(&e->desc)) {
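
The i40e admin-queue hunks above turn hw->aq.asq.count / hw->aq.arq.count into an "is initialized" marker: the count is published only after the ring memory has been allocated, and i40e_clean_arq_element() now bails out under arq_mutex while the count is still zero instead of walking a ring that was never (or is no longer) set up. A minimal kernel-style sketch of that pattern; the names here (sketch_ring, sketch_ring_init, sketch_ring_clean) are illustrative, not driver code:

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

struct sketch_ring {
	struct mutex lock;	/* serializes consumers against teardown */
	unsigned int count;	/* 0 means "not initialized" */
	u64 *descs;
};

static int sketch_ring_init(struct sketch_ring *r, unsigned int entries)
{
	r->count = 0;
	r->descs = kcalloc(entries, sizeof(*r->descs), GFP_KERNEL);
	if (!r->descs)
		return -ENOMEM;
	/* publish the count last, only once setup fully succeeded */
	r->count = entries;
	return 0;
}

static int sketch_ring_clean(struct sketch_ring *r)
{
	int ret = 0;

	mutex_lock(&r->lock);
	if (!r->count) {	/* queue never came up, or was torn down */
		ret = -EINVAL;
		goto out;
	}
	/* ... safe to dereference r->descs here ... */
out:
	mutex_unlock(&r->lock);
	return ret;
}
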
index e972b5ecbf0b6e1bfb2f11ca93a3f13a18438715..13a5d4cf494bc76e98fd500afe856951703bf244 100644 (file)
@@ -1344,6 +1344,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                        data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
                                     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
                }
+               for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
+                       data[i++] = veb->tc_stats.tc_tx_packets[j];
+                       data[i++] = veb->tc_stats.tc_tx_bytes[j];
+                       data[i++] = veb->tc_stats.tc_rx_packets[j];
+                       data[i++] = veb->tc_stats.tc_rx_bytes[j];
+               }
        }
        for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
                p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
index 851c1a159be8a1566d8a792c52bf4e83ea6877d7..3dd26cdd0bf27365ec60d084c027800bee128f93 100644 (file)
@@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
                rx_ctx.lrxqthresh = 2;
        rx_ctx.crcstrip = 1;
        rx_ctx.l2tsel = 1;
-       rx_ctx.showiv = 1;
+       /* this controls whether VLAN is stripped from inner headers */
+       rx_ctx.showiv = 0;
 #ifdef I40E_FCOE
        rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
 #endif
@@ -7910,6 +7911,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
        if (pf->hw.func_caps.vmdq) {
                pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
                pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+               pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
        }
 
 #ifdef I40E_FCOE
@@ -8388,6 +8390,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 
        netdev->hw_enc_features |= NETIF_F_IP_CSUM       |
                                  NETIF_F_GSO_UDP_TUNNEL |
+                                 NETIF_F_GSO_GRE        |
                                  NETIF_F_TSO;
 
        netdev->features = NETIF_F_SG                  |
@@ -8395,6 +8398,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                           NETIF_F_SCTP_CSUM           |
                           NETIF_F_HIGHDMA             |
                           NETIF_F_GSO_UDP_TUNNEL      |
+                          NETIF_F_GSO_GRE             |
                           NETIF_F_HW_VLAN_CTAG_TX     |
                           NETIF_F_HW_VLAN_CTAG_RX     |
                           NETIF_F_HW_VLAN_CTAG_FILTER |
index f08450b907745afefcdd6da766f7a32ae3aa57c2..a23ebfd5cd254014cd0632979aa441a2a6f6eadf 100644 (file)
@@ -373,7 +373,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
 
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;
-       hw->aq.asq.count = hw->aq.num_asq_entries;
 
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
@@ -391,6 +390,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
                goto init_adminq_free_rings;
 
        /* success! */
+       hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -432,7 +432,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
 
        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;
-       hw->aq.arq.count = hw->aq.num_arq_entries;
 
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
@@ -450,6 +449,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
                goto init_adminq_free_rings;
 
        /* success! */
+       hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
        /* take the lock before we start messing with the ring */
        mutex_lock(&hw->aq.arq_mutex);
 
+       if (hw->aq.arq.count == 0) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQRX: Admin queue not initialized.\n");
+               ret_code = I40E_ERR_QUEUE_EMPTY;
+               goto clean_arq_element_err;
+       }
+
        /* set next_to_use to head */
        ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
        if (ntu == ntc) {
@@ -948,6 +955,8 @@ clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
        mutex_unlock(&hw->aq.arq_mutex);
 
        return ret_code;
index 960169efe636a659241f7e8fae10706fd0503939..dfb6d5f79a1002a0101e4ae7fe2d7bf466f49c6f 100644 (file)
@@ -759,11 +759,23 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
 
        desc->l4i_chk = 0;
        desc->byte_cnt = length;
-       desc->buf_ptr = dma_map_single(dev->dev.parent, data,
-                                      length, DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
-               WARN(1, "dma_map_single failed!\n");
-               return -ENOMEM;
+
+       if (length <= 8 && (uintptr_t)data & 0x7) {
+               /* Copy unaligned small data fragment to TSO header data area */
+               memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
+                      data, length);
+               desc->buf_ptr = txq->tso_hdrs_dma
+                       + txq->tx_curr_desc * TSO_HEADER_SIZE;
+       } else {
+               /* Alignment is okay, map buffer and hand off to hardware */
+               txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
+               desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+                       length, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev->dev.parent,
+                                              desc->buf_ptr))) {
+                       WARN(1, "dma_map_single failed!\n");
+                       return -ENOMEM;
+               }
        }
 
        cmd_sts = BUFFER_OWNED_BY_DMA;
@@ -779,7 +791,8 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
 }
 
 static inline void
-txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
+               u32 *first_cmd_sts, bool first_desc)
 {
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -788,6 +801,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
        int ret;
        u32 cmd_csum = 0;
        u16 l4i_chk = 0;
+       u32 cmd_sts;
 
        tx_index = txq->tx_curr_desc;
        desc = &txq->tx_desc_area[tx_index];
@@ -803,9 +817,17 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
        desc->byte_cnt = hdr_len;
        desc->buf_ptr = txq->tso_hdrs_dma +
                        txq->tx_curr_desc * TSO_HEADER_SIZE;
-       desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA  | TX_FIRST_DESC |
+       cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA  | TX_FIRST_DESC |
                                   GEN_CRC;
 
+       /* Defer updating the first command descriptor until all
+        * following descriptors have been written.
+        */
+       if (first_desc)
+               *first_cmd_sts = cmd_sts;
+       else
+               desc->cmd_sts = cmd_sts;
+
        txq->tx_curr_desc++;
        if (txq->tx_curr_desc == txq->tx_ring_size)
                txq->tx_curr_desc = 0;
@@ -819,6 +841,8 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
        int desc_count = 0;
        struct tso_t tso;
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       struct tx_desc *first_tx_desc;
+       u32 first_cmd_sts = 0;
 
        /* Count needed descriptors */
        if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
@@ -826,11 +850,14 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
                return -EBUSY;
        }
 
+       first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
+
        /* Initialize the TSO handler, and prepare the first payload */
        tso_start(skb, &tso);
 
        total_len = skb->len - hdr_len;
        while (total_len > 0) {
+               bool first_desc = (desc_count == 0);
                char *hdr;
 
                data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
@@ -840,7 +867,8 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
                /* prepare packet headers: MAC + IP + TCP */
                hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
                tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
-               txq_put_hdr_tso(skb, txq, data_left);
+               txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
+                               first_desc);
 
                while (data_left > 0) {
                        int size;
@@ -860,6 +888,10 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
        __skb_queue_tail(&txq->tx_skb, skb);
        skb_tx_timestamp(skb);
 
+       /* ensure all other descriptors are written before first cmd_sts */
+       wmb();
+       first_tx_desc->cmd_sts = first_cmd_sts;
+
        /* clear TX_END status */
        mp->work_tx_end &= ~(1 << txq->index);
 
@@ -2785,8 +2817,10 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
 
        for_each_available_child_of_node(np, pnp) {
                ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
-               if (ret)
+               if (ret) {
+                       of_node_put(pnp);
                        return ret;
+               }
        }
        return 0;
 }
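
The TSO hunks above keep the hardware off the descriptor chain until it is complete: the first descriptor's cmd_sts is captured in first_cmd_sts instead of being written immediately, all following descriptors are filled in, and only after a wmb() is the first descriptor's ownership bit handed to the DMA engine, so it can never start on a half-built chain. A stripped-down sketch of that ordering; the names and flag value are illustrative, not the driver's:

#include <linux/types.h>
#include <asm/barrier.h>

#define SKETCH_OWNED_BY_DMA	(1U << 31)	/* assumed ownership bit */

struct sketch_desc {
	u32 cmd_sts;
	/* buffer pointer, byte count, ... */
};

static void sketch_submit_chain(struct sketch_desc *desc, int n)
{
	u32 first_cmd_sts = 0;
	int i;

	for (i = 0; i < n; i++) {
		u32 cmd_sts = SKETCH_OWNED_BY_DMA; /* plus per-descriptor flags */

		if (i == 0)
			first_cmd_sts = cmd_sts;	/* defer the handoff */
		else
			desc[i].cmd_sts = cmd_sts;
	}

	/* make every other descriptor visible before the DMA engine can
	 * possibly start walking the chain from its head
	 */
	wmb();
	desc[0].cmd_sts = first_cmd_sts;
}
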
index 0a3202047569c707a28f62376466e72fdcd8cd00..2177e56ed0be7d18ee40d2428b69003bd6f8cca5 100644 (file)
@@ -2398,7 +2398,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                        }
                }
 
-               memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
+               memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
                priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
                INIT_WORK(&priv->mfunc.master.comm_work,
                          mlx4_master_comm_channel);
index 494e7762fdb19efb83d76f187b88fd37d422d5b5..4421bf5463f67159618c3a4c572b4e9ebc04dd7b 100644 (file)
@@ -964,6 +964,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                        tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
                else if (vlan_proto == ETH_P_8021Q)
                        tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+               else
+                       tx_desc->ctrl.ins_vlan = 0;
 
                tx_desc->ctrl.fence_size = real_size;
 
index 8e81e53c370e7d54e6367c012212cccc73ee26fd..603d1c3d3b2ea1a8a98de110f1e777f1d751e649 100644 (file)
@@ -196,7 +196,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
                return;
        }
 
-       memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
+       memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
        s_eqe->slave_id = slave;
        /* ensure all information is written before setting the ownership bit */
        dma_wmb();
@@ -1364,6 +1364,10 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
         * and performing a NOP command
         */
        for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
+               /* Make sure request_irq was called */
+               if (!priv->eq_table.eq[i].have_irq)
+                       continue;
+
                /* Temporary use polling for command completions */
                mlx4_cmd_use_polling(dev);
 
index 006757f80988bcb71cfc352542305b61310ef40f..cc3a9897574c542ee368ab067c109128ce89b290 100644 (file)
@@ -2669,14 +2669,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 
        if (msi_x) {
                int nreq = dev->caps.num_ports * num_online_cpus() + 1;
-               bool shared_ports = false;
 
                nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
                             nreq);
-               if (nreq > MAX_MSIX) {
+               if (nreq > MAX_MSIX)
                        nreq = MAX_MSIX;
-                       shared_ports = true;
-               }
 
                entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
                if (!entries)
@@ -2699,9 +2696,6 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
                            dev->caps.num_ports);
 
-               if (MLX4_IS_LEGACY_EQ_MODE(dev->caps))
-                       shared_ports = true;
-
                for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
                        if (i == MLX4_EQ_ASYNC)
                                continue;
@@ -2709,7 +2703,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                        priv->eq_table.eq[i].irq =
                                entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
 
-                       if (shared_ports) {
+                       if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
                                bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
                                            dev->caps.num_ports);
                                /* We don't set affinity hint when there
index bd9ea0d01aae4cba296d2eba5e9f7a865b5a0426..1d4e2e054647ae3da57bfad23f4460f622ca85b0 100644 (file)
@@ -1184,10 +1184,11 @@ out:
        if (prot == MLX4_PROT_ETH) {
                /* manage the steering entry for promisc mode */
                if (new_entry)
-                       new_steering_entry(dev, port, steer, index, qp->qpn);
+                       err = new_steering_entry(dev, port, steer,
+                                                index, qp->qpn);
                else
-                       existing_steering_entry(dev, port, steer,
-                                               index, qp->qpn);
+                       err = existing_steering_entry(dev, port, steer,
+                                                     index, qp->qpn);
        }
        if (err && link && index != -1) {
                if (index < dev->caps.num_mgms)
index e71563ce05d1bc34123fd4aa63348d569adf4c57..22d603f7827333e2dce6eab7be6ed6acd5a14cdb 100644 (file)
@@ -598,6 +598,8 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
                return;
 
        priv->vlan.filter_disabled = false;
+       if (priv->netdev->flags & IFF_PROMISC)
+               return;
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
 }
 
@@ -607,6 +609,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
                return;
 
        priv->vlan.filter_disabled = true;
+       if (priv->netdev->flags & IFF_PROMISC)
+               return;
        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
 }
 
@@ -717,8 +721,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
        bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
        bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
 
-       if (enable_promisc)
+       if (enable_promisc) {
                mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+               if (!priv->vlan.filter_disabled)
+                       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+                                           0);
+       }
        if (enable_allmulti)
                mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
        if (enable_broadcast)
@@ -730,8 +738,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
                mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
        if (disable_allmulti)
                mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
-       if (disable_promisc)
+       if (disable_promisc) {
+               if (!priv->vlan.filter_disabled)
+                       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+                                           0);
                mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+       }
 
        ea->promisc_enabled   = promisc_enabled;
        ea->allmulti_enabled  = allmulti_enabled;
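
Together, the mlx5e hunks above keep exactly one "any VID" VLAN rule installed whenever it is needed: entering promiscuous mode adds it if VLAN filtering is still enabled, leaving promiscuous mode removes it again, and the filter enable/disable paths skip touching the rule while promisc is active so it is neither dropped too early nor added twice. The invariant being maintained is roughly the following sketch; sketch_sync_any_vid_rule and the add/del helpers are hypothetical:

#include <linux/types.h>

/* Invariant: the "accept any VLAN" rule exists iff promisc is on or VLAN
 * filtering is off, and it is never added or removed twice in a row.
 */
static void sketch_sync_any_vid_rule(bool promisc, bool filter_disabled,
				     bool *any_vid_installed)
{
	bool want = promisc || filter_disabled;

	if (want && !*any_vid_installed) {
		/* add_any_vid_rule(); */
		*any_vid_installed = true;
	} else if (!want && *any_vid_installed) {
		/* del_any_vid_rule(); */
		*any_vid_installed = false;
	}
}
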
index aa0d5ffe92d8177234c1975d958a76751a9539c6..9335e5ae18ccee954b4cc08eff41a01871b41b2e 100644 (file)
@@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
 
        return err;
 }
-
-int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey)
-{
-       struct mlx5_cmd_query_special_contexts_mbox_in in;
-       struct mlx5_cmd_query_special_contexts_mbox_out out;
-       int err;
-
-       memset(&in, 0, sizeof(in));
-       memset(&out, 0, sizeof(out));
-       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
-       err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-       if (err)
-               return err;
-
-       if (out.hdr.status)
-               err = mlx5_cmd_status_to_err(&out.hdr);
-
-       *rsvd_lkey = be32_to_cpu(out.resd_lkey);
-
-       return err;
-}
-EXPORT_SYMBOL(mlx5_core_query_special_context);
index 821caaab9bfb04697fb0424cb8498bdc9eacabed..3b9480fa3403dee018931c16b678f5ec6c98e913 100644 (file)
@@ -311,7 +311,7 @@ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
        int err;
 
        memset(in, 0, sizeof(in));
-       MLX5_SET(ptys_reg, in, local_port, local_port);
+       MLX5_SET(pvlc_reg, in, local_port, local_port);
 
        err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
                                   pvlc_size, MLX5_REG_PVLC, 0, 0);
index dbcaf5df8967e828f85648de4edce281860f2a61..28c19cc1a17c5f44a7b15103ce168651502843d8 100644 (file)
@@ -374,26 +374,31 @@ static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
        int err;
        int ret;
 
+       mlxsw_core->emad.trans_active = true;
+
        err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
        if (err) {
                dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
                        mlxsw_core->emad.tid);
                dev_kfree_skb(skb);
-               return err;
+               goto trans_inactive_out;
        }
 
-       mlxsw_core->emad.trans_active = true;
        ret = wait_event_timeout(mlxsw_core->emad.wait,
                                 !(mlxsw_core->emad.trans_active),
                                 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
        if (!ret) {
                dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
                         mlxsw_core->emad.tid);
-               mlxsw_core->emad.trans_active = false;
-               return -EIO;
+               err = -EIO;
+               goto trans_inactive_out;
        }
 
        return 0;
+
+trans_inactive_out:
+       mlxsw_core->emad.trans_active = false;
+       return err;
 }
 
 static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
index ffd55d030ce28dbf902f6990a0dfb7f96b08a766..36fb1cec53c98e9c7dad0cb7b9b59051d7e0c352 100644 (file)
@@ -187,6 +187,7 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
 {
        u16 max_index, be_index;
        u16 offset;             /* byte offset inside the array */
+       u8 in_byte_index;
 
        BUG_ON(index && !item->element_size);
        if (item->offset % sizeof(u32) != 0 ||
@@ -199,7 +200,8 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
        max_index = (item->size.bytes << 3) / item->element_size - 1;
        be_index = max_index - index;
        offset = be_index * item->element_size >> 3;
-       *shift = index % (BITS_PER_BYTE / item->element_size) << 1;
+       in_byte_index  = index % (BITS_PER_BYTE / item->element_size);
+       *shift = in_byte_index * item->element_size;
 
        return item->offset + offset;
 }
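
The __mlxsw_item_bit_array_offset change above derives the in-byte shift from the element size instead of hard-coding << 1, which only happened to be right for 2-bit elements. A small worked fragment, assuming a 4-bit element size for illustration:

	/* BITS_PER_BYTE = 8, element_size = 4  =>  2 elements per byte */
	u16 index = 3;
	u8 element_size = 4;
	u8 in_byte_index = index % (8 / element_size);	/* = 1 */

	u8 old_shift = in_byte_index << 1;		/* = 2: lands inside element 0 */
	u8 new_shift = in_byte_index * element_size;	/* = 4: start of element 1, as intended */
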
index 462cea31ecbb7be6387512dc741286d7fb56658e..cef866c37648ca0c93d4f109eb9d4f3671fcd890 100644 (file)
@@ -1582,11 +1582,11 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
 
        if (in_mbox)
                memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
-       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
-       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
+       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
+       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
 
-       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
-       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
+       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
+       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
 
        mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
        mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
index 3e52ee93438c00188d0efe6482cfbd9f686b8ee9..62cbbd1ada8da6a5d8343beb394b67dc5af6a81c 100644 (file)
@@ -1069,9 +1069,9 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
        return 0;
 
 err_register_netdev:
-err_port_admin_status_set:
 err_port_mac_learning_mode_set:
 err_port_stp_state_set:
+err_port_admin_status_set:
 err_port_mtu_set:
 err_port_speed_set:
 err_port_swid_set:
index a41bb5e6b954f0e6b40375b76a52ed8a254cdb18..75e88f4c15315c84c20106022f38edc1dccf75e7 100644 (file)
@@ -4076,6 +4076,8 @@ static void nv_do_nic_poll(unsigned long data)
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 mask = 0;
+       unsigned long flags;
+       unsigned int irq = 0;
 
        /*
         * First disable irq(s) and then
@@ -4085,25 +4087,27 @@ static void nv_do_nic_poll(unsigned long data)
 
        if (!using_multi_irqs(dev)) {
                if (np->msi_flags & NV_MSI_X_ENABLED)
-                       disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+                       irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector;
                else
-                       disable_irq_lockdep(np->pci_dev->irq);
+                       irq = np->pci_dev->irq;
                mask = np->irqmask;
        } else {
                if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
-                       disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+                       irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector;
                        mask |= NVREG_IRQ_RX_ALL;
                }
                if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
-                       disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+                       irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector;
                        mask |= NVREG_IRQ_TX_ALL;
                }
                if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
-                       disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+                       irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector;
                        mask |= NVREG_IRQ_OTHER;
                }
        }
-       /* disable_irq() contains synchronize_irq, thus no irq handler can run now */
+
+       disable_irq_nosync_lockdep_irqsave(irq, &flags);
+       synchronize_irq(irq);
 
        if (np->recover_error) {
                np->recover_error = 0;
@@ -4156,28 +4160,22 @@ static void nv_do_nic_poll(unsigned long data)
                        nv_nic_irq_optimized(0, dev);
                else
                        nv_nic_irq(0, dev);
-               if (np->msi_flags & NV_MSI_X_ENABLED)
-                       enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-               else
-                       enable_irq_lockdep(np->pci_dev->irq);
        } else {
                if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
                        np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
                        nv_nic_irq_rx(0, dev);
-                       enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
                }
                if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
                        np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
                        nv_nic_irq_tx(0, dev);
-                       enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
                }
                if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
                        np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
                        nv_nic_irq_other(0, dev);
-                       enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
                }
        }
 
+       enable_irq_lockdep_irqrestore(irq, &flags);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
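
The forcedeth hunks above replace the per-vector disable_irq_lockdep()/enable_irq_lockdep() pairs with a single non-sleeping disable plus an explicit synchronize_irq() before the poll and one re-enable afterwards, so the timer callback no longer sleeps inside disable_irq() yet still cannot race a running handler. The resulting shape, roughly (this fragment is only illustrative; the vector selection is the code above):

	unsigned long flags;
	unsigned int irq = 0;	/* whichever vector the code above selected */

	disable_irq_nosync_lockdep_irqsave(irq, &flags);	/* mask without sleeping */
	synchronize_irq(irq);		/* wait out any handler already running */

	/* ... handle recover_error and run the deferred interrupt work ... */

	enable_irq_lockdep_irqrestore(irq, &flags);
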
index 66fd868152e579a4ba3483fd4726f0e9d9662436..b159ef8303cc3e65d1e374367d19ca590d934901 100644 (file)
@@ -476,13 +476,12 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
        mac[5] = tmp >> 8;
 }
 
-static void __lpc_eth_clock_enable(struct netdata_local *pldat,
-                                  bool enable)
+static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
 {
        if (enable)
-               clk_enable(pldat->clk);
+               clk_prepare_enable(pldat->clk);
        else
-               clk_disable(pldat->clk);
+               clk_disable_unprepare(pldat->clk);
 }
 
 static void __lpc_params_setup(struct netdata_local *pldat)
@@ -1494,7 +1493,7 @@ err_out_free_irq:
 err_out_iounmap:
        iounmap(pldat->net_base);
 err_out_disable_clocks:
-       clk_disable(pldat->clk);
+       clk_disable_unprepare(pldat->clk);
        clk_put(pldat->clk);
 err_out_free_dev:
        free_netdev(ndev);
@@ -1519,7 +1518,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
        iounmap(pldat->net_base);
        mdiobus_unregister(pldat->mii_bus);
        mdiobus_free(pldat->mii_bus);
-       clk_disable(pldat->clk);
+       clk_disable_unprepare(pldat->clk);
        clk_put(pldat->clk);
        free_netdev(ndev);
 
@@ -1540,7 +1539,7 @@ static int lpc_eth_drv_suspend(struct platform_device *pdev,
                if (netif_running(ndev)) {
                        netif_device_detach(ndev);
                        __lpc_eth_shutdown(pldat);
-                       clk_disable(pldat->clk);
+                       clk_disable_unprepare(pldat->clk);
 
                        /*
                         * Reset again now clock is disabled to be sure
index 2b32e0c5a0b46bcdb4e50d1931c676f2f65503d0..b4f21232019a98c7e0afd9e5a43a5a160da765fe 100644 (file)
@@ -6081,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
 {
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;
-       u16 rg_saw_cnt;
+       int rg_saw_cnt;
        u32 data;
        static const struct ephy_info e_info_8168h_1[] = {
                { 0x1e, 0x0800, 0x0001 },
index 257ea713b4c1564fa680acfcf7ad760268ea0df6..a484d8beb8557935b30251dc70bf3ef3d3b64dbc 100644 (file)
@@ -1127,7 +1127,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
        struct sh_eth_txdesc *txdesc = NULL;
        int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
        int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
-       int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+       int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
        dma_addr_t dma_addr;
 
        mdp->cur_rx = 0;
@@ -1148,8 +1148,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
 
                /* RX descriptor */
                rxdesc = &mdp->rx_ring[i];
-               /* The size of the buffer is a multiple of 16 bytes. */
-               rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+               /* The size of the buffer is a multiple of 32 bytes. */
+               rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
                dma_addr = dma_map_single(&ndev->dev, skb->data,
                                          rxdesc->buffer_length,
                                          DMA_FROM_DEVICE);
@@ -1450,7 +1450,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        struct sk_buff *skb;
        u16 pkt_len = 0;
        u32 desc_status;
-       int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+       int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
        dma_addr_t dma_addr;
 
        boguscnt = min(boguscnt, *quota);
@@ -1506,7 +1506,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
                        dma_unmap_single(&ndev->dev, rxdesc->addr,
-                                        ALIGN(mdp->rx_buf_sz, 16),
+                                        ALIGN(mdp->rx_buf_sz, 32),
                                         DMA_FROM_DEVICE);
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
@@ -1524,8 +1524,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
                entry = mdp->dirty_rx % mdp->num_rx_ring;
                rxdesc = &mdp->rx_ring[entry];
-               /* The size of the buffer is 16 byte boundary. */
-               rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+               /* The size of the buffer is 32 byte boundary. */
+               rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
 
                if (mdp->rx_skbuff[entry] == NULL) {
                        skb = netdev_alloc_skb(ndev, skbuff_size);
index 98d172b04f71815a1105b304ea5cadfa552687a2..a9b9460de0d63b2369062ce1fc9dee90909878ee 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <linux/delay.h>
 #include <linux/moduleparam.h>
-#include <asm/cmpxchg.h>
+#include <linux/atomic.h>
 #include "net_driver.h"
 #include "nic.h"
 #include "io.h"
index ad62615a93dcfe238fd23cf0487e5c6ea60330b7..c771e0af4e06d76028e7e8b85f36c09b9e19c149 100644 (file)
@@ -401,8 +401,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
 /* For Siena platforms NIC time is s and ns */
 static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
 {
-       struct timespec ts = ns_to_timespec(ns);
-       *nic_major = ts.tv_sec;
+       struct timespec64 ts = ns_to_timespec64(ns);
+       *nic_major = (u32)ts.tv_sec;
        *nic_minor = ts.tv_nsec;
 }
 
@@ -431,8 +431,8 @@ static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
  */
 static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
 {
-       struct timespec ts = ns_to_timespec(ns);
-       u32 maj = ts.tv_sec;
+       struct timespec64 ts = ns_to_timespec64(ns);
+       u32 maj = (u32)ts.tv_sec;
        u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
                         (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
 
@@ -646,28 +646,28 @@ static void efx_ptp_send_times(struct efx_nic *efx,
                               struct pps_event_time *last_time)
 {
        struct pps_event_time now;
-       struct timespec limit;
+       struct timespec64 limit;
        struct efx_ptp_data *ptp = efx->ptp_data;
-       struct timespec start;
+       struct timespec64 start;
        int *mc_running = ptp->start.addr;
 
        pps_get_ts(&now);
        start = now.ts_real;
        limit = now.ts_real;
-       timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
+       timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
 
        /* Write host time for specified period or until MC is done */
-       while ((timespec_compare(&now.ts_real, &limit) < 0) &&
+       while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
               ACCESS_ONCE(*mc_running)) {
-               struct timespec update_time;
+               struct timespec64 update_time;
                unsigned int host_time;
 
                /* Don't update continuously to avoid saturating the PCIe bus */
                update_time = now.ts_real;
-               timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
+               timespec64_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
                do {
                        pps_get_ts(&now);
-               } while ((timespec_compare(&now.ts_real, &update_time) < 0) &&
+               } while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
                         ACCESS_ONCE(*mc_running));
 
                /* Synchronise NIC with single word of time only */
@@ -723,7 +723,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
        struct efx_ptp_data *ptp = efx->ptp_data;
        u32 last_sec;
        u32 start_sec;
-       struct timespec delta;
+       struct timespec64 delta;
        ktime_t mc_time;
 
        if (number_readings == 0)
@@ -737,14 +737,14 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
         */
        for (i = 0; i < number_readings; i++) {
                s32 window, corrected;
-               struct timespec wait;
+               struct timespec64 wait;
 
                efx_ptp_read_timeset(
                        MCDI_ARRAY_STRUCT_PTR(synch_buf,
                                              PTP_OUT_SYNCHRONIZE_TIMESET, i),
                        &ptp->timeset[i]);
 
-               wait = ktime_to_timespec(
+               wait = ktime_to_timespec64(
                        ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
                window = ptp->timeset[i].window;
                corrected = window - wait.tv_nsec;
@@ -803,7 +803,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
                                          ptp->timeset[last_good].minor, 0);
 
        /* Calculate delay from NIC top of second to last_time */
-       delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
+       delta.tv_nsec += ktime_to_timespec64(mc_time).tv_nsec;
 
        /* Set PPS timestamp to match NIC top of second */
        ptp->host_time_pps = *last_time;
index 8fc90f1c872c6fe5afe2105aae62c54d8283800f..874fb297e96c563525a5d275a8e2239b6ab41725 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
+#include <linux/of_mdio.h>
 #include <linux/of_net.h>
 #include <linux/of_device.h>
 #include <linux/if_vlan.h>
@@ -365,6 +366,7 @@ struct cpsw_priv {
        spinlock_t                      lock;
        struct platform_device          *pdev;
        struct net_device               *ndev;
+       struct device_node              *phy_node;
        struct napi_struct              napi_rx;
        struct napi_struct              napi_tx;
        struct device                   *dev;
@@ -1145,7 +1147,11 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
                cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
                                   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
-       slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+       if (priv->phy_node)
+               slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+                                &cpsw_adjust_link, 0, slave->data->phy_if);
+       else
+               slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
                                 &cpsw_adjust_link, slave->data->phy_if);
        if (IS_ERR(slave->phy)) {
                dev_err(priv->dev, "phy %s not found on slave %d\n",
@@ -1934,11 +1940,12 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
        slave->port_vlan = data->dual_emac_res_vlan;
 }
 
-static int cpsw_probe_dt(struct cpsw_platform_data *data,
+static int cpsw_probe_dt(struct cpsw_priv *priv,
                         struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
        struct device_node *slave_node;
+       struct cpsw_platform_data *data = &priv->data;
        int i = 0, ret;
        u32 prop;
 
@@ -2029,6 +2036,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                if (strcmp(slave_node->name, "slave"))
                        continue;
 
+               priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
                parp = of_get_property(slave_node, "phy_id", &lenp);
                if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
                        dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
@@ -2044,7 +2052,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                }
                snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
                         PHY_ID_FMT, mdio->name, phyid);
-
                slave_data->phy_if = of_get_phy_mode(slave_node);
                if (slave_data->phy_if < 0) {
                        dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
@@ -2240,7 +2247,7 @@ static int cpsw_probe(struct platform_device *pdev)
        /* Select default pin state */
        pinctrl_pm_select_default_state(&pdev->dev);
 
-       if (cpsw_probe_dt(&priv->data, pdev)) {
+       if (cpsw_probe_dt(priv, pdev)) {
                dev_err(&pdev->dev, "cpsw: platform data missing\n");
                ret = -ENODEV;
                goto clean_runtime_disable_ret;
index 6bff8d82ceab7a428e73048504a5dd63f4a1c1bd..4e70e7586a0918a045008b7e54e807f0f11e2de9 100644 (file)
@@ -2637,8 +2637,10 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
                        mac_phy_link = true;
 
                slave->open = true;
-               if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
+               if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
+                       of_node_put(port);
                        break;
+               }
        }
 
        /* of_phy_connect() is needed only for MAC-PHY interface */
@@ -3137,8 +3139,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
                        continue;
                }
                gbe_dev->num_slaves++;
-               if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
+               if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
+                       of_node_put(interface);
                        break;
+               }
        }
        of_node_put(interfaces);
 
index a83263743665411eacb0ca845e23f52db612866c..2b7550c43f7800fe36e54fafacabfcd921c408f4 100644 (file)
@@ -2134,10 +2134,11 @@ static int rhine_rx(struct net_device *dev, int limit)
                        }
 
                        skb_put(skb, pkt_len);
-                       skb->protocol = eth_type_trans(skb, dev);
 
                        rhine_rx_vlan_tag(skb, desc, data_size);
 
+                       skb->protocol = eth_type_trans(skb, dev);
+
                        netif_receive_skb(skb);
 
                        u64_stats_update_begin(&rp->rx_stats.syncp);
index 8f5c02eed47de09883b43b8b587717993064ef0b..445071c163cb3b45412af65e58353a7eae26842a 100644 (file)
@@ -594,14 +594,12 @@ static struct rtable *geneve_get_rt(struct sk_buff *skb,
        rt = ip_route_output_key(geneve->net, fl4);
        if (IS_ERR(rt)) {
                netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
-               dev->stats.tx_carrier_errors++;
-               return rt;
+               return ERR_PTR(-ENETUNREACH);
        }
        if (rt->dst.dev == dev) { /* is this necessary? */
                netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
-               dev->stats.collisions++;
                ip_rt_put(rt);
-               return ERR_PTR(-EINVAL);
+               return ERR_PTR(-ELOOP);
        }
        return rt;
 }
@@ -627,12 +625,12 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
        struct ip_tunnel_info *info = NULL;
        struct rtable *rt = NULL;
        const struct iphdr *iip; /* interior IP header */
+       int err = -EINVAL;
        struct flowi4 fl4;
        __u8 tos, ttl;
        __be16 sport;
        bool udp_csum;
        __be16 df;
-       int err;
 
        if (geneve->collect_md) {
                info = skb_tunnel_info(skb);
@@ -647,7 +645,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
        rt = geneve_get_rt(skb, dev, &fl4, info);
        if (IS_ERR(rt)) {
                netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
-               dev->stats.tx_carrier_errors++;
+               err = PTR_ERR(rt);
                goto tx_error;
        }
 
@@ -699,10 +697,37 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
 tx_error:
        dev_kfree_skb(skb);
 err:
-       dev->stats.tx_errors++;
+       if (err == -ELOOP)
+               dev->stats.collisions++;
+       else if (err == -ENETUNREACH)
+               dev->stats.tx_carrier_errors++;
+       else
+               dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
 
+static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+       struct ip_tunnel_info *info = skb_tunnel_info(skb);
+       struct geneve_dev *geneve = netdev_priv(dev);
+       struct rtable *rt;
+       struct flowi4 fl4;
+
+       if (ip_tunnel_info_af(info) != AF_INET)
+               return -EINVAL;
+
+       rt = geneve_get_rt(skb, dev, &fl4, info);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
+
+       ip_rt_put(rt);
+       info->key.u.ipv4.src = fl4.saddr;
+       info->key.tp_src = udp_flow_src_port(geneve->net, skb,
+                                            1, USHRT_MAX, true);
+       info->key.tp_dst = geneve->dst_port;
+       return 0;
+}
+
 static const struct net_device_ops geneve_netdev_ops = {
        .ndo_init               = geneve_init,
        .ndo_uninit             = geneve_uninit,
@@ -713,6 +738,7 @@ static const struct net_device_ops geneve_netdev_ops = {
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_fill_metadata_dst  = geneve_fill_metadata_dst,
 };
 
 static void geneve_get_drvinfo(struct net_device *dev,
@@ -870,14 +896,14 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
        __be16 dst_port = htons(GENEVE_UDP_PORT);
        __u8 ttl = 0, tos = 0;
        bool metadata = false;
-       __be32 rem_addr;
-       __u32 vni;
+       __be32 rem_addr = 0;
+       __u32 vni = 0;
 
-       if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
-               return -EINVAL;
+       if (data[IFLA_GENEVE_ID])
+               vni = nla_get_u32(data[IFLA_GENEVE_ID]);
 
-       vni = nla_get_u32(data[IFLA_GENEVE_ID]);
-       rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+       if (data[IFLA_GENEVE_REMOTE])
+               rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
 
        if (data[IFLA_GENEVE_TTL])
                ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
index 248478c6f6e49522681a3eeb8a80fd8eaefd32fc..197c93937c2d577e56cf7fab8dcef07313bf75f4 100644 (file)
@@ -137,7 +137,7 @@ static const struct proto_ops macvtap_socket_ops;
 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
                      NETIF_F_TSO6 | NETIF_F_UFO)
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
-#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
 
 static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
 {
index c5ad98ace5d0abeff5d6ef8f3f028e537b241a8e..436972b2a746a23d27bac9ebb4e23d17b6d54715 100644 (file)
@@ -122,6 +122,11 @@ config MICREL_PHY
        ---help---
          Supports the KSZ9021, VSC8201, KS8001 PHYs.
 
+config DP83848_PHY
+       tristate "Driver for Texas Instruments DP83848 PHY"
+       ---help---
+         Supports the DP83848 PHY.
+
 config DP83867_PHY
        tristate "Drivers for Texas Instruments DP83867 Gigabit PHY"
        ---help---
@@ -168,8 +173,6 @@ config MDIO_OCTEON
          busses. It is required by the Octeon and ThunderX ethernet device
          drivers.
 
-         If in doubt, say Y.
-
 config MDIO_SUN4I
        tristate "Allwinner sun4i MDIO interface support"
        depends on ARCH_SUNXI
index 87f079c4b2c7ab16e5577b0b86fdd8509f9b7c8f..b74822463930051f60151beec04c5ccfe0e7bd0b 100644 (file)
@@ -24,6 +24,7 @@ obj-$(CONFIG_MDIO_BITBANG)    += mdio-bitbang.o
 obj-$(CONFIG_MDIO_GPIO)                += mdio-gpio.o
 obj-$(CONFIG_NATIONAL_PHY)     += national.o
 obj-$(CONFIG_DP83640_PHY)      += dp83640.o
+obj-$(CONFIG_DP83848_PHY)      += dp83848.o
 obj-$(CONFIG_DP83867_PHY)      += dp83867.o
 obj-$(CONFIG_STE10XP)          += ste10Xp.o
 obj-$(CONFIG_MICREL_PHY)       += micrel.o
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
new file mode 100644 (file)
index 0000000..5ce9bef
--- /dev/null
+++ b/drivers/net/phy/dp83848.c
@@ -0,0 +1,99 @@
+/*
+ * Driver for the Texas Instruments DP83848 PHY
+ *
+ * Copyright (C) 2015 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define DP83848_PHY_ID                 0x20005c90
+
+/* Registers */
+#define DP83848_MICR                   0x11
+#define DP83848_MISR                   0x12
+
+/* MICR Register Fields */
+#define DP83848_MICR_INT_OE            BIT(0) /* Interrupt Output Enable */
+#define DP83848_MICR_INTEN             BIT(1) /* Interrupt Enable */
+
+/* MISR Register Fields */
+#define DP83848_MISR_RHF_INT_EN                BIT(0) /* Receive Error Counter */
+#define DP83848_MISR_FHF_INT_EN                BIT(1) /* False Carrier Counter */
+#define DP83848_MISR_ANC_INT_EN                BIT(2) /* Auto-negotiation complete */
+#define DP83848_MISR_DUP_INT_EN                BIT(3) /* Duplex Status */
+#define DP83848_MISR_SPD_INT_EN                BIT(4) /* Speed status */
+#define DP83848_MISR_LINK_INT_EN       BIT(5) /* Link status */
+#define DP83848_MISR_ED_INT_EN         BIT(6) /* Energy detect */
+#define DP83848_MISR_LQM_INT_EN                BIT(7) /* Link Quality Monitor */
+
+static int dp83848_ack_interrupt(struct phy_device *phydev)
+{
+       int err = phy_read(phydev, DP83848_MISR);
+
+       return err < 0 ? err : 0;
+}
+
+static int dp83848_config_intr(struct phy_device *phydev)
+{
+       int err;
+
+       if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+               err = phy_write(phydev, DP83848_MICR,
+                               DP83848_MICR_INT_OE |
+                               DP83848_MICR_INTEN);
+               if (err < 0)
+                       return err;
+
+               return phy_write(phydev, DP83848_MISR,
+                                DP83848_MISR_ANC_INT_EN |
+                                DP83848_MISR_DUP_INT_EN |
+                                DP83848_MISR_SPD_INT_EN |
+                                DP83848_MISR_LINK_INT_EN);
+       }
+
+       return phy_write(phydev, DP83848_MICR, 0x0);
+}
+
+static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+       { DP83848_PHY_ID, 0xfffffff0 },
+       { }
+};
+MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+
+static struct phy_driver dp83848_driver[] = {
+       {
+               .phy_id         = DP83848_PHY_ID,
+               .phy_id_mask    = 0xfffffff0,
+               .name           = "TI DP83848",
+               .features       = PHY_BASIC_FEATURES,
+               .flags          = PHY_HAS_INTERRUPT,
+
+               .soft_reset     = genphy_soft_reset,
+               .config_init    = genphy_config_init,
+               .suspend        = genphy_suspend,
+               .resume         = genphy_resume,
+               .config_aneg    = genphy_config_aneg,
+               .read_status    = genphy_read_status,
+
+               /* IRQ related */
+               .ack_interrupt  = dp83848_ack_interrupt,
+               .config_intr    = dp83848_config_intr,
+
+               .driver         = { .owner = THIS_MODULE, },
+       },
+};
+module_phy_driver(dp83848_driver);
+
+MODULE_DESCRIPTION("Texas Instruments DP83848 PHY driver");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_LICENSE("GPL");
index 2377c1341172f6ac6fb56eeaf835b7fc577e1593..7fde454fbc4f1762b2c0bb35cf41f60580fd16ed 100644 (file)
@@ -113,12 +113,14 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
                if (!iprop || len != sizeof(uint32_t)) {
                        dev_err(&pdev->dev, "mdio-mux child node %s is "
                                "missing a 'reg' property\n", np2->full_name);
+                       of_node_put(np2);
                        return -ENODEV;
                }
                if (be32_to_cpup(iprop) & ~s->mask) {
                        dev_err(&pdev->dev, "mdio-mux child node %s has "
                                "a 'reg' value with unmasked bits\n",
                                np2->full_name);
+                       of_node_put(np2);
                        return -ENODEV;
                }
        }
index 280c7c311f72442c4877815ece73c2a36745be49..908e8d4863429fb6c11f4a625449500f3be36ce1 100644 (file)
@@ -144,6 +144,7 @@ int mdio_mux_init(struct device *dev,
                        dev_err(dev,
                                "Error: Failed to allocate memory for child\n");
                        ret_val = -ENOMEM;
+                       of_node_put(child_bus_node);
                        break;
                }
                cb->bus_number = v;
index 499185eaf413ba08b1447fbebf1f60080c643329..cf6312fafea545fbc3efb96e8ff6c63b35c7420e 100644 (file)
@@ -514,6 +514,27 @@ static int ksz8873mll_read_status(struct phy_device *phydev)
        return 0;
 }
 
+static int ksz9031_read_status(struct phy_device *phydev)
+{
+       int err;
+       int regval;
+
+       err = genphy_read_status(phydev);
+       if (err)
+               return err;
+
+       /* Make sure the PHY is not broken. Read idle error count,
+        * and reset the PHY if it is maxed out.
+        */
+       regval = phy_read(phydev, MII_STAT1000);
+       if ((regval & 0xFF) == 0xFF) {
+               phy_init_hw(phydev);
+               phydev->link = 0;
+       }
+
+       return 0;
+}
+
 static int ksz8873mll_config_aneg(struct phy_device *phydev)
 {
        return 0;
@@ -772,7 +793,7 @@ static struct phy_driver ksphy_driver[] = {
        .driver_data    = &ksz9021_type,
        .config_init    = ksz9031_config_init,
        .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
+       .read_status    = ksz9031_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
        .suspend        = genphy_suspend,
index 70b08958763a129fff47ad00a1db130c1334f254..dc2da87709185870f808ef626e8446893c658db6 100644 (file)
@@ -43,16 +43,25 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
 
 static int smsc_phy_config_init(struct phy_device *phydev)
 {
+       int __maybe_unused len;
+       struct device *dev __maybe_unused = &phydev->dev;
+       struct device_node *of_node __maybe_unused = dev->of_node;
        int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+       int enable_energy = 1;
 
        if (rc < 0)
                return rc;
 
-       /* Enable energy detect mode for this SMSC Transceivers */
-       rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
-                      rc | MII_LAN83C185_EDPWRDOWN);
-       if (rc < 0)
-               return rc;
+       if (of_find_property(of_node, "smsc,disable-energy-detect", &len))
+               enable_energy = 0;
+
+       if (enable_energy) {
+               /* Enable energy detect mode for this SMSC Transceivers */
+               rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+                              rc | MII_LAN83C185_EDPWRDOWN);
+               if (rc < 0)
+                       return rc;
+       }
 
        return smsc_phy_ack_interrupt(phydev);
 }
index 3837ae344f63b9d69a5dd958d0e7b7dc202ff316..5e0b43283bce2c4f5251e5c5db982ca679526d07 100644 (file)
@@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev)
                        if (po->pppoe_dev == dev &&
                            sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
                                pppox_unbind_sock(sk);
-                               sk->sk_state = PPPOX_ZOMBIE;
                                sk->sk_state_change(sk);
                                po->pppoe_dev = NULL;
                                dev_put(dev);
@@ -590,7 +589,7 @@ static int pppoe_release(struct socket *sock)
 
        po = pppox_sk(sk);
 
-       if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+       if (po->pppoe_dev) {
                dev_put(po->pppoe_dev);
                po->pppoe_dev = NULL;
        }
index fbb9325d1f6e539421f2ad592d718bcede68acea..e66805eeffb45f014a9c61d20e0bd6f3b473098b 100644 (file)
@@ -164,6 +164,7 @@ config USB_NET_AX8817X
            * Aten UC210T
            * ASIX AX88172
            * Billionton Systems, USB2AR
+           * Billionton Systems, GUSB2AM-1G-B
            * Buffalo LUA-U2-KTX
            * Corega FEther USB2-TX
            * D-Link DUB-E100
index 75d6f26729a30e34cdaaf8334a9cc0aaa2a01c82..079069a060a62fdb42cb9dff9acbcd55148277b8 100644 (file)
@@ -91,8 +91,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
                        }
                        rx->ax_skb = netdev_alloc_skb_ip_align(dev->net,
                                                               rx->size);
-                       if (!rx->ax_skb)
+                       if (!rx->ax_skb) {
+                               rx->size = 0;
                                return 0;
+                       }
                }
 
                if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
index 1173a24feda38c3af236c84acaf8982f39c0e0b1..5cabefc2349438f26cddde0022d52acf6e967116 100644 (file)
@@ -958,6 +958,10 @@ static const struct usb_device_id  products [] = {
        // Billionton Systems, USB2AR
        USB_DEVICE (0x08dd, 0x90ff),
        .driver_info =  (unsigned long) &ax8817x_info,
+}, {
+       // Billionton Systems, GUSB2AM-1G-B
+       USB_DEVICE(0x08dd, 0x0114),
+       .driver_info =  (unsigned long) &ax88178_info,
 }, {
        // ATEN UC210T
        USB_DEVICE (0x0557, 0x2009),
index 355842b85ee906a434477b325f32199ec315eae8..2a7c1be23c4f2aea38e2ec99384be91e7f9d0f62 100644 (file)
@@ -765,6 +765,10 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x9056, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
        {QMI_FIXED_INTF(0x1199, 0x9061, 8)},    /* Sierra Wireless Modem */
+       {QMI_FIXED_INTF(0x1199, 0x9070, 8)},    /* Sierra Wireless MC74xx/EM74xx */
+       {QMI_FIXED_INTF(0x1199, 0x9070, 10)},   /* Sierra Wireless MC74xx/EM74xx */
+       {QMI_FIXED_INTF(0x1199, 0x9071, 8)},    /* Sierra Wireless MC74xx/EM74xx */
+       {QMI_FIXED_INTF(0x1199, 0x9071, 10)},   /* Sierra Wireless MC74xx/EM74xx */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
index bbac1d35ed4e37450a3282b4b7f9dc41dabcd4e8..c1587ece28cfffeb3c7c3011bf087215edb77662 100644 (file)
@@ -2337,6 +2337,46 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
+static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
+                               struct ip_tunnel_info *info,
+                               __be16 sport, __be16 dport)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct rtable *rt;
+       struct flowi4 fl4;
+
+       memset(&fl4, 0, sizeof(fl4));
+       fl4.flowi4_tos = RT_TOS(info->key.tos);
+       fl4.flowi4_mark = skb->mark;
+       fl4.flowi4_proto = IPPROTO_UDP;
+       fl4.daddr = info->key.u.ipv4.dst;
+
+       rt = ip_route_output_key(vxlan->net, &fl4);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
+       ip_rt_put(rt);
+
+       info->key.u.ipv4.src = fl4.saddr;
+       info->key.tp_src = sport;
+       info->key.tp_dst = dport;
+       return 0;
+}
+
+static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct ip_tunnel_info *info = skb_tunnel_info(skb);
+       __be16 sport, dport;
+
+       sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+                                 vxlan->cfg.port_max, true);
+       dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
+
+       if (ip_tunnel_info_af(info) == AF_INET)
+               return egress_ipv4_tun_info(dev, skb, info, sport, dport);
+       return -EINVAL;
+}
+
 static const struct net_device_ops vxlan_netdev_ops = {
        .ndo_init               = vxlan_init,
        .ndo_uninit             = vxlan_uninit,
@@ -2351,6 +2391,7 @@ static const struct net_device_ops vxlan_netdev_ops = {
        .ndo_fdb_add            = vxlan_fdb_add,
        .ndo_fdb_del            = vxlan_fdb_delete,
        .ndo_fdb_dump           = vxlan_fdb_dump,
+       .ndo_fill_metadata_dst  = vxlan_fill_metadata_dst,
 };
 
 /* Info for udev, that this is a virtual tunnel endpoint */
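
The ndo_fill_metadata_dst hook registered above lets a caller that attaches tunnel metadata to an skb (a flow-based forwarder, for example) ask the vxlan device to pre-compute the egress source address and UDP ports before transmission. A minimal, hypothetical caller sketch, not taken from this diff:

	static int example_fill_tunnel_dst(struct net_device *dev,
					   struct sk_buff *skb)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		if (!ops->ndo_fill_metadata_dst)
			return -EOPNOTSUPP;

		/* Fills info->key.u.ipv4.src, tp_src and tp_dst on success. */
		return ops->ndo_fill_metadata_dst(dev, skb);
	}
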
@@ -2745,11 +2786,10 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        struct vxlan_config conf;
        int err;
 
-       if (!data[IFLA_VXLAN_ID])
-               return -EINVAL;
-
        memset(&conf, 0, sizeof(conf));
-       conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+
+       if (data[IFLA_VXLAN_ID])
+               conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
 
        if (data[IFLA_VXLAN_GROUP]) {
                conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
index 23afcda2de967637d73c7039d0f2cdb5f3b777ff..678d72af4a9df73976b7513d9af17888bc1ef7d1 100644 (file)
@@ -337,7 +337,7 @@ enum ath10k_hw_rate_cck {
 #define TARGET_10X_MAX_FRAG_ENTRIES            0
 
 /* 10.2 parameters */
-#define TARGET_10_2_DMA_BURST_SIZE             1
+#define TARGET_10_2_DMA_BURST_SIZE             0
 
 /* Target specific defines for WMI-TLV firmware */
 #define TARGET_TLV_NUM_VDEVS                   4
@@ -391,7 +391,7 @@ enum ath10k_hw_rate_cck {
 
 #define TARGET_10_4_TX_DBG_LOG_SIZE            1024
 #define TARGET_10_4_NUM_WDS_ENTRIES            32
-#define TARGET_10_4_DMA_BURST_SIZE             1
+#define TARGET_10_4_DMA_BURST_SIZE             0
 #define TARGET_10_4_MAC_AGGR_DELIM             0
 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
 #define TARGET_10_4_VOW_CONFIG                 0
index 6e473fa4b13cae0df30a33fd9162729afaf76f6b..12241b1c57cd28023d0278c57f347457583bc017 100644 (file)
@@ -715,6 +715,7 @@ static bool check_device_tree(struct ath6kl *ar)
                                   board_filename, ret);
                        continue;
                }
+               of_node_put(node);
                return true;
        }
        return false;
index 57f95f2dca5b072ac294b2c76122ec1b952a2a80..90eb75012e4f4818a03481d98cb9b137a89393fa 100644 (file)
@@ -880,6 +880,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);
+       hw->extra_tx_headroom = 4;
 
        hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
        hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
index 28490702124a0da53bb2834f1ca880a452d6523b..71d3e9adbf3c02b4d5ff50843a2b4b06cd96b6a5 100644 (file)
@@ -120,6 +120,7 @@ MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if over
 #ifdef CONFIG_B43_BCMA
 static const struct bcma_device_id b43_bcma_tbl[] = {
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
+       BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x15, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1C, BCMA_ANY_CLASS),
index ab45819c1fbbf6d0080813c26090bb095082672d..e18629a16fb0260dff9b3486c572f4bfd2166fcd 100644 (file)
@@ -1020,7 +1020,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
                        u8 *pn = seq.ccmp.pn;
 
                        ieee80211_get_key_rx_seq(key, i, &seq);
-                       aes_sc->pn = cpu_to_le64(
+                       aes_sc[i].pn = cpu_to_le64(
                                        (u64)pn[5] |
                                        ((u64)pn[4] << 8) |
                                        ((u64)pn[3] << 16) |
index 6951aba620eb74a13f6fc2c2da36a043272d8a43..3fb327d5a911aeba53e4ac71ad6bb2ad419d06b0 100644 (file)
@@ -348,6 +348,6 @@ const struct iwl_cfg iwl7265d_n_cfg = {
 };
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
index 04264e417c1c644e2b362e9bf29760489cfcae4f..576187611e614adc3a92ca50c4e1f2ab3a7bb7e1 100644 (file)
@@ -274,18 +274,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                if (sta) {
-                       u8 *pn = seq.ccmp.pn;
+                       u64 pn64;
 
                        aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
                        aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
 
-                       ieee80211_get_key_tx_seq(key, &seq);
-                       aes_tx_sc->pn = cpu_to_le64((u64)pn[5] |
-                                                   ((u64)pn[4] << 8) |
-                                                   ((u64)pn[3] << 16) |
-                                                   ((u64)pn[2] << 24) |
-                                                   ((u64)pn[1] << 32) |
-                                                   ((u64)pn[0] << 40));
+                       pn64 = atomic64_read(&key->tx_pn);
+                       aes_tx_sc->pn = cpu_to_le64(pn64);
                } else {
                        aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
                }
@@ -298,12 +293,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
                        u8 *pn = seq.ccmp.pn;
 
                        ieee80211_get_key_rx_seq(key, i, &seq);
-                       aes_sc->pn = cpu_to_le64((u64)pn[5] |
-                                                ((u64)pn[4] << 8) |
-                                                ((u64)pn[3] << 16) |
-                                                ((u64)pn[2] << 24) |
-                                                ((u64)pn[1] << 32) |
-                                                ((u64)pn[0] << 40));
+                       aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
+                                                  ((u64)pn[4] << 8) |
+                                                  ((u64)pn[3] << 16) |
+                                                  ((u64)pn[2] << 24) |
+                                                  ((u64)pn[1] << 32) |
+                                                  ((u64)pn[0] << 40));
                }
                data->use_rsc_tsc = true;
                break;
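
For reference, the per-TID loop above packs the 6-byte CCMP receive packet number, stored most-significant byte first in seq.ccmp.pn[], into a 64-bit value before converting it to little-endian for the firmware. A standalone sketch of that packing (illustration only, helper name hypothetical):

	static u64 example_ccmp_pn_to_u64(const u8 pn[6])
	{
		return (u64)pn[5] |
		       ((u64)pn[4] << 8) |
		       ((u64)pn[3] << 16) |
		       ((u64)pn[2] << 24) |
		       ((u64)pn[1] << 32) |
		       ((u64)pn[0] << 40);
	}
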
@@ -1453,15 +1448,15 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
 
                switch (key->cipher) {
                case WLAN_CIPHER_SUITE_CCMP:
-                       iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq);
                        iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
+                       atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
                        break;
                case WLAN_CIPHER_SUITE_TKIP:
                        iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
                        iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+                       ieee80211_set_key_tx_seq(key, &seq);
                        break;
                }
-               ieee80211_set_key_tx_seq(key, &seq);
 
                /* that's it for this key */
                return;
index 4a0ce83315bdd212d1714956af8900ea271f62b6..5c7f7cc9ffcc2aa1d81cbf1ff5b2d33f46133727 100644 (file)
@@ -703,7 +703,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
         * abort after reading the nvm in case RF Kill is on, we will complete
         * the init seq later when RF kill will switch to off
         */
-       if (iwl_mvm_is_radio_killed(mvm)) {
+       if (iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm,
                                  "jump over all phy activities due to RF kill\n");
                iwl_remove_notification(&mvm->notif_wait, &calib_wait);
@@ -736,7 +736,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
                        MVM_UCODE_CALIB_TIMEOUT);
 
-       if (ret && iwl_mvm_is_radio_killed(mvm)) {
+       if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
                ret = 1;
        }
index aa8c2b7f23c73862f0526f109ad647cc0c80a259..7c2944a72470b92acdca39a6f7c13b845b34842a 100644 (file)
@@ -2388,6 +2388,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
                iwl_mvm_remove_time_event(mvm, mvmvif,
                                          &mvmvif->time_event_data);
                RCU_INIT_POINTER(mvm->csa_vif, NULL);
+               mvmvif->csa_countdown = false;
        }
 
        if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
index b95a07ec9e362bf031f960dee35dc52c97221ed0..c754051a4ceacf138d4e6399c9cc679d8a9979c0 100644 (file)
@@ -860,6 +860,11 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
               test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
 }
 
+static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
+{
+       return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+}
+
 /* Must be called with rcu_read_lock() held and it can only be
  * released when mvmsta is not needed anymore.
  */
index a37de3f410a01e0b594f41d43bfda4360273e324..f0cb092f980ec26b26e9ca3c6cadf813f39de853 100644 (file)
@@ -590,6 +590,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        ieee80211_unregister_hw(mvm->hw);
        iwl_mvm_leds_exit(mvm);
  out_free:
+       flush_delayed_work(&mvm->fw_dump_wk);
        iwl_phy_db_free(mvm->phy_db);
        kfree(mvm->scan_cmd);
        if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
index b0825c402c732c0514637b3b21b26288a7275444..644b58bc5226c52b3cdee0a24b9a392c25e8ac02 100644 (file)
@@ -414,6 +414,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
 
 /* 8000 Series */
        {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
index 5932306084fd305a6f45ccf03d8957b771eaaa13..bf9afbf46c1bbbc1220bad06e429d2622a1f9f58 100644 (file)
@@ -1114,6 +1114,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x0db0, 0x871c) },
        { USB_DEVICE(0x0db0, 0x899a) },
        /* Ovislink */
+       { USB_DEVICE(0x1b75, 0x3070) },
        { USB_DEVICE(0x1b75, 0x3071) },
        { USB_DEVICE(0x1b75, 0x3072) },
        { USB_DEVICE(0x1b75, 0xa200) },
index d4567d12e07ebd13f17f0097baeec41a25702d31..5da6703942d9dd08017d896070404fdbe29a96e4 100644 (file)
@@ -247,6 +247,8 @@ struct rtl_pci {
        /* MSI support */
        bool msi_support;
        bool using_msi;
+       /* interrupt clear before set */
+       bool int_clear;
 };
 
 struct mp_adapter {
index b7f18e2155eb18358cf4d4f9f3f82774f9b6f522..6e9418ed90c289bee5b7f2dfc478f847dfc7ca68 100644 (file)
@@ -2253,11 +2253,28 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
        }
 }
 
+static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 tmp = rtl_read_dword(rtlpriv, REG_HISR);
+
+       rtl_write_dword(rtlpriv, REG_HISR, tmp);
+
+       tmp = rtl_read_dword(rtlpriv, REG_HISRE);
+       rtl_write_dword(rtlpriv, REG_HISRE, tmp);
+
+       tmp = rtl_read_dword(rtlpriv, REG_HSISR);
+       rtl_write_dword(rtlpriv, REG_HSISR, tmp);
+}
+
 void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
+       if (!rtlpci->int_clear)
+               rtl8821ae_clear_interrupt(hw);/*clear it here first*/
+
        rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
        rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
        rtlpci->irq_enabled = true;
index a4988121e1ab6a20bad5ad9b5934c166a41f36d6..8ee141a55bc5cc6b566e79dde58cdb05583e7fdf 100644 (file)
@@ -96,6 +96,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
 
        rtl8821ae_bt_reg_init(hw);
        rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+       rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
        rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
 
        rtlpriv->dm.dm_initialgain_enable = 1;
@@ -167,6 +168,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
        rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
        rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+       rtlpci->msi_support = rtlpriv->cfg->mod_params->int_clear;
        if (rtlpriv->cfg->mod_params->disable_watchdog)
                pr_info("watchdog disabled\n");
        rtlpriv->psc.reg_fwctrl_lps = 3;
@@ -308,6 +310,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = {
        .swctrl_lps = false,
        .fwctrl_lps = true,
        .msi_support = true,
+       .int_clear = true,
        .debug = DBG_EMERG,
        .disable_watchdog = 0,
 };
@@ -437,6 +440,7 @@ module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444);
 module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog,
                   bool, 0444);
+module_param_named(int_clear, rtl8821ae_mod_params.int_clear, bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
@@ -444,6 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
 MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
+MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 0)\n");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
 
index b90ca618b123209a1724bc808c4fdfe8ea330a69..4544752a2ba83ca173f7cb4f14abc72538b73beb 100644 (file)
@@ -2249,6 +2249,9 @@ struct rtl_mod_params {
 
        /* default 0: 1 means disable */
        bool disable_watchdog;
+
+       /* default 0: 1 means do not disable interrupts */
+       bool int_clear;
 };
 
 struct rtl_hal_usbint_cfg {
index 929a6e7e5ecfe9249569c0059516e531fb0b79eb..56ebd8267386e6a91cabf506962e15f5d83531ec 100644 (file)
@@ -788,6 +788,12 @@ static void connect(struct backend_info *be)
        /* Use the number of queues requested by the frontend */
        be->vif->queues = vzalloc(requested_num_queues *
                                  sizeof(struct xenvif_queue));
+       if (!be->vif->queues) {
+               xenbus_dev_fatal(dev, -ENOMEM,
+                                "allocating queues");
+               return;
+       }
+
        be->vif->num_queues = requested_num_queues;
        be->vif->stalled_queues = requested_num_queues;
 
index f821a97d78278feed765d08d886a4665d8795bd5..6febc053a37febc069d241e6811d904f18890f45 100644 (file)
@@ -1706,19 +1706,19 @@ static void xennet_destroy_queues(struct netfront_info *info)
 }
 
 static int xennet_create_queues(struct netfront_info *info,
-                               unsigned int num_queues)
+                               unsigned int *num_queues)
 {
        unsigned int i;
        int ret;
 
-       info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+       info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
                               GFP_KERNEL);
        if (!info->queues)
                return -ENOMEM;
 
        rtnl_lock();
 
-       for (i = 0; i < num_queues; i++) {
+       for (i = 0; i < *num_queues; i++) {
                struct netfront_queue *queue = &info->queues[i];
 
                queue->id = i;
@@ -1728,7 +1728,7 @@ static int xennet_create_queues(struct netfront_info *info,
                if (ret < 0) {
                        dev_warn(&info->netdev->dev,
                                 "only created %d queues\n", i);
-                       num_queues = i;
+                       *num_queues = i;
                        break;
                }
 
@@ -1738,11 +1738,11 @@ static int xennet_create_queues(struct netfront_info *info,
                        napi_enable(&queue->napi);
        }
 
-       netif_set_real_num_tx_queues(info->netdev, num_queues);
+       netif_set_real_num_tx_queues(info->netdev, *num_queues);
 
        rtnl_unlock();
 
-       if (num_queues == 0) {
+       if (*num_queues == 0) {
                dev_err(&info->netdev->dev, "no queues\n");
                return -EINVAL;
        }
@@ -1788,7 +1788,7 @@ static int talk_to_netback(struct xenbus_device *dev,
        if (info->queues)
                xennet_destroy_queues(info);
 
-       err = xennet_create_queues(info, num_queues);
+       err = xennet_create_queues(info, &num_queues);
        if (err < 0)
                goto destroy_ring;
 
index d3c6676b3c0cafbaa996c1f60d1c21adb0f20b21..6fd4e5a5ef4a495bbd412ee33b931f4fb3a8a24f 100644 (file)
@@ -67,7 +67,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
        int rc;
 
        /* Stop the user from reading */
-       if (pos > nvmem->size)
+       if (pos >= nvmem->size)
                return 0;
 
        if (pos + count > nvmem->size)
@@ -92,7 +92,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
        int rc;
 
        /* Stop the user from writing */
-       if (pos > nvmem->size)
+       if (pos >= nvmem->size)
                return 0;
 
        if (pos + count > nvmem->size)
@@ -825,7 +825,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
                return rc;
 
        /* shift bits in-place */
-       if (cell->bit_offset || cell->bit_offset)
+       if (cell->bit_offset || cell->nbits)
                nvmem_shift_read_buffer_in_place(cell, buf);
 
        *len = cell->bytes;
@@ -938,7 +938,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
        rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes);
 
        /* free the tmp buffer */
-       if (cell->bit_offset)
+       if (cell->bit_offset || cell->nbits)
                kfree(buf);
 
        if (IS_ERR_VALUE(rc))
index 14777dd5212d29d10c672a18c8b85c17fdcdceb4..cfa3b85064dd233a463b1556742274d960e4f47b 100644 (file)
@@ -103,7 +103,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
        struct nvmem_device *nvmem;
        struct regmap *regmap;
        struct sunxi_sid *sid;
-       int i, size;
+       int ret, i, size;
        char *randomness;
 
        sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL);
@@ -131,6 +131,11 @@ static int sunxi_sid_probe(struct platform_device *pdev)
                return PTR_ERR(nvmem);
 
        randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL);
+       if (!randomness) {
+               ret = -EINVAL;
+               goto err_unreg_nvmem;
+       }
+
        for (i = 0; i < size; i++)
                randomness[i] = sunxi_sid_read_byte(sid, i);
 
@@ -140,6 +145,10 @@ static int sunxi_sid_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, nvmem);
 
        return 0;
+
+err_unreg_nvmem:
+       nvmem_unregister(nvmem);
+       return ret;
 }
 
 static int sunxi_sid_remove(struct platform_device *pdev)
index 55317fa9c9dca32557c40b1735ac2c2bec5d4714..0baf626da56ac5b142d75367a7cfdb52747c24ce 100644 (file)
@@ -579,22 +579,187 @@ err:
        }
 }
 
+static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
+                           u32 rid_in)
+{
+       struct device *parent_dev;
+       struct device_node *msi_controller_node;
+       struct device_node *msi_np = *np;
+       u32 map_mask, masked_rid, rid_base, msi_base, rid_len, phandle;
+       int msi_map_len;
+       bool matched;
+       u32 rid_out = rid_in;
+       const __be32 *msi_map = NULL;
+
+       /*
+        * Walk up the device parent links looking for one with a
+        * "msi-map" property.
+        */
+       for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) {
+               if (!parent_dev->of_node)
+                       continue;
+
+               msi_map = of_get_property(parent_dev->of_node,
+                                         "msi-map", &msi_map_len);
+               if (!msi_map)
+                       continue;
+
+               if (msi_map_len % (4 * sizeof(__be32))) {
+                       dev_err(parent_dev, "Error: Bad msi-map length: %d\n",
+                               msi_map_len);
+                       return rid_out;
+               }
+               /* We have a good parent_dev and msi_map, let's use them. */
+               break;
+       }
+       if (!msi_map)
+               return rid_out;
+
+       /* The default is to select all bits. */
+       map_mask = 0xffffffff;
+
+       /*
+        * Can be overridden by "msi-map-mask" property.  If
+        * of_property_read_u32() fails, the default is used.
+        */
+       of_property_read_u32(parent_dev->of_node, "msi-map-mask", &map_mask);
+
+       masked_rid = map_mask & rid_in;
+       matched = false;
+       while (!matched && msi_map_len >= 4 * sizeof(__be32)) {
+               rid_base = be32_to_cpup(msi_map + 0);
+               phandle = be32_to_cpup(msi_map + 1);
+               msi_base = be32_to_cpup(msi_map + 2);
+               rid_len = be32_to_cpup(msi_map + 3);
+
+               msi_controller_node = of_find_node_by_phandle(phandle);
+
+               matched = (masked_rid >= rid_base &&
+                          masked_rid < rid_base + rid_len);
+               if (msi_np)
+                       matched &= msi_np == msi_controller_node;
+
+               if (matched && !msi_np) {
+                       *np = msi_np = msi_controller_node;
+                       break;
+               }
+
+               of_node_put(msi_controller_node);
+               msi_map_len -= 4 * sizeof(__be32);
+               msi_map += 4;
+       }
+       if (!matched)
+               return rid_out;
+
+       rid_out = masked_rid + msi_base;
+       dev_dbg(dev,
+               "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
+               dev_name(parent_dev), map_mask, rid_base, msi_base,
+               rid_len, rid_in, rid_out);
+
+       return rid_out;
+}
+
 /**
- * of_msi_configure - Set the msi_domain field of a device
- * @dev: device structure to associate with an MSI irq domain
- * @np: device node for that device
+ * of_msi_map_rid - Map a MSI requester ID for a device.
+ * @dev: device for which the mapping is to be done.
+ * @msi_np: device node of the expected msi controller.
+ * @rid_in: unmapped MSI requester ID for the device.
+ *
+ * Walk up the device hierarchy looking for devices with a "msi-map"
+ * property.  If found, apply the mapping to @rid_in.
+ *
+ * Returns the mapped MSI requester ID.
  */
-void of_msi_configure(struct device *dev, struct device_node *np)
+u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in)
+{
+       return __of_msi_map_rid(dev, &msi_np, rid_in);
+}
+
+static struct irq_domain *__of_get_msi_domain(struct device_node *np,
+                                             enum irq_domain_bus_token token)
+{
+       struct irq_domain *d;
+
+       d = irq_find_matching_host(np, token);
+       if (!d)
+               d = irq_find_host(np);
+
+       return d;
+}
+
+/**
+ * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
+ * @dev: device for which the mapping is to be done.
+ * @rid: Requester ID for the device.
+ *
+ * Walk up the device hierarchy looking for devices with a "msi-map"
+ * property.
+ *
+ * Returns: the MSI domain for this device (or NULL on failure)
+ */
+struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid)
+{
+       struct device_node *np = NULL;
+
+       __of_msi_map_rid(dev, &np, rid);
+       return __of_get_msi_domain(np, DOMAIN_BUS_PCI_MSI);
+}
+
+/**
+ * of_msi_get_domain - Use msi-parent to find the relevant MSI domain
+ * @dev: device for which the domain is requested
+ * @np: device node for @dev
+ * @token: bus type for this domain
+ *
+ * Parse the msi-parent property (both the simple and the complex
+ * versions), and return the corresponding MSI domain.
+ *
+ * Returns: the MSI domain for this device (or NULL on failure).
+ */
+struct irq_domain *of_msi_get_domain(struct device *dev,
+                                    struct device_node *np,
+                                    enum irq_domain_bus_token token)
 {
        struct device_node *msi_np;
        struct irq_domain *d;
 
+       /* Check for a single msi-parent property */
        msi_np = of_parse_phandle(np, "msi-parent", 0);
-       if (!msi_np)
-               return;
+       if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) {
+               d = __of_get_msi_domain(msi_np, token);
+               if (!d)
+                       of_node_put(msi_np);
+               return d;
+       }
 
-       d = irq_find_matching_host(msi_np, DOMAIN_BUS_PLATFORM_MSI);
-       if (!d)
-               d = irq_find_host(msi_np);
-       dev_set_msi_domain(dev, d);
+       if (token == DOMAIN_BUS_PLATFORM_MSI) {
+               /* Check for the complex msi-parent version */
+               struct of_phandle_args args;
+               int index = 0;
+
+               while (!of_parse_phandle_with_args(np, "msi-parent",
+                                                  "#msi-cells",
+                                                  index, &args)) {
+                       d = __of_get_msi_domain(args.np, token);
+                       if (d)
+                               return d;
+
+                       of_node_put(args.np);
+                       index++;
+               }
+       }
+
+       return NULL;
+}
+
+/**
+ * of_msi_configure - Set the msi_domain field of a device
+ * @dev: device structure to associate with an MSI irq domain
+ * @np: device node for that device
+ */
+void of_msi_configure(struct device *dev, struct device_node *np)
+{
+       dev_set_msi_domain(dev,
+                          of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI));
 }
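
Each "msi-map" entry parsed by __of_msi_map_rid() above is four cells: rid-base, a phandle to the MSI controller, msi-base, and a length. The requester ID is first masked with "msi-map-mask" (default 0xffffffff) and then, if it falls in [rid-base, rid-base + length), translated by adding msi-base, as the matching loop does. A minimal sketch of that per-entry test (illustration only, names hypothetical):

	static bool example_msi_map_entry_match(u32 masked_rid, u32 rid_base,
						u32 msi_base, u32 rid_len,
						u32 *rid_out)
	{
		if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
			return false;

		*rid_out = masked_rid + msi_base; /* as in __of_msi_map_rid() */
		return true;
	}
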
index e491681daf22681c7bd541ad75d078edfa32ea1c..a6456b5782692a5d5559b9ef59226d2fbce99e3f 100644 (file)
@@ -256,7 +256,7 @@ static int xgene_allocate_domains(struct xgene_msi *msi)
        if (!msi->inner_domain)
                return -ENOMEM;
 
-       msi->msi_domain = pci_msi_create_irq_domain(msi->node,
+       msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
                                                    &xgene_msi_domain_info,
                                                    msi->inner_domain);
 
index d4497141d083a71d5d5206496fee58fbddc5cb13..45a51486d080a54fa987eaa230106df5895b3b09 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/irqdomain.h>
+#include <linux/of_irq.h>
 
 #include "pci.h"
 
@@ -1243,11 +1244,15 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
        BUG_ON(!chip);
        if (!chip->irq_write_msi_msg)
                chip->irq_write_msi_msg = pci_msi_domain_write_msg;
+       if (!chip->irq_mask)
+               chip->irq_mask = pci_msi_mask_irq;
+       if (!chip->irq_unmask)
+               chip->irq_unmask = pci_msi_unmask_irq;
 }
 
 /**
- * pci_msi_create_irq_domain - Creat a MSI interrupt domain
- * @node:      Optional device-tree node of the interrupt controller
+ * pci_msi_create_irq_domain - Create a MSI interrupt domain
+ * @fwnode:    Optional fwnode of the interrupt controller
  * @info:      MSI domain info
  * @parent:    Parent irq domain
  *
@@ -1256,7 +1261,7 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
  * Returns:
  * A domain pointer or NULL in case of failure.
  */
-struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
                                             struct msi_domain_info *info,
                                             struct irq_domain *parent)
 {
@@ -1267,7 +1272,7 @@ struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
        if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
                pci_msi_domain_update_chip_ops(info);
 
-       domain = msi_create_irq_domain(node, info, parent);
+       domain = msi_create_irq_domain(fwnode, info, parent);
        if (!domain)
                return NULL;
 
@@ -1303,14 +1308,14 @@ void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
 
 /**
  * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
- * @node:      Optional device-tree node of the interrupt controller
+ * @fwnode:    Optional fwnode of the interrupt controller
  * @info:      MSI domain info
  * @parent:    Parent irq domain
  *
  * Returns: A domain pointer or NULL in case of failure. If successful
  * the default PCI/MSI irqdomain pointer is updated.
  */
-struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
                struct msi_domain_info *info, struct irq_domain *parent)
 {
        struct irq_domain *domain;
@@ -1320,11 +1325,59 @@ struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
                pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
                domain = NULL;
        } else {
-               domain = pci_msi_create_irq_domain(node, info, parent);
+               domain = pci_msi_create_irq_domain(fwnode, info, parent);
                pci_msi_default_domain = domain;
        }
        mutex_unlock(&pci_msi_domain_lock);
 
        return domain;
 }
+
+static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
+{
+       u32 *pa = data;
+
+       *pa = alias;
+       return 0;
+}
+/**
+ * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
+ * @domain:    The interrupt domain
+ * @pdev:      The PCI device.
+ *
+ * The RID for a device is formed from the alias, with a firmware-supplied
+ * mapping applied.
+ *
+ * Returns: The RID.
+ */
+u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
+{
+       struct device_node *of_node;
+       u32 rid = 0;
+
+       pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+
+       of_node = irq_domain_get_of_node(domain);
+       if (of_node)
+               rid = of_msi_map_rid(&pdev->dev, of_node, rid);
+
+       return rid;
+}
+
+/**
+ * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
+ * @pdev:      The PCI device
+ *
+ * Use the firmware data to find a device-specific MSI domain
+ * (i.e. not one that is set as a default).
+ *
+ * Returns: The corresponding MSI domain or NULL if none has been found.
+ */
+struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
+{
+       u32 rid = 0;
+
+       pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+       return of_msi_map_get_device_domain(&pdev->dev, rid);
+}
 #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
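
The two helpers added above give MSI controller drivers a firmware-aware view of a PCI device: pci_msi_domain_get_msi_rid() folds the DMA alias and any msi-map translation into a single requester ID, and pci_msi_get_device_domain() resolves the per-device MSI domain from the same data. A hypothetical usage sketch (not part of this diff) from an MSI controller's per-device setup path:

	static int example_msi_prepare(struct irq_domain *domain,
				       struct device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev);
		u32 rid = pci_msi_domain_get_msi_rid(domain, pdev);

		/* Program the controller with the firmware-mapped requester ID. */
		pr_debug("%s uses MSI requester ID 0x%x\n", dev_name(dev), rid);
		return 0;
	}
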
index 2e99a500cb83fe48ac065333fad036186935aa4f..e112da11630ee46803fc14d02f979c079c70ba79 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 #include <linux/of_pci.h>
 #include "pci.h"
 
@@ -64,27 +65,25 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
 {
 #ifdef CONFIG_IRQ_DOMAIN
-       struct device_node *np;
        struct irq_domain *d;
 
        if (!bus->dev.of_node)
                return NULL;
 
        /* Start looking for a phandle to an MSI controller. */
-       np = of_parse_phandle(bus->dev.of_node, "msi-parent", 0);
+       d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
+       if (d)
+               return d;
 
        /*
         * If we don't have an msi-parent property, look for a domain
         * directly attached to the host bridge.
         */
-       if (!np)
-               np = bus->dev.of_node;
-
-       d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
+       d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
        if (d)
                return d;
 
-       return irq_find_host(np);
+       return irq_find_host(bus->dev.of_node);
 #else
        return NULL;
 #endif
index dd652f2ae03db964ed539c5d369092173ab9ab33..108a3118ace7fbd107a2916aa29066cc326c0b6c 100644 (file)
@@ -299,9 +299,10 @@ static long local_pci_probe(void *_ddi)
         * Unbound PCI devices are always put in D0, regardless of
         * runtime PM status.  During probe, the device is set to
         * active and the usage count is incremented.  If the driver
-        * supports runtime PM, it should call pm_runtime_put_noidle()
-        * in its probe routine and pm_runtime_get_noresume() in its
-        * remove routine.
+        * supports runtime PM, it should call pm_runtime_put_noidle(),
+        * or any other runtime PM helper function decrementing the usage
+        * count, in its probe routine and pm_runtime_get_noresume() in
+        * its remove routine.
         */
        pm_runtime_get_sync(dev);
        pci_dev->driver = pci_drv;
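
The expanded comment above describes the usage-count protocol for PCI drivers that support runtime PM: the core takes a reference before probe, so the driver drops one in its probe routine and takes it back in remove. A hedged skeleton of that pairing (illustration only, not part of this diff):

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
	{
		/* ... device setup ... */
		pm_runtime_put_noidle(&pdev->dev);	/* allow runtime suspend */
		return 0;
	}

	static void example_remove(struct pci_dev *pdev)
	{
		pm_runtime_get_noresume(&pdev->dev);	/* rebalance before unbind */
		/* ... device teardown ... */
	}
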
index 312f23a8429cd9331b45afaf72a278e84bd41d83..92618686604cb9d314aa1e6bf833363cfbaaa1b5 100644 (file)
@@ -216,7 +216,7 @@ static ssize_t numa_node_store(struct device *dev,
        if (ret)
                return ret;
 
-       if (!node_online(node))
+       if (node >= MAX_NUMNODES || !node_online(node))
                return -EINVAL;
 
        add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
index 8361d27e5ecad82ff9767c3d55a019a66f884046..f14a970b61fa59bf00b6e97474da3ec024ba315e 100644 (file)
@@ -1622,15 +1622,48 @@ static void pci_init_capabilities(struct pci_dev *dev)
        pci_enable_acs(dev);
 }
 
+/*
+ * This is the equivalent of pci_host_bridge_msi_domain that acts on
+ * devices. Firmware interfaces that can select the MSI domain on a
+ * per-device basis should be called from here.
+ */
+static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
+{
+       struct irq_domain *d;
+
+       /*
+        * If a domain has been set through the pcibios_add_device
+        * callback, then this is the one (platform code knows best).
+        */
+       d = dev_get_msi_domain(&dev->dev);
+       if (d)
+               return d;
+
+       /*
+        * Let's see if we have a firmware interface able to provide
+        * the domain.
+        */
+       d = pci_msi_get_device_domain(dev);
+       if (d)
+               return d;
+
+       return NULL;
+}
+
 static void pci_set_msi_domain(struct pci_dev *dev)
 {
+       struct irq_domain *d;
+
        /*
-        * If no domain has been set through the pcibios_add_device
-        * callback, inherit the default from the bus device.
+        * If the platform or firmware interfaces cannot supply a
+        * device-specific MSI domain, then inherit the default domain
+        * from the host bridge itself.
         */
-       if (!dev_get_msi_domain(&dev->dev))
-               dev_set_msi_domain(&dev->dev,
-                                  dev_get_msi_domain(&dev->bus->dev));
+       d = pci_dev_msi_domain(dev);
+       if (!d)
+               d = dev_get_msi_domain(&dev->bus->dev);
+
+       dev_set_msi_domain(&dev->dev, d);
 }
 
 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
index 2365a32a595e42b8ddf43c2dc32603749cf990d7..be3755c973e96d4c6ee48f0459c751ba6d225665 100644 (file)
@@ -823,9 +823,15 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
                }
 
                /* Now look up the logical CPU number */
-               for_each_possible_cpu(cpu)
-                       if (dn == of_cpu_device_node_get(cpu))
+               for_each_possible_cpu(cpu) {
+                       struct device_node *cpu_dn;
+
+                       cpu_dn = of_cpu_device_node_get(cpu);
+                       of_node_put(cpu_dn);
+
+                       if (dn == cpu_dn)
                                break;
+               }
 
                if (cpu >= nr_cpu_ids) {
                        pr_warn("Failed to find logical CPU for %s\n",
index 0062027afb1ef90335ae46782dba06448949b989..77a2e054fdea0f46ccd3d2841f5f837f80a985e1 100644 (file)
@@ -276,6 +276,7 @@ static const struct of_device_id phy_berlin_sata_of_match[] = {
        { .compatible = "marvell,berlin2q-sata-phy" },
        { },
 };
+MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match);
 
 static struct platform_driver phy_berlin_sata_driver = {
        .probe  = phy_berlin_sata_probe,
index 49a1ed0cef56fe7cbf9aed102b47149415f021f0..107cb57c3513c22642bb14420f47c469a39dcfa2 100644 (file)
@@ -432,6 +432,7 @@ out_disable_src:
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);
 
 static
 int ufs_qcom_phy_disable_vreg(struct phy *phy,
@@ -474,6 +475,7 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
                phy->is_ref_clk_enabled = false;
        }
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
 
 #define UFS_REF_CLK_EN (1 << 5)
 
@@ -517,11 +519,13 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
 {
        ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);
 
 void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
 {
        ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
 
 /* Turn ON M-PHY RMMI interface clocks */
 int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
@@ -550,6 +554,7 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);
 
 /* Turn OFF M-PHY RMMI interface clocks */
 void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
@@ -562,6 +567,7 @@ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
                phy->is_iface_clk_enabled = false;
        }
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);
 
 int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
 {
@@ -578,6 +584,7 @@ int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);
 
 int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
 {
@@ -595,6 +602,7 @@ int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);
 
 void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
                                          u8 major, u16 minor, u16 step)
@@ -605,6 +613,7 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
        ufs_qcom_phy->host_ctrl_rev_minor = minor;
        ufs_qcom_phy->host_ctrl_rev_step = step;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
 
 int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
 {
@@ -625,6 +634,7 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
 
 int ufs_qcom_phy_remove(struct phy *generic_phy,
                        struct ufs_qcom_phy *ufs_qcom_phy)
@@ -662,6 +672,7 @@ int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
        return ufs_qcom_phy->phy_spec_ops->
                        is_physical_coding_sublayer_ready(ufs_qcom_phy);
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);
 
 int ufs_qcom_phy_power_on(struct phy *generic_phy)
 {
index 6e0d9fa8e1d13f8d1ad0eec73b1040f594637df7..c7a05996d5c1a841a65e515e746d1008d3d47489 100644 (file)
@@ -17,8 +17,7 @@
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
-
-#include <asm/cmpxchg.h>
+#include <linux/atomic.h>
 
 #define USBHS_LPSTS                    0x02
 #define USBHS_UGCTRL                   0x80
index 5a5c073e72fe1ee6115bea310890b646b6b40a69..91d6f342c56596fc2e3fcff18213dba004546c3e 100644 (file)
@@ -98,6 +98,7 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
        struct device_node *child;
        struct regmap *grf;
        unsigned int reg_offset;
+       int err;
 
        grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
        if (IS_ERR(grf)) {
@@ -129,6 +130,11 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
                        return PTR_ERR(rk_phy->phy);
                }
                phy_set_drvdata(rk_phy->phy, rk_phy);
+
+               /* Only power up the USB PHY when it is in use, so disable it at init */
+               err = rockchip_usb_phy_power(rk_phy, 1);
+               if (err)
+                       return err;
        }
 
        phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
index 84dd2ed47a928e57effa8297597592733c703100..b422e4ed73f4266f7a8d13db4ec1096395163fc3 100644 (file)
@@ -67,6 +67,19 @@ config PINCTRL_AT91
        help
          Say Y here to enable the at91 pinctrl driver
 
+config PINCTRL_AT91PIO4
+       bool "AT91 PIO4 pinctrl driver"
+       depends on OF
+       depends on ARCH_AT91
+       select PINMUX
+       select GENERIC_PINCONF
+       select GPIOLIB
+       select GPIOLIB_IRQCHIP
+       select OF_GPIO
+       help
+         Say Y here to enable the at91 pinctrl/gpio driver for Atmel PIO4
+         controller available on sama5d2 SoC.
+
 config PINCTRL_AMD
        bool "AMD GPIO pin control"
        depends on GPIOLIB
index cad077c43fb7313af8b81bb9ca9a8a83f48e5224..738cb4929a49428890f543df5eab92976d40dbe7 100644 (file)
@@ -12,6 +12,7 @@ obj-$(CONFIG_PINCTRL_AS3722)  += pinctrl-as3722.o
 obj-$(CONFIG_PINCTRL_BF54x)    += pinctrl-adi2-bf54x.o
 obj-$(CONFIG_PINCTRL_BF60x)    += pinctrl-adi2-bf60x.o
 obj-$(CONFIG_PINCTRL_AT91)     += pinctrl-at91.o
+obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
 obj-$(CONFIG_PINCTRL_AMD)      += pinctrl-amd.o
 obj-$(CONFIG_PINCTRL_DIGICOLOR)        += pinctrl-digicolor.o
 obj-$(CONFIG_PINCTRL_FALCON)   += pinctrl-falcon.o
@@ -50,6 +51,6 @@ obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/
 obj-$(CONFIG_PINCTRL_SH_PFC)   += sh-pfc/
 obj-$(CONFIG_PLAT_SPEAR)       += spear/
 obj-$(CONFIG_ARCH_SUNXI)       += sunxi/
-obj-$(CONFIG_ARCH_UNIPHIER)    += uniphier/
+obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/
 obj-$(CONFIG_ARCH_VT8500)      += vt8500/
 obj-$(CONFIG_ARCH_MEDIATEK)    += mediatek/
index 8efa235ca1c96fa70af1ed1cecd86c5a53736221..a1ea565fcd46490e58d48e4363318c2ce6970ebc 100644 (file)
@@ -330,16 +330,6 @@ static inline void bcm2835_pinctrl_fsel_set(
        bcm2835_gpio_wr(pc, FSEL_REG(pin), val);
 }
 
-static int bcm2835_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void bcm2835_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static int bcm2835_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
        return pinctrl_gpio_direction_input(chip->base + offset);
@@ -375,8 +365,8 @@ static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 static struct gpio_chip bcm2835_gpio_chip = {
        .label = MODULE_NAME,
        .owner = THIS_MODULE,
-       .request = bcm2835_gpio_request,
-       .free = bcm2835_gpio_free,
+       .request = gpiochip_generic_request,
+       .free = gpiochip_generic_free,
        .direction_input = bcm2835_gpio_direction_input,
        .direction_output = bcm2835_gpio_direction_output,
        .get = bcm2835_gpio_get,
index 1ca783098e47463bd3c9a5f267f5fb00eb68ef88..12a48f498b75847926ab7bc96ecd78fdd248971a 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
 #include <linux/pinctrl/pinconf.h>
 #include <linux/pinctrl/pinconf-generic.h>
 
@@ -596,127 +595,6 @@ static const struct pinconf_ops cygnus_pconf_ops = {
        .pin_config_set = cygnus_pin_config_set,
 };
 
-/*
- * Map a GPIO in the local gpio_chip pin space to a pin in the Cygnus IOMUX
- * pinctrl pin space
- */
-struct cygnus_gpio_pin_range {
-       unsigned offset;
-       unsigned pin_base;
-       unsigned num_pins;
-};
-
-#define CYGNUS_PINRANGE(o, p, n) { .offset = o, .pin_base = p, .num_pins = n }
-
-/*
- * Pin mapping table for mapping local GPIO pins to Cygnus IOMUX pinctrl pins
- */
-static const struct cygnus_gpio_pin_range cygnus_gpio_pintable[] = {
-       CYGNUS_PINRANGE(0, 42, 1),
-       CYGNUS_PINRANGE(1, 44, 3),
-       CYGNUS_PINRANGE(4, 48, 1),
-       CYGNUS_PINRANGE(5, 50, 3),
-       CYGNUS_PINRANGE(8, 126, 1),
-       CYGNUS_PINRANGE(9, 155, 1),
-       CYGNUS_PINRANGE(10, 152, 1),
-       CYGNUS_PINRANGE(11, 154, 1),
-       CYGNUS_PINRANGE(12, 153, 1),
-       CYGNUS_PINRANGE(13, 127, 3),
-       CYGNUS_PINRANGE(16, 140, 1),
-       CYGNUS_PINRANGE(17, 145, 7),
-       CYGNUS_PINRANGE(24, 130, 10),
-       CYGNUS_PINRANGE(34, 141, 4),
-       CYGNUS_PINRANGE(38, 54, 1),
-       CYGNUS_PINRANGE(39, 56, 3),
-       CYGNUS_PINRANGE(42, 60, 3),
-       CYGNUS_PINRANGE(45, 64, 3),
-       CYGNUS_PINRANGE(48, 68, 2),
-       CYGNUS_PINRANGE(50, 84, 6),
-       CYGNUS_PINRANGE(56, 94, 6),
-       CYGNUS_PINRANGE(62, 72, 1),
-       CYGNUS_PINRANGE(63, 70, 1),
-       CYGNUS_PINRANGE(64, 80, 1),
-       CYGNUS_PINRANGE(65, 74, 3),
-       CYGNUS_PINRANGE(68, 78, 1),
-       CYGNUS_PINRANGE(69, 82, 1),
-       CYGNUS_PINRANGE(70, 156, 17),
-       CYGNUS_PINRANGE(87, 104, 12),
-       CYGNUS_PINRANGE(99, 102, 2),
-       CYGNUS_PINRANGE(101, 90, 4),
-       CYGNUS_PINRANGE(105, 116, 6),
-       CYGNUS_PINRANGE(111, 100, 2),
-       CYGNUS_PINRANGE(113, 122, 4),
-       CYGNUS_PINRANGE(123, 11, 1),
-       CYGNUS_PINRANGE(124, 38, 4),
-       CYGNUS_PINRANGE(128, 43, 1),
-       CYGNUS_PINRANGE(129, 47, 1),
-       CYGNUS_PINRANGE(130, 49, 1),
-       CYGNUS_PINRANGE(131, 53, 1),
-       CYGNUS_PINRANGE(132, 55, 1),
-       CYGNUS_PINRANGE(133, 59, 1),
-       CYGNUS_PINRANGE(134, 63, 1),
-       CYGNUS_PINRANGE(135, 67, 1),
-       CYGNUS_PINRANGE(136, 71, 1),
-       CYGNUS_PINRANGE(137, 73, 1),
-       CYGNUS_PINRANGE(138, 77, 1),
-       CYGNUS_PINRANGE(139, 79, 1),
-       CYGNUS_PINRANGE(140, 81, 1),
-       CYGNUS_PINRANGE(141, 83, 1),
-       CYGNUS_PINRANGE(142, 10, 1)
-};
-
-/*
- * The Cygnus IOMUX controller mainly supports group based mux configuration,
- * but certain pins can be muxed to GPIO individually. Only the ASIU GPIO
- * controller can support this, so it's an optional configuration
- *
- * Return -ENODEV means no support and that's fine
- */
-static int cygnus_gpio_pinmux_add_range(struct cygnus_gpio *chip)
-{
-       struct device_node *node = chip->dev->of_node;
-       struct device_node *pinmux_node;
-       struct platform_device *pinmux_pdev;
-       struct gpio_chip *gc = &chip->gc;
-       int i, ret = 0;
-
-       /* parse DT to find the phandle to the pinmux controller */
-       pinmux_node = of_parse_phandle(node, "pinmux", 0);
-       if (!pinmux_node)
-               return -ENODEV;
-
-       pinmux_pdev = of_find_device_by_node(pinmux_node);
-       /* no longer need the pinmux node */
-       of_node_put(pinmux_node);
-       if (!pinmux_pdev) {
-               dev_err(chip->dev, "failed to get pinmux device\n");
-               return -EINVAL;
-       }
-
-       /* now need to create the mapping between local GPIO and PINMUX pins */
-       for (i = 0; i < ARRAY_SIZE(cygnus_gpio_pintable); i++) {
-               ret = gpiochip_add_pin_range(gc, dev_name(&pinmux_pdev->dev),
-                                            cygnus_gpio_pintable[i].offset,
-                                            cygnus_gpio_pintable[i].pin_base,
-                                            cygnus_gpio_pintable[i].num_pins);
-               if (ret) {
-                       dev_err(chip->dev, "unable to add GPIO pin range\n");
-                       goto err_put_device;
-               }
-       }
-
-       chip->pinmux_is_supported = true;
-
-       /* no need for pinmux_pdev device reference anymore */
-       put_device(&pinmux_pdev->dev);
-       return 0;
-
-err_put_device:
-       put_device(&pinmux_pdev->dev);
-       gpiochip_remove_pin_ranges(gc);
-       return ret;
-}
-
 /*
  * Cygnus GPIO controller supports some PINCONF related configurations such as
  * pull up, pull down, and drive strength, when the pin is configured to GPIO
@@ -851,18 +729,15 @@ static int cygnus_gpio_probe(struct platform_device *pdev)
        gc->set = cygnus_gpio_set;
        gc->get = cygnus_gpio_get;
 
+       chip->pinmux_is_supported = of_property_read_bool(dev->of_node,
+                                                       "gpio-ranges");
+
        ret = gpiochip_add(gc);
        if (ret < 0) {
                dev_err(dev, "unable to add GPIO chip\n");
                return ret;
        }
 
-       ret = cygnus_gpio_pinmux_add_range(chip);
-       if (ret && ret != -ENODEV) {
-               dev_err(dev, "unable to add GPIO pin range\n");
-               goto err_rm_gpiochip;
-       }
-
        ret = cygnus_gpio_register_pinconf(chip);
        if (ret) {
                dev_err(dev, "unable to register pinconf\n");
index b18322bc7bf9d0bdd220d7b38968b0b7e8938c8b..8fe6ad7795dc32b7f6a4549339760b6700b3eae9 100644 (file)
@@ -1,4 +1,4 @@
-if ARCH_BERLIN
+if (ARCH_BERLIN || COMPILE_TEST)
 
 config PINCTRL_BERLIN
        bool
@@ -6,15 +6,23 @@ config PINCTRL_BERLIN
        select REGMAP_MMIO
 
 config PINCTRL_BERLIN_BG2
-       bool
+       def_bool MACH_BERLIN_BG2
+       depends on OF
        select PINCTRL_BERLIN
 
 config PINCTRL_BERLIN_BG2CD
-       bool
+       def_bool MACH_BERLIN_BG2CD
+       depends on OF
        select PINCTRL_BERLIN
 
 config PINCTRL_BERLIN_BG2Q
-       bool
+       def_bool MACH_BERLIN_BG2Q
+       depends on OF
+       select PINCTRL_BERLIN
+
+config PINCTRL_BERLIN_BG4CT
+       bool "Marvell berlin4ct pin controller driver"
+       depends on OF
        select PINCTRL_BERLIN
 
 endif
index deb0c6baf3161fdc8d6183c2fbbe0d2fbe27cf69..06f94029ad66715995288cf842b58e14cac573a9 100644 (file)
@@ -2,3 +2,4 @@ obj-$(CONFIG_PINCTRL_BERLIN)            += berlin.o
 obj-$(CONFIG_PINCTRL_BERLIN_BG2)       += berlin-bg2.o
 obj-$(CONFIG_PINCTRL_BERLIN_BG2CD)     += berlin-bg2cd.o
 obj-$(CONFIG_PINCTRL_BERLIN_BG2Q)      += berlin-bg2q.o
+obj-$(CONFIG_PINCTRL_BERLIN_BG4CT)     += berlin-bg4ct.o
index 274c5535b53155a60757151f97d7cf7e469ae453..fabe728ae268305400bde48abba549e33f49ec94 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2014 Marvell Technology Group Ltd.
  *
- * Antoine TÃ\83©nart <antoine.tenart@free-electrons.com>
+ * Antoine Ténart <antoine.tenart@free-electrons.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -246,6 +246,6 @@ static struct platform_driver berlin2_pinctrl_driver = {
 };
 module_platform_driver(berlin2_pinctrl_driver);
 
-MODULE_AUTHOR("Antoine TÃ\83©nart <antoine.tenart@free-electrons.com>");
+MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>");
 MODULE_DESCRIPTION("Marvell Berlin BG2 pinctrl driver");
 MODULE_LICENSE("GPL");
index 0cb793a3552a437162c6176e30231c1ffb51a318..ad8c7586137312fd77780b83a1b4b4795f837e0f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2014 Marvell Technology Group Ltd.
  *
- * Antoine TÃ\83©nart <antoine.tenart@free-electrons.com>
+ * Antoine Ténart <antoine.tenart@free-electrons.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
 
 static const struct berlin_desc_group berlin2cd_soc_pinctrl_groups[] = {
        /* G */
-       BERLIN_PINCTRL_GROUP("G0", 0x00, 0x1, 0x00,
+       BERLIN_PINCTRL_GROUP("G0", 0x00, 0x3, 0x00,
                BERLIN_PINCTRL_FUNCTION(0x0, "jtag"),
                BERLIN_PINCTRL_FUNCTION(0x1, "gpio"),
                BERLIN_PINCTRL_FUNCTION(0x2, "led"),
                BERLIN_PINCTRL_FUNCTION(0x3, "pwm")),
-       BERLIN_PINCTRL_GROUP("G1", 0x00, 0x2, 0x01,
+       BERLIN_PINCTRL_GROUP("G1", 0x00, 0x3, 0x03,
                BERLIN_PINCTRL_FUNCTION(0x0, "gpio"),
                BERLIN_PINCTRL_FUNCTION(0x1, "sd0"),
                BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"),
                BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")),
-       BERLIN_PINCTRL_GROUP("G2", 0x00, 0x2, 0x02,
+       BERLIN_PINCTRL_GROUP("G2", 0x00, 0x3, 0x06,
                BERLIN_PINCTRL_FUNCTION(0x0, "gpio"),
                BERLIN_PINCTRL_FUNCTION(0x1, "sd0"),
                BERLIN_PINCTRL_FUNCTION(0x2, "fe"),
                BERLIN_PINCTRL_FUNCTION(0x3, "pll"),
                BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"),
                BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")),
-       BERLIN_PINCTRL_GROUP("G3", 0x00, 0x2, 0x04,
+       BERLIN_PINCTRL_GROUP("G3", 0x00, 0x3, 0x09,
                BERLIN_PINCTRL_FUNCTION(0x0, "gpio"),
                BERLIN_PINCTRL_FUNCTION(0x1, "sd0"),
                BERLIN_PINCTRL_FUNCTION(0x2, "twsi2"),
@@ -44,7 +44,7 @@ static const struct berlin_desc_group berlin2cd_soc_pinctrl_groups[] = {
                BERLIN_PINCTRL_FUNCTION(0x4, "fe"),
                BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"),
                BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")),
-       BERLIN_PINCTRL_GROUP("G4", 0x00, 0x2, 0x06,
+       BERLIN_PINCTRL_GROUP("G4", 0x00, 0x3, 0x0c,
                BERLIN_PINCTRL_FUNCTION(0x0, "gpio"),
                BERLIN_PINCTRL_FUNCTION(0x1, "sd0"),
                BERLIN_PINCTRL_FUNCTION(0x2, "twsi3"),
@@ -52,7 +52,7 @@ static const struct berlin_desc_group berlin2cd_soc_pinctrl_groups[] = {
                BERLIN_PINCTRL_FUNCTION(0x4, "pwm"),
                BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"),
                BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")),
-       BERLIN_PINCTRL_GROUP("G5", 0x00, 0x3, 0x08,
+       BERLIN_PINCTRL_GROUP("G5", 0x00, 0x3, 0x0f,
                BERLIN_PINCTRL_FUNCTION(0x0, "gpio"),
                BERLIN_PINCTRL_FUNCTION(0x1, "sd0"),
                BERLIN_PINCTRL_FUNCTION(0x2, "twsi3"),
@@ -60,64 +60,66 @@ static const struct berlin_desc_group berlin2cd_soc_pinctrl_groups[] = {
                BERLIN_PINCTRL_FUNCTION(0x4, "pwm"),
                BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"),
                BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")),
-       BERLIN_PINCTRL_GROUP("G6", 0x00, 0x2, 0x0b,
+       BERLIN_PINCTRL_GROUP("G6", 0x00, 0x3, 0x12,
                BERLIN_PINCTRL_FUNCTION(0x0, "uart0"),  /* RX/TX */
                BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
-       BERLIN_PINCTRL_GROUP("G7", 0x00, 0x3, 0x0d,
+       BERLIN_PINCTRL_GROUP("G7", 0x00, 0x3, 0x15,
                BERLIN_PINCTRL_FUNCTION(0x0, "eddc"),
                BERLIN_PINCTRL_FUNCTION(0x1, "twsi1"),
                BERLIN_PINCTRL_FUNCTION(0x2, "gpio")),
-       BERLIN_PINCTRL_GROUP("G8", 0x00, 0x3, 0x10,
+       BERLIN_PINCTRL_GROUP("G8", 0x00, 0x3, 0x18,
                BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
                BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
-       BERLIN_PINCTRL_GROUP("G9", 0x00, 0x3, 0x13,
+       BERLIN_PINCTRL_GROUP("G9", 0x00, 0x3, 0x1b,
                BERLIN_PINCTRL_FUNCTION(0x0, "gpio"),
                BERLIN_PINCTRL_FUNCTION(0x1, "spi1"), /* SS1n/SS2n */
-               BERLIN_PINCTRL_FUNCTION(0x2, "twsi0")),
-       BERLIN_PINCTRL_GROUP("G10", 0x00, 0x2, 0x16,
+               BERLIN_PINCTRL_FUNCTION(0x3, "twsi0")),
+       BERLIN_PINCTRL_GROUP("G10", 0x00, 0x2, 0x1e,
                BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* CLK */
                BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
-       BERLIN_PINCTRL_GROUP("G11", 0x00, 0x2, 0x18,
+       BERLIN_PINCTRL_GROUP("G11", 0x04, 0x2, 0x00,
                BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDI/SDO */
                BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
-       BERLIN_PINCTRL_GROUP("G12", 0x00, 0x3, 0x1a,
+       BERLIN_PINCTRL_GROUP("G12", 0x04, 0x3, 0x02,
                BERLIN_PINCTRL_FUNCTION(0x0, "usb1"),
                BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
-       BERLIN_PINCTRL_GROUP("G13", 0x04, 0x3, 0x00,
+       BERLIN_PINCTRL_GROUP("G13", 0x04, 0x3, 0x05,
                BERLIN_PINCTRL_FUNCTION(0x0, "nand"),
                BERLIN_PINCTRL_FUNCTION(0x1, "usb0_dbg"),
                BERLIN_PINCTRL_FUNCTION(0x2, "usb1_dbg")),
-       BERLIN_PINCTRL_GROUP("G14", 0x04, 0x1, 0x03,
+       BERLIN_PINCTRL_GROUP("G14", 0x04, 0x1, 0x08,
                BERLIN_PINCTRL_FUNCTION(0x0, "nand"),
                BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
-       BERLIN_PINCTRL_GROUP("G15", 0x04, 0x2, 0x04,
+       BERLIN_PINCTRL_GROUP("G15", 0x04, 0x3, 0x09,
                BERLIN_PINCTRL_FUNCTION(0x0, "jtag"),
                BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
-       BERLIN_PINCTRL_GROUP("G16", 0x04, 0x3, 0x06,
+       BERLIN_PINCTRL_GROUP("G16", 0x04, 0x3, 0x0c,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
-       BERLIN_PINCTRL_GROUP("G17", 0x04, 0x3, 0x09,
+       BERLIN_PINCTRL_GROUP("G17", 0x04, 0x3, 0x0f,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
-       BERLIN_PINCTRL_GROUP("G18", 0x04, 0x1, 0x0c,
+       BERLIN_PINCTRL_GROUP("G18", 0x04, 0x2, 0x12,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
-       BERLIN_PINCTRL_GROUP("G19", 0x04, 0x1, 0x0d,
+       BERLIN_PINCTRL_GROUP("G19", 0x04, 0x2, 0x14,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
-       BERLIN_PINCTRL_GROUP("G20", 0x04, 0x1, 0x0e,
+       BERLIN_PINCTRL_GROUP("G20", 0x04, 0x2, 0x16,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
-       BERLIN_PINCTRL_GROUP("G21", 0x04, 0x3, 0x0f,
+       BERLIN_PINCTRL_GROUP("G21", 0x04, 0x3, 0x18,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
-       BERLIN_PINCTRL_GROUP("G22", 0x04, 0x3, 0x12,
+       BERLIN_PINCTRL_GROUP("G22", 0x04, 0x3, 0x1b,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
-       BERLIN_PINCTRL_GROUP("G23", 0x04, 0x3, 0x15,
+       BERLIN_PINCTRL_GROUP("G23", 0x08, 0x3, 0x00,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
-       BERLIN_PINCTRL_GROUP("G24", 0x04, 0x2, 0x18,
+       BERLIN_PINCTRL_GROUP("G24", 0x08, 0x2, 0x03,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
-       BERLIN_PINCTRL_GROUP("G25", 0x04, 0x2, 0x1a,
+       BERLIN_PINCTRL_GROUP("G25", 0x08, 0x2, 0x05,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
-       BERLIN_PINCTRL_GROUP("G26", 0x04, 0x1, 0x1c,
+       BERLIN_PINCTRL_GROUP("G26", 0x08, 0x1, 0x07,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
-       BERLIN_PINCTRL_GROUP("G27", 0x04, 0x1, 0x1d,
+       BERLIN_PINCTRL_GROUP("G27", 0x08, 0x2, 0x08,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
-       BERLIN_PINCTRL_GROUP("G28", 0x04, 0x2, 0x1e,
+       BERLIN_PINCTRL_GROUP("G28", 0x08, 0x3, 0x0a,
+               BERLIN_PINCTRL_FUNCTION_UNKNOWN),
+       BERLIN_PINCTRL_GROUP("G29", 0x08, 0x3, 0x0d,
                BERLIN_PINCTRL_FUNCTION_UNKNOWN),
 };
 
@@ -189,6 +191,6 @@ static struct platform_driver berlin2cd_pinctrl_driver = {
 };
 module_platform_driver(berlin2cd_pinctrl_driver);
 
-MODULE_AUTHOR("Antoine TÃ\83©nart <antoine.tenart@free-electrons.com>");
+MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>");
 MODULE_DESCRIPTION("Marvell Berlin BG2CD pinctrl driver");
 MODULE_LICENSE("GPL");
index a466054a8206e31f15edab4c85d933d8652812b8..cd171aea8ca8da2f6ae7aac096e90ee61ba644b9 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2014 Marvell Technology Group Ltd.
  *
- * Antoine TÃ\83©nart <antoine.tenart@free-electrons.com>
+ * Antoine Ténart <antoine.tenart@free-electrons.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -408,6 +408,6 @@ static struct platform_driver berlin2q_pinctrl_driver = {
 };
 module_platform_driver(berlin2q_pinctrl_driver);
 
-MODULE_AUTHOR("Antoine TÃ\83©nart <antoine.tenart@free-electrons.com>");
+MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>");
 MODULE_DESCRIPTION("Marvell Berlin BG2Q pinctrl driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
new file mode 100644 (file)
index 0000000..0917204
--- /dev/null
@@ -0,0 +1,503 @@
+/*
+ * Marvell berlin4ct pinctrl driver
+ *
+ * Copyright (C) 2015 Marvell Technology Group Ltd.
+ *
+ * Author: Jisheng Zhang <jszhang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "berlin.h"
+
+static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
+       BERLIN_PINCTRL_GROUP("EMMC_RSTn", 0x0, 0x3, 0x00,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "emmc"), /* RSTn */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* GPIO47 */
+       BERLIN_PINCTRL_GROUP("NAND_IO0", 0x0, 0x3, 0x03,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO0 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* RXD0 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* CLK */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO0 */
+       BERLIN_PINCTRL_GROUP("NAND_IO1", 0x0, 0x3, 0x06,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO1 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* RXD1 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* CDn */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO1 */
+       BERLIN_PINCTRL_GROUP("NAND_IO2", 0x0, 0x3, 0x09,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO2 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* RXD2 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* DAT0 */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO2 */
+       BERLIN_PINCTRL_GROUP("NAND_IO3", 0x0, 0x3, 0x0c,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO3 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* RXD3 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* DAT1 */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO3 */
+       BERLIN_PINCTRL_GROUP("NAND_IO4", 0x0, 0x3, 0x0f,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO4 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* RXC */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* DAT2 */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO4 */
+       BERLIN_PINCTRL_GROUP("NAND_IO5", 0x0, 0x3, 0x12,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO5 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* RXCTL */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* DAT3 */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO5 */
+       BERLIN_PINCTRL_GROUP("NAND_IO6", 0x0, 0x3, 0x15,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO6 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* MDC */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* CMD */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO6 */
+       BERLIN_PINCTRL_GROUP("NAND_IO7", 0x0, 0x3, 0x18,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO7 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* MDIO */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* WP */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO7 */
+       BERLIN_PINCTRL_GROUP("NAND_ALE", 0x0, 0x3, 0x1b,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* ALE */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* TXD0 */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO8 */
+       BERLIN_PINCTRL_GROUP("NAND_CLE", 0x4, 0x3, 0x00,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* CLE */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* TXD1 */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO9 */
+       BERLIN_PINCTRL_GROUP("NAND_WEn", 0x4, 0x3, 0x03,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* WEn */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* TXD2 */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO10 */
+       BERLIN_PINCTRL_GROUP("NAND_REn", 0x4, 0x3, 0x06,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* REn */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* TXD3 */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO11 */
+       BERLIN_PINCTRL_GROUP("NAND_WPn", 0x4, 0x3, 0x09,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* WPn */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO12 */
+       BERLIN_PINCTRL_GROUP("NAND_CEn", 0x4, 0x3, 0x0c,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* CEn */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* TXC */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO13 */
+       BERLIN_PINCTRL_GROUP("NAND_RDY", 0x4, 0x3, 0x0f,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* RDY */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* TXCTL */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO14 */
+       BERLIN_PINCTRL_GROUP("SD0_CLK", 0x4, 0x3, 0x12,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO29 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* CLK */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts4"), /* CLK */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG8 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG8 */
+       BERLIN_PINCTRL_GROUP("SD0_DAT0", 0x4, 0x3, 0x15,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO30 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT0 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts4"), /* SOP */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG9 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG9 */
+       BERLIN_PINCTRL_GROUP("SD0_DAT1", 0x4, 0x3, 0x18,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO31 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT1 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts4"), /* SD */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG10 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG10 */
+       BERLIN_PINCTRL_GROUP("SD0_DAT2", 0x4, 0x3, 0x1b,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO32 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT2 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts4"), /* VALD */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG11 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG11 */
+       BERLIN_PINCTRL_GROUP("SD0_DAT3", 0x8, 0x3, 0x00,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO33 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT3 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts5"), /* CLK */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG12 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG12 */
+       BERLIN_PINCTRL_GROUP("SD0_CDn", 0x8, 0x3, 0x03,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO34 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* CDn */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts5"), /* SOP */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG13 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG13 */
+       BERLIN_PINCTRL_GROUP("SD0_CMD", 0x8, 0x3, 0x06,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO35 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* CMD */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts5"), /* SD */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG14 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG14 */
+       BERLIN_PINCTRL_GROUP("SD0_WP", 0x8, 0x3, 0x09,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO36 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* WP */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts5"), /* VALD */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG15 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG15 */
+       BERLIN_PINCTRL_GROUP("STS0_CLK", 0x8, 0x3, 0x0c,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO21 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sts0"), /* CLK */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "cpupll"), /* CLKO */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG0 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG0 */
+       BERLIN_PINCTRL_GROUP("STS0_SOP", 0x8, 0x3, 0x0f,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO22 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sts0"), /* SOP */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "syspll"), /* CLKO */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG1 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG1 */
+       BERLIN_PINCTRL_GROUP("STS0_SD", 0x8, 0x3, 0x12,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO23 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sts0"), /* SD */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "mempll"), /* CLKO */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG2 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG2 */
+       BERLIN_PINCTRL_GROUP("STS0_VALD", 0x8, 0x3, 0x15,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO24 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sts0"), /* VALD */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG3 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG3 */
+       BERLIN_PINCTRL_GROUP("STS1_CLK", 0x8, 0x3, 0x18,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO25 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sts1"), /* CLK */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "pwm0"),
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG4 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG4 */
+       BERLIN_PINCTRL_GROUP("STS1_SOP", 0x8, 0x3, 0x1b,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO26 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sts1"), /* SOP */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "pwm1"),
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG5 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG5 */
+       BERLIN_PINCTRL_GROUP("STS1_SD", 0xc, 0x3, 0x00,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO27 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sts1"), /* SD */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "pwm2"),
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG6 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG6 */
+       BERLIN_PINCTRL_GROUP("STS1_VALD", 0xc, 0x3, 0x03,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO28 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sts1"), /* VALD */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "pwm3"),
+                       BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG7 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG7 */
+       BERLIN_PINCTRL_GROUP("SCRD0_RST", 0xc, 0x3, 0x06,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO15 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* RST */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* CLK */
+       BERLIN_PINCTRL_GROUP("SCRD0_DCLK", 0xc, 0x3, 0x09,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO16 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* DCLK */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* CMD */
+       BERLIN_PINCTRL_GROUP("SCRD0_GPIO0", 0xc, 0x3, 0x0c,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO17 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* SCRD0 GPIO0 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sif"), /* DIO */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT0 */
+       BERLIN_PINCTRL_GROUP("SCRD0_GPIO1", 0xc, 0x3, 0x0f,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO18 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* SCRD0 GPIO1 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sif"), /* CLK */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT1 */
+       BERLIN_PINCTRL_GROUP("SCRD0_DIO", 0xc, 0x3, 0x12,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO19 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* DIO */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sif"), /* DEN */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT2 */
+       BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
+       BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts2")), /* CLK */
+       BERLIN_PINCTRL_GROUP("SPI1_SS1n", 0xc, 0x3, 0x1b,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS1n */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO38 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts2"), /* SOP */
+                       BERLIN_PINCTRL_FUNCTION(0x4, "pwm1")),
+       BERLIN_PINCTRL_GROUP("SPI1_SS2n", 0x10, 0x3, 0x00,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS2n */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO39 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts2"), /* SD */
+                       BERLIN_PINCTRL_FUNCTION(0x4, "pwm0")),
+       BERLIN_PINCTRL_GROUP("SPI1_SS3n", 0x10, 0x3, 0x03,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS3n */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO40 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts2")), /* VALD */
+       BERLIN_PINCTRL_GROUP("SPI1_SCLK", 0x10, 0x3, 0x06,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SCLK */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO41 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts3")), /* CLK */
+       BERLIN_PINCTRL_GROUP("SPI1_SDO", 0x10, 0x3, 0x09,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDO */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO42 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts3")), /* SOP */
+       BERLIN_PINCTRL_GROUP("SPI1_SDI", 0x10, 0x3, 0x0c,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDI */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO43 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts3")), /* SD */
+       BERLIN_PINCTRL_GROUP("USB0_DRV_VBUS", 0x10, 0x3, 0x0f,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO44 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "usb0"), /* VBUS */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "sts3")), /* VALD */
+       BERLIN_PINCTRL_GROUP("TW0_SCL", 0x10, 0x3, 0x12,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO45 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "tw0")), /* SCL */
+       BERLIN_PINCTRL_GROUP("TW0_SDA", 0x10, 0x3, 0x15,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO46 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "tw0")), /* SDA */
+};
+
+static const struct berlin_desc_group berlin4ct_avio_pinctrl_groups[] = {
+       BERLIN_PINCTRL_GROUP("TX_EDDC_SCL", 0x0, 0x3, 0x00,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO0 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "tx_eddc"), /* SCL */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "tw1")), /* SCL */
+       BERLIN_PINCTRL_GROUP("TX_EDDC_SDA", 0x0, 0x3, 0x03,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO1 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "tx_eddc"), /* SDA */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "tw1")), /* SDA */
+       BERLIN_PINCTRL_GROUP("I2S1_LRCKO", 0x0, 0x3, 0x06,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO2 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* LRCKO */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sts6"), /* CLK */
+                       BERLIN_PINCTRL_FUNCTION(0x4, "adac"), /* DBG0 */
+                       BERLIN_PINCTRL_FUNCTION(0x6, "sd1b"), /* CLK */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "avio")), /* DBG0 */
+       BERLIN_PINCTRL_GROUP("I2S1_BCLKO", 0x0, 0x3, 0x09,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO3 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* BCLKO */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sts6"), /* SOP */
+                       BERLIN_PINCTRL_FUNCTION(0x4, "adac"), /* DBG1 */
+                       BERLIN_PINCTRL_FUNCTION(0x6, "sd1b"), /* CMD */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "avio")), /* DBG1 */
+       BERLIN_PINCTRL_GROUP("I2S1_DO", 0x0, 0x3, 0x0c,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO4 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sts6"), /* SD */
+                       BERLIN_PINCTRL_FUNCTION(0x4, "adac"), /* DBG2 */
+                       BERLIN_PINCTRL_FUNCTION(0x6, "sd1b"), /* DAT0 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "avio")), /* DBG2 */
+       BERLIN_PINCTRL_GROUP("I2S1_MCLK", 0x0, 0x3, 0x0f,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO5 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* MCLK */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sts6"), /* VALD */
+                       BERLIN_PINCTRL_FUNCTION(0x4, "adac_test"), /* MCLK */
+                       BERLIN_PINCTRL_FUNCTION(0x6, "sd1b"), /* DAT1 */
+                       BERLIN_PINCTRL_FUNCTION(0x7, "avio")), /* DBG3 */
+       BERLIN_PINCTRL_GROUP("SPDIFO", 0x0, 0x3, 0x12,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO6 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "spdifo"),
+                       BERLIN_PINCTRL_FUNCTION(0x2, "avpll"), /* CLKO */
+                       BERLIN_PINCTRL_FUNCTION(0x4, "adac")), /* DBG3 */
+       BERLIN_PINCTRL_GROUP("I2S2_MCLK", 0x0, 0x3, 0x15,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO7 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* MCLK */
+                       BERLIN_PINCTRL_FUNCTION(0x4, "hdmi"), /* FBCLK */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "pdm")), /* CLKO */
+       BERLIN_PINCTRL_GROUP("I2S2_LRCKI", 0x0, 0x3, 0x18,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO8 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* LRCKI */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "pwm0"),
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sts7"), /* CLK */
+                       BERLIN_PINCTRL_FUNCTION(0x4, "adac_test"), /* LRCK */
+                       BERLIN_PINCTRL_FUNCTION(0x6, "sd1b")), /* DAT2 */
+       BERLIN_PINCTRL_GROUP("I2S2_BCLKI", 0x0, 0x3, 0x1b,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO9 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* BCLKI */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "pwm1"),
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sts7"), /* SOP */
+                       BERLIN_PINCTRL_FUNCTION(0x4, "adac_test"), /* BCLK */
+                       BERLIN_PINCTRL_FUNCTION(0x6, "sd1b")), /* DAT3 */
+       BERLIN_PINCTRL_GROUP("I2S2_DI0", 0x4, 0x3, 0x00,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO10 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI0 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "pwm2"),
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sts7"), /* SD */
+                       BERLIN_PINCTRL_FUNCTION(0x4, "adac_test"), /* SDIN */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "pdm"), /* DI0 */
+                       BERLIN_PINCTRL_FUNCTION(0x6, "sd1b")), /* CDn */
+       BERLIN_PINCTRL_GROUP("I2S2_DI1", 0x4, 0x3, 0x03,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO11 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI1 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "pwm3"),
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sts7"), /* VALD */
+                       BERLIN_PINCTRL_FUNCTION(0x4, "adac_test"), /* PWMCLK */
+                       BERLIN_PINCTRL_FUNCTION(0x5, "pdm"), /* DI1 */
+                       BERLIN_PINCTRL_FUNCTION(0x6, "sd1b")), /* WP */
+};
+
+static const struct berlin_desc_group berlin4ct_sysmgr_pinctrl_groups[] = {
+       BERLIN_PINCTRL_GROUP("SM_TW2_SCL", 0x0, 0x3, 0x00,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO19 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "tw2")), /* SCL */
+       BERLIN_PINCTRL_GROUP("SM_TW2_SDA", 0x0, 0x3, 0x03,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO20 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "tw2")), /* SDA */
+       BERLIN_PINCTRL_GROUP("SM_TW3_SCL", 0x0, 0x3, 0x06,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO21 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "tw3")), /* SCL */
+       BERLIN_PINCTRL_GROUP("SM_TW3_SDA", 0x0, 0x3, 0x09,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO22 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "tw3")), /* SDA */
+       BERLIN_PINCTRL_GROUP("SM_TMS", 0x0, 0x3, 0x0c,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TMS */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* SM GPIO0 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "pwm0")),
+       BERLIN_PINCTRL_GROUP("SM_TDI", 0x0, 0x3, 0x0f,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TDI */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* SM GPIO1 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "pwm1")),
+       BERLIN_PINCTRL_GROUP("SM_TDO", 0x0, 0x3, 0x12,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TDO */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO2 */
+       BERLIN_PINCTRL_GROUP("SM_URT0_TXD", 0x0, 0x3, 0x15,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* TXD */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO3 */
+       BERLIN_PINCTRL_GROUP("SM_URT0_RXD", 0x0, 0x3, 0x18,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* RXD */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO4 */
+       BERLIN_PINCTRL_GROUP("SM_URT1_TXD", 0x0, 0x3, 0x1b,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO5 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* TXD */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "eth1"), /* RXCLK */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "pwm2"),
+                       BERLIN_PINCTRL_FUNCTION(0x4, "timer0"),
+                       BERLIN_PINCTRL_FUNCTION(0x5, "clk_25m")),
+       BERLIN_PINCTRL_GROUP("SM_URT1_RXD", 0x4, 0x3, 0x00,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO6 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* RXD */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "pwm3"),
+                       BERLIN_PINCTRL_FUNCTION(0x4, "timer1")),
+       BERLIN_PINCTRL_GROUP("SM_SPI2_SS0n", 0x4, 0x3, 0x03,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "spi2"), /* SS0n */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO7 */
+       BERLIN_PINCTRL_GROUP("SM_SPI2_SS1n", 0x4, 0x3, 0x06,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO8 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "spi2")), /* SS1n */
+       BERLIN_PINCTRL_GROUP("SM_SPI2_SS2n", 0x4, 0x3, 0x09,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO9 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "spi2"), /* SS2n */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "eth1"), /* MDC */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "pwm0"),
+                       BERLIN_PINCTRL_FUNCTION(0x4, "timer0"),
+                       BERLIN_PINCTRL_FUNCTION(0x5, "clk_25m")),
+       BERLIN_PINCTRL_GROUP("SM_SPI2_SS3n", 0x4, 0x3, 0x0c,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO10 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "spi2"), /* SS3n */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "eth1"), /* MDIO */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "pwm1"),
+                       BERLIN_PINCTRL_FUNCTION(0x4, "timer1")),
+       BERLIN_PINCTRL_GROUP("SM_SPI2_SDO", 0x4, 0x3, 0x0f,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "spi2"), /* SDO */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO11 */
+       BERLIN_PINCTRL_GROUP("SM_SPI2_SDI", 0x4, 0x3, 0x12,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "spi2"), /* SDI */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO12 */
+       BERLIN_PINCTRL_GROUP("SM_SPI2_SCLK", 0x4, 0x3, 0x15,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "spi2"), /* SCLK */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO13 */
+       BERLIN_PINCTRL_GROUP("SM_FE_LED0", 0x4, 0x3, 0x18,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO14 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "led")), /* LED0 */
+       BERLIN_PINCTRL_GROUP("SM_FE_LED1", 0x4, 0x3, 0x1b,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "pwr"),
+                       BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* SM GPIO 15 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "led")), /* LED1 */
+       BERLIN_PINCTRL_GROUP("SM_FE_LED2", 0x8, 0x3, 0x00,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO16 */
+                       BERLIN_PINCTRL_FUNCTION(0x2, "led")), /* LED2 */
+       BERLIN_PINCTRL_GROUP("SM_HDMI_HPD", 0x8, 0x3, 0x03,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO17 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "hdmi")), /* HPD */
+       BERLIN_PINCTRL_GROUP("SM_HDMI_CEC", 0x8, 0x3, 0x06,
+                       BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO18 */
+                       BERLIN_PINCTRL_FUNCTION(0x1, "hdmi")), /* CEC */
+};
+
+static const struct berlin_pinctrl_desc berlin4ct_soc_pinctrl_data = {
+       .groups = berlin4ct_soc_pinctrl_groups,
+       .ngroups = ARRAY_SIZE(berlin4ct_soc_pinctrl_groups),
+};
+
+static const struct berlin_pinctrl_desc berlin4ct_avio_pinctrl_data = {
+       .groups = berlin4ct_avio_pinctrl_groups,
+       .ngroups = ARRAY_SIZE(berlin4ct_avio_pinctrl_groups),
+};
+
+static const struct berlin_pinctrl_desc berlin4ct_sysmgr_pinctrl_data = {
+       .groups = berlin4ct_sysmgr_pinctrl_groups,
+       .ngroups = ARRAY_SIZE(berlin4ct_sysmgr_pinctrl_groups),
+};
+
+static const struct of_device_id berlin4ct_pinctrl_match[] = {
+       {
+               .compatible = "marvell,berlin4ct-soc-pinctrl",
+               .data = &berlin4ct_soc_pinctrl_data,
+       },
+       {
+               .compatible = "marvell,berlin4ct-avio-pinctrl",
+               .data = &berlin4ct_avio_pinctrl_data,
+       },
+       {
+               .compatible = "marvell,berlin4ct-system-pinctrl",
+               .data = &berlin4ct_sysmgr_pinctrl_data,
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, berlin4ct_pinctrl_match);
+
+static int berlin4ct_pinctrl_probe(struct platform_device *pdev)
+{
+       const struct of_device_id *match =
+               of_match_device(berlin4ct_pinctrl_match, &pdev->dev);
+       struct regmap_config *rmconfig;
+       struct regmap *regmap;
+       struct resource *res;
+       void __iomem *base;
+
+       rmconfig = devm_kzalloc(&pdev->dev, sizeof(*rmconfig), GFP_KERNEL);
+       if (!rmconfig)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       rmconfig->reg_bits = 32,
+       rmconfig->val_bits = 32,
+       rmconfig->reg_stride = 4,
+       rmconfig->max_register = resource_size(res);
+
+       regmap = devm_regmap_init_mmio(&pdev->dev, base, rmconfig);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       return berlin_pinctrl_probe_regmap(pdev, match->data, regmap);
+}
+
+static struct platform_driver berlin4ct_pinctrl_driver = {
+       .probe  = berlin4ct_pinctrl_probe,
+       .driver = {
+               .name = "berlin4ct-pinctrl",
+               .of_match_table = berlin4ct_pinctrl_match,
+       },
+};
+module_platform_driver(berlin4ct_pinctrl_driver);
+
+MODULE_AUTHOR("Jisheng Zhang <jszhang@marvell.com>");
+MODULE_DESCRIPTION("Marvell berlin4ct pinctrl driver");
+MODULE_LICENSE("GPL");
index f49580617055a25d6f3caa55189b473b880eb850..46f2b4818da3ae3a1c736b172916ab000252d3a2 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2014 Marvell Technology Group Ltd.
  *
- * Antoine TÃ\83©nart <antoine.tenart@free-electrons.com>
+ * Antoine Ténart <antoine.tenart@free-electrons.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -292,20 +292,14 @@ static struct pinctrl_desc berlin_pctrl_desc = {
        .owner          = THIS_MODULE,
 };
 
-int berlin_pinctrl_probe(struct platform_device *pdev,
-                        const struct berlin_pinctrl_desc *desc)
+int berlin_pinctrl_probe_regmap(struct platform_device *pdev,
+                               const struct berlin_pinctrl_desc *desc,
+                               struct regmap *regmap)
 {
        struct device *dev = &pdev->dev;
-       struct device_node *parent_np = of_get_parent(dev->of_node);
        struct berlin_pinctrl *pctrl;
-       struct regmap *regmap;
        int ret;
 
-       regmap = syscon_node_to_regmap(parent_np);
-       of_node_put(parent_np);
-       if (IS_ERR(regmap))
-               return PTR_ERR(regmap);
-
        pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
        if (!pctrl)
                return -ENOMEM;
@@ -330,3 +324,17 @@ int berlin_pinctrl_probe(struct platform_device *pdev,
 
        return 0;
 }
+
+int berlin_pinctrl_probe(struct platform_device *pdev,
+                        const struct berlin_pinctrl_desc *desc)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *parent_np = of_get_parent(dev->of_node);
+       struct regmap *regmap = syscon_node_to_regmap(parent_np);
+
+       of_node_put(parent_np);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       return berlin_pinctrl_probe_regmap(pdev, desc, regmap);
+}
index e1aa8414519418944ea09c92071d073238264821..e9b30f95b03e3c72c68d0dd1ffa1957d930416c9 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2014 Marvell Technology Group Ltd.
  *
- * Antoine TÃ\83©nart <antoine.tenart@free-electrons.com>
+ * Antoine Ténart <antoine.tenart@free-electrons.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -58,4 +58,8 @@ struct berlin_pinctrl_function {
 int berlin_pinctrl_probe(struct platform_device *pdev,
                         const struct berlin_pinctrl_desc *desc);
 
+int berlin_pinctrl_probe_regmap(struct platform_device *pdev,
+                               const struct berlin_pinctrl_desc *desc,
+                               struct regmap *regmap);
+
 #endif /* __PINCTRL_BERLIN_H */
index 9638a00c67c2bd9a3735a632c99d01e9c2625e5d..2686a4450dfc3261029d560d2ce9838191fbf39c 100644 (file)
@@ -1240,6 +1240,38 @@ int pinctrl_force_default(struct pinctrl_dev *pctldev)
 }
 EXPORT_SYMBOL_GPL(pinctrl_force_default);
 
+/**
+ * pinctrl_init_done() - tell pinctrl that probing is done
+ *
+ * We'll use this time to switch the pins from "init" to "default" unless the
+ * driver selected some other state.
+ *
+ * @dev: device that's done probing
+ */
+int pinctrl_init_done(struct device *dev)
+{
+       struct dev_pin_info *pins = dev->pins;
+       int ret;
+
+       if (!pins)
+               return 0;
+
+       if (IS_ERR(pins->init_state))
+               return 0; /* No such state */
+
+       if (pins->p->state != pins->init_state)
+               return 0; /* Not at init anyway */
+
+       if (IS_ERR(pins->default_state))
+               return 0; /* No default state */
+
+       ret = pinctrl_select_state(pins->p, pins->default_state);
+       if (ret)
+               dev_err(dev, "failed to activate default pinctrl state\n");
+
+       return ret;
+}
+
 #ifdef CONFIG_PM
 
 /**
index d7b98ba36825b2a7b7008c46a558ceeb02b6120f..a5bb939873789710d0b00dfcda9805bd287ce92d 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_address.h>
 #include <linux/pinctrl/machine.h>
 #include <linux/pinctrl/pinconf.h>
 #include <linux/pinctrl/pinctrl.h>
@@ -39,6 +40,7 @@ struct imx_pinctrl {
        struct device *dev;
        struct pinctrl_dev *pctl;
        void __iomem *base;
+       void __iomem *input_sel_base;
        const struct imx_pinctrl_soc_info *info;
 };
 
@@ -254,7 +256,12 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
                         * Regular select input register can never be at offset
                         * 0, and we only print register value for regular case.
                         */
-                       writel(pin->input_val, ipctl->base + pin->input_reg);
+                       if (ipctl->input_sel_base)
+                               writel(pin->input_val, ipctl->input_sel_base +
+                                               pin->input_reg);
+                       else
+                               writel(pin->input_val, ipctl->base +
+                                               pin->input_reg);
                        dev_dbg(ipctl->dev,
                                "==>select_input: offset 0x%x val 0x%x\n",
                                pin->input_reg, pin->input_val);
@@ -542,6 +549,9 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
                struct imx_pin_reg *pin_reg;
                struct imx_pin *pin = &grp->pins[i];
 
+               if (!(info->flags & ZERO_OFFSET_VALID) && !mux_reg)
+                       mux_reg = -1;
+
                if (info->flags & SHARE_MUX_CONF_REG) {
                        conf_reg = mux_reg;
                } else {
@@ -550,7 +560,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
                                conf_reg = -1;
                }
 
-               pin_id = mux_reg ? mux_reg / 4 : conf_reg / 4;
+               pin_id = (mux_reg != -1) ? mux_reg / 4 : conf_reg / 4;
                pin_reg = &info->pin_regs[pin_id];
                pin->pin = pin_id;
                grp->pin_ids[i] = pin_id;
@@ -580,7 +590,6 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
        struct device_node *child;
        struct imx_pmx_func *func;
        struct imx_pin_group *grp;
-       static u32 grp_index;
        u32 i = 0;
 
        dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name);
@@ -599,7 +608,7 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
 
        for_each_child_of_node(np, child) {
                func->groups[i] = child->name;
-               grp = &info->groups[grp_index++];
+               grp = &info->groups[info->group_index++];
                imx_pinctrl_parse_groups(child, grp, info, i++);
        }
 
@@ -683,6 +692,8 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
 int imx_pinctrl_probe(struct platform_device *pdev,
                      struct imx_pinctrl_soc_info *info)
 {
+       struct device_node *dev_np = pdev->dev.of_node;
+       struct device_node *np;
        struct imx_pinctrl *ipctl;
        struct resource *res;
        int ret, i;
@@ -713,6 +724,23 @@ int imx_pinctrl_probe(struct platform_device *pdev,
        if (IS_ERR(ipctl->base))
                return PTR_ERR(ipctl->base);
 
+       if (of_property_read_bool(dev_np, "fsl,input-sel")) {
+               np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
+               if (np) {
+                       ipctl->input_sel_base = of_iomap(np, 0);
+                       if (IS_ERR(ipctl->input_sel_base)) {
+                               of_node_put(np);
+                               dev_err(&pdev->dev,
+                                       "iomuxc input select base address not found\n");
+                               return PTR_ERR(ipctl->input_sel_base);
+                       }
+               } else {
+                       dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
+                       return -EINVAL;
+               }
+               of_node_put(np);
+       }
+
        imx_pinctrl_desc.name = dev_name(&pdev->dev);
        imx_pinctrl_desc.pins = info->pins;
        imx_pinctrl_desc.npins = info->npins;
index 49e55d39f7c8aba924f250dad6563148e4219b35..2a592f657c184918ab1e7fbb6d9a4d862f0a2ab5 100644 (file)
@@ -78,12 +78,14 @@ struct imx_pinctrl_soc_info {
        struct imx_pin_reg *pin_regs;
        struct imx_pin_group *groups;
        unsigned int ngroups;
+       unsigned int group_index;
        struct imx_pmx_func *functions;
        unsigned int nfunctions;
        unsigned int flags;
 };
 
 #define SHARE_MUX_CONF_REG     0x1
+#define ZERO_OFFSET_VALID      0x2
 
 #define NO_MUX         0x0
 #define NO_PAD         0x0
index faf635654312a75168ad7237ecf1995d317e396d..293ed4381cc0e08c1bb195bbb9f535fba66075ed 100644 (file)
@@ -26,7 +26,8 @@
 #include "pinctrl-imx.h"
 
 enum imx25_pads {
-       MX25_PAD_RESERVE0 = 1,
+       MX25_PAD_RESERVE0 = 0,
+       MX25_PAD_RESERVE1 = 1,
        MX25_PAD_A10 = 2,
        MX25_PAD_A13 = 3,
        MX25_PAD_A14 = 4,
@@ -169,6 +170,7 @@ enum imx25_pads {
 /* Pad names for the pinmux subsystem */
 static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
        IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
+       IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
        IMX_PINCTRL_PIN(MX25_PAD_A10),
        IMX_PINCTRL_PIN(MX25_PAD_A13),
        IMX_PINCTRL_PIN(MX25_PAD_A14),
index 1fa7530530dd94f25efb29a315076fec0b880871..16dc925117de1ad63cb4fd36251eac746e5bc8a2 100644 (file)
@@ -174,6 +174,17 @@ enum imx7d_pads {
        MX7D_PAD_ENET1_COL = 154,
 };
 
+enum imx7d_lpsr_pads {
+       MX7D_PAD_GPIO1_IO00 = 0,
+       MX7D_PAD_GPIO1_IO01 = 1,
+       MX7D_PAD_GPIO1_IO02 = 2,
+       MX7D_PAD_GPIO1_IO03 = 3,
+       MX7D_PAD_GPIO1_IO04 = 4,
+       MX7D_PAD_GPIO1_IO05 = 5,
+       MX7D_PAD_GPIO1_IO06 = 6,
+       MX7D_PAD_GPIO1_IO07 = 7,
+};
+
 /* Pad names for the pinmux subsystem */
 static const struct pinctrl_pin_desc imx7d_pinctrl_pads[] = {
        IMX_PINCTRL_PIN(MX7D_PAD_RESERVE0),
@@ -333,13 +344,32 @@ static const struct pinctrl_pin_desc imx7d_pinctrl_pads[] = {
        IMX_PINCTRL_PIN(MX7D_PAD_ENET1_COL),
 };
 
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx7d_lpsr_pinctrl_pads[] = {
+       IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO00),
+       IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO01),
+       IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO02),
+       IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO03),
+       IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO04),
+       IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO05),
+       IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO06),
+       IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO07),
+};
+
 static struct imx_pinctrl_soc_info imx7d_pinctrl_info = {
        .pins = imx7d_pinctrl_pads,
        .npins = ARRAY_SIZE(imx7d_pinctrl_pads),
 };
 
+static struct imx_pinctrl_soc_info imx7d_lpsr_pinctrl_info = {
+       .pins = imx7d_lpsr_pinctrl_pads,
+       .npins = ARRAY_SIZE(imx7d_lpsr_pinctrl_pads),
+       .flags = ZERO_OFFSET_VALID,
+};
+
 static struct of_device_id imx7d_pinctrl_of_match[] = {
        { .compatible = "fsl,imx7d-iomuxc", .data = &imx7d_pinctrl_info, },
+       { .compatible = "fsl,imx7d-iomuxc-lpsr", .data = &imx7d_lpsr_pinctrl_info },
        { /* sentinel */ }
 };
 
index f64eecb24755d778c250925bd377408981b4195d..6bbda6b4ab50d9e930544e4bfd72a67904261f85 100644 (file)
@@ -474,7 +474,7 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
                        f->name = fn = child->name;
                }
                f->ngroups++;
-       };
+       }
 
        /* Get groups for each function */
        idxf = 0;
index fe5e07db0a958258196da3d636c1121779e5245e..4d2efad6553c2d422c182f4c2bb2da3b850caa56 100644 (file)
@@ -34,6 +34,14 @@ config PINCTRL_INTEL
        select GPIOLIB
        select GPIOLIB_IRQCHIP
 
+config PINCTRL_BROXTON
+       tristate "Intel Broxton pinctrl and GPIO driver"
+       depends on ACPI
+       select PINCTRL_INTEL
+       help
+         The Broxton pinctrl driver provides an interface that allows
+         configuring SoC pins and using them as GPIOs.
+
 config PINCTRL_SUNRISEPOINT
        tristate "Intel Sunrisepoint pinctrl and GPIO driver"
        depends on ACPI
index fee756e1255bb30aa64f2428d324501c82ddefdd..03bc68e3546cad35f8aa765222bb10a240735778 100644 (file)
@@ -3,4 +3,5 @@
 obj-$(CONFIG_PINCTRL_BAYTRAIL)         += pinctrl-baytrail.o
 obj-$(CONFIG_PINCTRL_CHERRYVIEW)       += pinctrl-cherryview.o
 obj-$(CONFIG_PINCTRL_INTEL)            += pinctrl-intel.o
+obj-$(CONFIG_PINCTRL_BROXTON)          += pinctrl-broxton.o
 obj-$(CONFIG_PINCTRL_SUNRISEPOINT)     += pinctrl-sunrisepoint.o
index f79ea430f651996a2f148e141319861e285c2d28..b59ce75b1947a2327804ca075769cddc9eba2801 100644 (file)
@@ -696,6 +696,7 @@ static int byt_gpio_resume(struct device *dev)
 }
 #endif
 
+#ifdef CONFIG_PM
 static int byt_gpio_runtime_suspend(struct device *dev)
 {
        return 0;
@@ -705,6 +706,7 @@ static int byt_gpio_runtime_resume(struct device *dev)
 {
        return 0;
 }
+#endif
 
 static const struct dev_pm_ops byt_gpio_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume)
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
new file mode 100644 (file)
index 0000000..e42d5d4
--- /dev/null
@@ -0,0 +1,1065 @@
+/*
+ * Intel Broxton SoC pinctrl/GPIO driver
+ *
+ * Copyright (C) 2015, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define BXT_PAD_OWN    0x020
+#define BXT_HOSTSW_OWN 0x080
+#define BXT_PADCFGLOCK 0x090
+#define BXT_GPI_IE     0x110
+
+#define BXT_COMMUNITY(s, e)                            \
+       {                                               \
+               .padown_offset = BXT_PAD_OWN,           \
+               .padcfglock_offset = BXT_PADCFGLOCK,    \
+               .hostown_offset = BXT_HOSTSW_OWN,       \
+               .ie_offset = BXT_GPI_IE,                \
+               .pin_base = (s),                        \
+               .npins = ((e) - (s) + 1),               \
+       }
+
+/* BXT */
+static const struct pinctrl_pin_desc bxt_north_pins[] = {
+       PINCTRL_PIN(0, "GPIO_0"),
+       PINCTRL_PIN(1, "GPIO_1"),
+       PINCTRL_PIN(2, "GPIO_2"),
+       PINCTRL_PIN(3, "GPIO_3"),
+       PINCTRL_PIN(4, "GPIO_4"),
+       PINCTRL_PIN(5, "GPIO_5"),
+       PINCTRL_PIN(6, "GPIO_6"),
+       PINCTRL_PIN(7, "GPIO_7"),
+       PINCTRL_PIN(8, "GPIO_8"),
+       PINCTRL_PIN(9, "GPIO_9"),
+       PINCTRL_PIN(10, "GPIO_10"),
+       PINCTRL_PIN(11, "GPIO_11"),
+       PINCTRL_PIN(12, "GPIO_12"),
+       PINCTRL_PIN(13, "GPIO_13"),
+       PINCTRL_PIN(14, "GPIO_14"),
+       PINCTRL_PIN(15, "GPIO_15"),
+       PINCTRL_PIN(16, "GPIO_16"),
+       PINCTRL_PIN(17, "GPIO_17"),
+       PINCTRL_PIN(18, "GPIO_18"),
+       PINCTRL_PIN(19, "GPIO_19"),
+       PINCTRL_PIN(20, "GPIO_20"),
+       PINCTRL_PIN(21, "GPIO_21"),
+       PINCTRL_PIN(22, "GPIO_22"),
+       PINCTRL_PIN(23, "GPIO_23"),
+       PINCTRL_PIN(24, "GPIO_24"),
+       PINCTRL_PIN(25, "GPIO_25"),
+       PINCTRL_PIN(26, "GPIO_26"),
+       PINCTRL_PIN(27, "GPIO_27"),
+       PINCTRL_PIN(28, "GPIO_28"),
+       PINCTRL_PIN(29, "GPIO_29"),
+       PINCTRL_PIN(30, "GPIO_30"),
+       PINCTRL_PIN(31, "GPIO_31"),
+       PINCTRL_PIN(32, "GPIO_32"),
+       PINCTRL_PIN(33, "GPIO_33"),
+       PINCTRL_PIN(34, "PWM0"),
+       PINCTRL_PIN(35, "PWM1"),
+       PINCTRL_PIN(36, "PWM2"),
+       PINCTRL_PIN(37, "PWM3"),
+       PINCTRL_PIN(38, "LPSS_UART0_RXD"),
+       PINCTRL_PIN(39, "LPSS_UART0_TXD"),
+       PINCTRL_PIN(40, "LPSS_UART0_RTS_B"),
+       PINCTRL_PIN(41, "LPSS_UART0_CTS_B"),
+       PINCTRL_PIN(42, "LPSS_UART1_RXD"),
+       PINCTRL_PIN(43, "LPSS_UART1_TXD"),
+       PINCTRL_PIN(44, "LPSS_UART1_RTS_B"),
+       PINCTRL_PIN(45, "LPSS_UART1_CTS_B"),
+       PINCTRL_PIN(46, "LPSS_UART2_RXD"),
+       PINCTRL_PIN(47, "LPSS_UART2_TXD"),
+       PINCTRL_PIN(48, "LPSS_UART2_RTS_B"),
+       PINCTRL_PIN(49, "LPSS_UART2_CTS_B"),
+       PINCTRL_PIN(50, "ISH_UART0_RXD"),
+       PINCTRL_PIN(51, "ISH_UART0_TXT"),
+       PINCTRL_PIN(52, "ISH_UART0_RTS_B"),
+       PINCTRL_PIN(53, "ISH_UART0_CTS_B"),
+       PINCTRL_PIN(54, "ISH_UART1_RXD"),
+       PINCTRL_PIN(55, "ISH_UART1_TXT"),
+       PINCTRL_PIN(56, "ISH_UART1_RTS_B"),
+       PINCTRL_PIN(57, "ISH_UART1_CTS_B"),
+       PINCTRL_PIN(58, "ISH_UART2_RXD"),
+       PINCTRL_PIN(59, "ISH_UART2_TXD"),
+       PINCTRL_PIN(60, "ISH_UART2_RTS_B"),
+       PINCTRL_PIN(61, "ISH_UART2_CTS_B"),
+       PINCTRL_PIN(62, "GP_CAMERASB00"),
+       PINCTRL_PIN(63, "GP_CAMERASB01"),
+       PINCTRL_PIN(64, "GP_CAMERASB02"),
+       PINCTRL_PIN(65, "GP_CAMERASB03"),
+       PINCTRL_PIN(66, "GP_CAMERASB04"),
+       PINCTRL_PIN(67, "GP_CAMERASB05"),
+       PINCTRL_PIN(68, "GP_CAMERASB06"),
+       PINCTRL_PIN(69, "GP_CAMERASB07"),
+       PINCTRL_PIN(70, "GP_CAMERASB08"),
+       PINCTRL_PIN(71, "GP_CAMERASB09"),
+       PINCTRL_PIN(72, "GP_CAMERASB10"),
+       PINCTRL_PIN(73, "GP_CAMERASB11"),
+       PINCTRL_PIN(74, "TCK"),
+       PINCTRL_PIN(75, "TRST_B"),
+       PINCTRL_PIN(76, "TMS"),
+       PINCTRL_PIN(77, "TDI"),
+       PINCTRL_PIN(78, "CX_PMODE"),
+       PINCTRL_PIN(79, "CX_PREQ_B"),
+       PINCTRL_PIN(80, "JTAGX"),
+       PINCTRL_PIN(81, "CX_PRDY_B"),
+       PINCTRL_PIN(82, "TDO"),
+};
+
+static const unsigned bxt_north_pwm0_pins[] = { 34 };
+static const unsigned bxt_north_pwm1_pins[] = { 35 };
+static const unsigned bxt_north_pwm2_pins[] = { 36 };
+static const unsigned bxt_north_pwm3_pins[] = { 37 };
+static const unsigned bxt_north_uart0_pins[] = { 38, 39, 40, 41 };
+static const unsigned bxt_north_uart1_pins[] = { 42, 43, 44, 45 };
+static const unsigned bxt_north_uart2_pins[] = { 46, 47, 48, 49 };
+static const unsigned bxt_north_uart0b_pins[] = { 50, 51, 52, 53 };
+static const unsigned bxt_north_uart1b_pins[] = { 54, 55, 56, 57 };
+static const unsigned bxt_north_uart2b_pins[] = { 58, 59, 60, 61 };
+static const unsigned bxt_north_uart3_pins[] = { 58, 59, 60, 61 };
+
+static const struct intel_pingroup bxt_north_groups[] = {
+       PIN_GROUP("pwm0_grp", bxt_north_pwm0_pins, 1),
+       PIN_GROUP("pwm1_grp", bxt_north_pwm1_pins, 1),
+       PIN_GROUP("pwm2_grp", bxt_north_pwm2_pins, 1),
+       PIN_GROUP("pwm3_grp", bxt_north_pwm3_pins, 1),
+       PIN_GROUP("uart0_grp", bxt_north_uart0_pins, 1),
+       PIN_GROUP("uart1_grp", bxt_north_uart1_pins, 1),
+       PIN_GROUP("uart2_grp", bxt_north_uart2_pins, 1),
+       PIN_GROUP("uart0b_grp", bxt_north_uart0b_pins, 2),
+       PIN_GROUP("uart1b_grp", bxt_north_uart1b_pins, 2),
+       PIN_GROUP("uart2b_grp", bxt_north_uart2b_pins, 2),
+       PIN_GROUP("uart3_grp", bxt_north_uart3_pins, 3),
+};
+
+static const char * const bxt_north_pwm0_groups[] = { "pwm0_grp" };
+static const char * const bxt_north_pwm1_groups[] = { "pwm1_grp" };
+static const char * const bxt_north_pwm2_groups[] = { "pwm2_grp" };
+static const char * const bxt_north_pwm3_groups[] = { "pwm3_grp" };
+static const char * const bxt_north_uart0_groups[] = {
+       "uart0_grp", "uart0b_grp",
+};
+static const char * const bxt_north_uart1_groups[] = {
+       "uart1_grp", "uart1b_grp",
+};
+static const char * const bxt_north_uart2_groups[] = {
+       "uart2_grp", "uart2b_grp",
+};
+static const char * const bxt_north_uart3_groups[] = { "uart3_grp" };
+
+static const struct intel_function bxt_north_functions[] = {
+       FUNCTION("pwm0", bxt_north_pwm0_groups),
+       FUNCTION("pwm1", bxt_north_pwm1_groups),
+       FUNCTION("pwm2", bxt_north_pwm2_groups),
+       FUNCTION("pwm3", bxt_north_pwm3_groups),
+       FUNCTION("uart0", bxt_north_uart0_groups),
+       FUNCTION("uart1", bxt_north_uart1_groups),
+       FUNCTION("uart2", bxt_north_uart2_groups),
+       FUNCTION("uart3", bxt_north_uart3_groups),
+};
+
+static const struct intel_community bxt_north_communities[] = {
+       BXT_COMMUNITY(0, 82),
+};
+
+static const struct intel_pinctrl_soc_data bxt_north_soc_data = {
+       .uid = "1",
+       .pins = bxt_north_pins,
+       .npins = ARRAY_SIZE(bxt_north_pins),
+       .groups = bxt_north_groups,
+       .ngroups = ARRAY_SIZE(bxt_north_groups),
+       .functions = bxt_north_functions,
+       .nfunctions = ARRAY_SIZE(bxt_north_functions),
+       .communities = bxt_north_communities,
+       .ncommunities = ARRAY_SIZE(bxt_north_communities),
+};
+
+static const struct pinctrl_pin_desc bxt_northwest_pins[] = {
+       PINCTRL_PIN(0, "PMC_SPI_FS0"),
+       PINCTRL_PIN(1, "PMC_SPI_FS1"),
+       PINCTRL_PIN(2, "PMC_SPI_FS2"),
+       PINCTRL_PIN(3, "PMC_SPI_RXD"),
+       PINCTRL_PIN(4, "PMC_SPI_TXD"),
+       PINCTRL_PIN(5, "PMC_SPI_CLK"),
+       PINCTRL_PIN(6, "PMC_UART_RXD"),
+       PINCTRL_PIN(7, "PMC_UART_TXD"),
+       PINCTRL_PIN(8, "PMIC_PWRGOOD"),
+       PINCTRL_PIN(9, "PMIC_RESET_B"),
+       PINCTRL_PIN(10, "RTC_CLK"),
+       PINCTRL_PIN(11, "PMIC_SDWN_B"),
+       PINCTRL_PIN(12, "PMIC_BCUDISW2"),
+       PINCTRL_PIN(13, "PMIC_BCUDISCRIT"),
+       PINCTRL_PIN(14, "PMIC_THERMTRIP_B"),
+       PINCTRL_PIN(15, "PMIC_STDBY"),
+       PINCTRL_PIN(16, "SVID0_ALERT_B"),
+       PINCTRL_PIN(17, "SVID0_DATA"),
+       PINCTRL_PIN(18, "SVID0_CLK"),
+       PINCTRL_PIN(19, "PMIC_I2C_SCL"),
+       PINCTRL_PIN(20, "PMIC_I2C_SDA"),
+       PINCTRL_PIN(21, "AVS_I2S1_MCLK"),
+       PINCTRL_PIN(22, "AVS_I2S1_BCLK"),
+       PINCTRL_PIN(23, "AVS_I2S1_WS_SYNC"),
+       PINCTRL_PIN(24, "AVS_I2S1_SDI"),
+       PINCTRL_PIN(25, "AVS_I2S1_SDO"),
+       PINCTRL_PIN(26, "AVS_M_CLK_A1"),
+       PINCTRL_PIN(27, "AVS_M_CLK_B1"),
+       PINCTRL_PIN(28, "AVS_M_DATA_1"),
+       PINCTRL_PIN(29, "AVS_M_CLK_AB2"),
+       PINCTRL_PIN(30, "AVS_M_DATA_2"),
+       PINCTRL_PIN(31, "AVS_I2S2_MCLK"),
+       PINCTRL_PIN(32, "AVS_I2S2_BCLK"),
+       PINCTRL_PIN(33, "AVS_I2S2_WS_SYNC"),
+       PINCTRL_PIN(34, "AVS_I2S2_SDI"),
+       PINCTRL_PIN(35, "AVS_I2S2_SDOK"),
+       PINCTRL_PIN(36, "AVS_I2S3_BCLK"),
+       PINCTRL_PIN(37, "AVS_I2S3_WS_SYNC"),
+       PINCTRL_PIN(38, "AVS_I2S3_SDI"),
+       PINCTRL_PIN(39, "AVS_I2S3_SDO"),
+       PINCTRL_PIN(40, "AVS_I2S4_BCLK"),
+       PINCTRL_PIN(41, "AVS_I2S4_WS_SYNC"),
+       PINCTRL_PIN(42, "AVS_I2S4_SDI"),
+       PINCTRL_PIN(43, "AVS_I2S4_SDO"),
+       PINCTRL_PIN(44, "PROCHOT_B"),
+       PINCTRL_PIN(45, "FST_SPI_CS0_B"),
+       PINCTRL_PIN(46, "FST_SPI_CS1_B"),
+       PINCTRL_PIN(47, "FST_SPI_MOSI_IO0"),
+       PINCTRL_PIN(48, "FST_SPI_MISO_IO1"),
+       PINCTRL_PIN(49, "FST_SPI_IO2"),
+       PINCTRL_PIN(50, "FST_SPI_IO3"),
+       PINCTRL_PIN(51, "FST_SPI_CLK"),
+       PINCTRL_PIN(52, "FST_SPI_CLK_FB"),
+       PINCTRL_PIN(53, "GP_SSP_0_CLK"),
+       PINCTRL_PIN(54, "GP_SSP_0_FS0"),
+       PINCTRL_PIN(55, "GP_SSP_0_FS1"),
+       PINCTRL_PIN(56, "GP_SSP_0_FS2"),
+       PINCTRL_PIN(57, "GP_SSP_0_RXD"),
+       PINCTRL_PIN(58, "GP_SSP_0_TXD"),
+       PINCTRL_PIN(59, "GP_SSP_1_CLK"),
+       PINCTRL_PIN(60, "GP_SSP_1_FS0"),
+       PINCTRL_PIN(61, "GP_SSP_1_FS1"),
+       PINCTRL_PIN(62, "GP_SSP_1_FS2"),
+       PINCTRL_PIN(63, "GP_SSP_1_FS3"),
+       PINCTRL_PIN(64, "GP_SSP_1_RXD"),
+       PINCTRL_PIN(65, "GP_SSP_1_TXD"),
+       PINCTRL_PIN(66, "GP_SSP_2_CLK"),
+       PINCTRL_PIN(67, "GP_SSP_2_FS0"),
+       PINCTRL_PIN(68, "GP_SSP_2_FS1"),
+       PINCTRL_PIN(69, "GP_SSP_2_FS2"),
+       PINCTRL_PIN(70, "GP_SSP_2_RXD"),
+       PINCTRL_PIN(71, "GP_SSP_2_TXD"),
+};
+
+static const unsigned bxt_northwest_ssp0_pins[] = { 53, 54, 55, 56, 57, 58 };
+static const unsigned bxt_northwest_ssp1_pins[] = {
+       59, 60, 61, 62, 63, 64, 65
+};
+static const unsigned bxt_northwest_ssp2_pins[] = { 66, 67, 68, 69, 70, 71 };
+static const unsigned bxt_northwest_uart3_pins[] = { 67, 68, 69, 70 };
+
+static const struct intel_pingroup bxt_northwest_groups[] = {
+       PIN_GROUP("ssp0_grp", bxt_northwest_ssp0_pins, 1),
+       PIN_GROUP("ssp1_grp", bxt_northwest_ssp1_pins, 1),
+       PIN_GROUP("ssp2_grp", bxt_northwest_ssp2_pins, 1),
+       PIN_GROUP("uart3_grp", bxt_northwest_uart3_pins, 2),
+};
+
+static const char * const bxt_northwest_ssp0_groups[] = { "ssp0_grp" };
+static const char * const bxt_northwest_ssp1_groups[] = { "ssp1_grp" };
+static const char * const bxt_northwest_ssp2_groups[] = { "ssp2_grp" };
+static const char * const bxt_northwest_uart3_groups[] = { "uart3_grp" };
+
+static const struct intel_function bxt_northwest_functions[] = {
+       FUNCTION("ssp0", bxt_northwest_ssp0_groups),
+       FUNCTION("ssp1", bxt_northwest_ssp1_groups),
+       FUNCTION("ssp2", bxt_northwest_ssp2_groups),
+       FUNCTION("uart3", bxt_northwest_uart3_groups),
+};
+
+static const struct intel_community bxt_northwest_communities[] = {
+       BXT_COMMUNITY(0, 71),
+};
+
+static const struct intel_pinctrl_soc_data bxt_northwest_soc_data = {
+       .uid = "2",
+       .pins = bxt_northwest_pins,
+       .npins = ARRAY_SIZE(bxt_northwest_pins),
+       .groups = bxt_northwest_groups,
+       .ngroups = ARRAY_SIZE(bxt_northwest_groups),
+       .functions = bxt_northwest_functions,
+       .nfunctions = ARRAY_SIZE(bxt_northwest_functions),
+       .communities = bxt_northwest_communities,
+       .ncommunities = ARRAY_SIZE(bxt_northwest_communities),
+};
+
+static const struct pinctrl_pin_desc bxt_west_pins[] = {
+       PINCTRL_PIN(0, "LPSS_I2C0_SDA"),
+       PINCTRL_PIN(1, "LPSS_I2C0_SCL"),
+       PINCTRL_PIN(2, "LPSS_I2C1_SDA"),
+       PINCTRL_PIN(3, "LPSS_I2C1_SCL"),
+       PINCTRL_PIN(4, "LPSS_I2C2_SDA"),
+       PINCTRL_PIN(5, "LPSS_I2C2_SCL"),
+       PINCTRL_PIN(6, "LPSS_I2C3_SDA"),
+       PINCTRL_PIN(7, "LPSS_I2C3_SCL"),
+       PINCTRL_PIN(8, "LPSS_I2C4_SDA"),
+       PINCTRL_PIN(9, "LPSS_I2C4_SCL"),
+       PINCTRL_PIN(10, "LPSS_I2C5_SDA"),
+       PINCTRL_PIN(11, "LPSS_I2C5_SCL"),
+       PINCTRL_PIN(12, "LPSS_I2C6_SDA"),
+       PINCTRL_PIN(13, "LPSS_I2C6_SCL"),
+       PINCTRL_PIN(14, "LPSS_I2C7_SDA"),
+       PINCTRL_PIN(15, "LPSS_I2C7_SCL"),
+       PINCTRL_PIN(16, "ISH_I2C0_SDA"),
+       PINCTRL_PIN(17, "ISH_I2C0_SCL"),
+       PINCTRL_PIN(18, "ISH_I2C1_SDA"),
+       PINCTRL_PIN(19, "ISH_I2C1_SCL"),
+       PINCTRL_PIN(20, "ISH_I2C2_SDA"),
+       PINCTRL_PIN(21, "ISH_I2C2_SCL"),
+       PINCTRL_PIN(22, "ISH_GPIO_0"),
+       PINCTRL_PIN(23, "ISH_GPIO_1"),
+       PINCTRL_PIN(24, "ISH_GPIO_2"),
+       PINCTRL_PIN(25, "ISH_GPIO_3"),
+       PINCTRL_PIN(26, "ISH_GPIO_4"),
+       PINCTRL_PIN(27, "ISH_GPIO_5"),
+       PINCTRL_PIN(28, "ISH_GPIO_6"),
+       PINCTRL_PIN(29, "ISH_GPIO_7"),
+       PINCTRL_PIN(30, "ISH_GPIO_8"),
+       PINCTRL_PIN(31, "ISH_GPIO_9"),
+       PINCTRL_PIN(32, "MODEM_CLKREQ"),
+       PINCTRL_PIN(33, "DGCLKDBG_PMC_0"),
+       PINCTRL_PIN(34, "DGCLKDBG_PMC_1"),
+       PINCTRL_PIN(35, "DGCLKDBG_PMC_2"),
+       PINCTRL_PIN(36, "DGCLKDBG_ICLK_0"),
+       PINCTRL_PIN(37, "DGCLKDBG_ICLK_1"),
+       PINCTRL_PIN(38, "OSC_CLK_OUT_0"),
+       PINCTRL_PIN(39, "OSC_CLK_OUT_1"),
+       PINCTRL_PIN(40, "OSC_CLK_OUT_2"),
+       PINCTRL_PIN(41, "OSC_CLK_OUT_3"),
+};
+
+static const unsigned bxt_west_i2c0_pins[] = { 0, 1 };
+static const unsigned bxt_west_i2c1_pins[] = { 2, 3 };
+static const unsigned bxt_west_i2c2_pins[] = { 4, 5 };
+static const unsigned bxt_west_i2c3_pins[] = { 6, 7 };
+static const unsigned bxt_west_i2c4_pins[] = { 8, 9 };
+static const unsigned bxt_west_i2c5_pins[] = { 10, 11 };
+static const unsigned bxt_west_i2c6_pins[] = { 12, 13 };
+static const unsigned bxt_west_i2c7_pins[] = { 14, 15 };
+static const unsigned bxt_west_i2c5b_pins[] = { 16, 17 };
+static const unsigned bxt_west_i2c6b_pins[] = { 18, 19 };
+static const unsigned bxt_west_i2c7b_pins[] = { 20, 21 };
+
+static const struct intel_pingroup bxt_west_groups[] = {
+       PIN_GROUP("i2c0_grp", bxt_west_i2c0_pins, 1),
+       PIN_GROUP("i2c1_grp", bxt_west_i2c1_pins, 1),
+       PIN_GROUP("i2c2_grp", bxt_west_i2c2_pins, 1),
+       PIN_GROUP("i2c3_grp", bxt_west_i2c3_pins, 1),
+       PIN_GROUP("i2c4_grp", bxt_west_i2c4_pins, 1),
+       PIN_GROUP("i2c5_grp", bxt_west_i2c5_pins, 1),
+       PIN_GROUP("i2c6_grp", bxt_west_i2c6_pins, 1),
+       PIN_GROUP("i2c7_grp", bxt_west_i2c7_pins, 1),
+       PIN_GROUP("i2c5b_grp", bxt_west_i2c5b_pins, 2),
+       PIN_GROUP("i2c6b_grp", bxt_west_i2c6b_pins, 2),
+       PIN_GROUP("i2c7b_grp", bxt_west_i2c7b_pins, 2),
+};
+
+static const char * const bxt_west_i2c0_groups[] = { "i2c0_grp" };
+static const char * const bxt_west_i2c1_groups[] = { "i2c1_grp" };
+static const char * const bxt_west_i2c2_groups[] = { "i2c2_grp" };
+static const char * const bxt_west_i2c3_groups[] = { "i2c3_grp" };
+static const char * const bxt_west_i2c4_groups[] = { "i2c4_grp" };
+static const char * const bxt_west_i2c5_groups[] = { "i2c5_grp", "i2c5b_grp" };
+static const char * const bxt_west_i2c6_groups[] = { "i2c6_grp", "i2c6b_grp" };
+static const char * const bxt_west_i2c7_groups[] = { "i2c7_grp", "i2c7b_grp" };
+
+static const struct intel_function bxt_west_functions[] = {
+       FUNCTION("i2c0", bxt_west_i2c0_groups),
+       FUNCTION("i2c1", bxt_west_i2c1_groups),
+       FUNCTION("i2c2", bxt_west_i2c2_groups),
+       FUNCTION("i2c3", bxt_west_i2c3_groups),
+       FUNCTION("i2c4", bxt_west_i2c4_groups),
+       FUNCTION("i2c5", bxt_west_i2c5_groups),
+       FUNCTION("i2c6", bxt_west_i2c6_groups),
+       FUNCTION("i2c7", bxt_west_i2c7_groups),
+};
+
+static const struct intel_community bxt_west_communities[] = {
+       BXT_COMMUNITY(0, 41),
+};
+
+static const struct intel_pinctrl_soc_data bxt_west_soc_data = {
+       .uid = "3",
+       .pins = bxt_west_pins,
+       .npins = ARRAY_SIZE(bxt_west_pins),
+       .groups = bxt_west_groups,
+       .ngroups = ARRAY_SIZE(bxt_west_groups),
+       .functions = bxt_west_functions,
+       .nfunctions = ARRAY_SIZE(bxt_west_functions),
+       .communities = bxt_west_communities,
+       .ncommunities = ARRAY_SIZE(bxt_west_communities),
+};
+
+static const struct pinctrl_pin_desc bxt_southwest_pins[] = {
+       PINCTRL_PIN(0, "EMMC0_CLK"),
+       PINCTRL_PIN(1, "EMMC0_D0"),
+       PINCTRL_PIN(2, "EMMC0_D1"),
+       PINCTRL_PIN(3, "EMMC0_D2"),
+       PINCTRL_PIN(4, "EMMC0_D3"),
+       PINCTRL_PIN(5, "EMMC0_D4"),
+       PINCTRL_PIN(6, "EMMC0_D5"),
+       PINCTRL_PIN(7, "EMMC0_D6"),
+       PINCTRL_PIN(8, "EMMC0_D7"),
+       PINCTRL_PIN(9, "EMMC0_CMD"),
+       PINCTRL_PIN(10, "SDIO_CLK"),
+       PINCTRL_PIN(11, "SDIO_D0"),
+       PINCTRL_PIN(12, "SDIO_D1"),
+       PINCTRL_PIN(13, "SDIO_D2"),
+       PINCTRL_PIN(14, "SDIO_D3"),
+       PINCTRL_PIN(15, "SDIO_CMD"),
+       PINCTRL_PIN(16, "SDCARD_CLK"),
+       PINCTRL_PIN(17, "SDCARD_D0"),
+       PINCTRL_PIN(18, "SDCARD_D1"),
+       PINCTRL_PIN(19, "SDCARD_D2"),
+       PINCTRL_PIN(20, "SDCARD_D3"),
+       PINCTRL_PIN(21, "SDCARD_CD_B"),
+       PINCTRL_PIN(22, "SDCARD_CMD"),
+       PINCTRL_PIN(23, "SDCARD_LVL_CLK_FB"),
+       PINCTRL_PIN(24, "SDCARD_LVL_CMD_DIR"),
+       PINCTRL_PIN(25, "SDCARD_LVL_DAT_DIR"),
+       PINCTRL_PIN(26, "EMMC0_STROBE"),
+       PINCTRL_PIN(27, "SDIO_PWR_DOWN_B"),
+       PINCTRL_PIN(28, "SDCARD_PWR_DOWN_B"),
+       PINCTRL_PIN(29, "SDCARD_LVL_SEL"),
+       PINCTRL_PIN(30, "SDCARD_LVL_WP"),
+};
+
+static const unsigned bxt_southwest_emmc0_pins[] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 26,
+};
+static const unsigned bxt_southwest_sdio_pins[] = {
+       10, 11, 12, 13, 14, 15, 27,
+};
+static const unsigned bxt_southwest_sdcard_pins[] = {
+       16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 28, 29, 30,
+};
+
+static const struct intel_pingroup bxt_southwest_groups[] = {
+       PIN_GROUP("emmc0_grp", bxt_southwest_emmc0_pins, 1),
+       PIN_GROUP("sdio_grp", bxt_southwest_sdio_pins, 1),
+       PIN_GROUP("sdcard_grp", bxt_southwest_sdcard_pins, 1),
+};
+
+static const char * const bxt_southwest_emmc0_groups[] = { "emmc0_grp" };
+static const char * const bxt_southwest_sdio_groups[] = { "sdio_grp" };
+static const char * const bxt_southwest_sdcard_groups[] = { "sdcard_grp" };
+
+static const struct intel_function bxt_southwest_functions[] = {
+       FUNCTION("emmc0", bxt_southwest_emmc0_groups),
+       FUNCTION("sdio", bxt_southwest_sdio_groups),
+       FUNCTION("sdcard", bxt_southwest_sdcard_groups),
+};
+
+static const struct intel_community bxt_southwest_communities[] = {
+       BXT_COMMUNITY(0, 30),
+};
+
+static const struct intel_pinctrl_soc_data bxt_southwest_soc_data = {
+       .uid = "4",
+       .pins = bxt_southwest_pins,
+       .npins = ARRAY_SIZE(bxt_southwest_pins),
+       .groups = bxt_southwest_groups,
+       .ngroups = ARRAY_SIZE(bxt_southwest_groups),
+       .functions = bxt_southwest_functions,
+       .nfunctions = ARRAY_SIZE(bxt_southwest_functions),
+       .communities = bxt_southwest_communities,
+       .ncommunities = ARRAY_SIZE(bxt_southwest_communities),
+};
+
+static const struct pinctrl_pin_desc bxt_south_pins[] = {
+       PINCTRL_PIN(0, "HV_DDI0_DDC_SDA"),
+       PINCTRL_PIN(1, "HV_DDI0_DDC_SCL"),
+       PINCTRL_PIN(2, "HV_DDI1_DDC_SDA"),
+       PINCTRL_PIN(3, "HV_DDI1_DDC_SCL"),
+       PINCTRL_PIN(4, "DBI_SDA"),
+       PINCTRL_PIN(5, "DBI_SCL"),
+       PINCTRL_PIN(6, "PANEL0_VDDEN"),
+       PINCTRL_PIN(7, "PANEL0_BKLTEN"),
+       PINCTRL_PIN(8, "PANEL0_BKLTCTL"),
+       PINCTRL_PIN(9, "PANEL1_VDDEN"),
+       PINCTRL_PIN(10, "PANEL1_BKLTEN"),
+       PINCTRL_PIN(11, "PANEL1_BKLTCTL"),
+       PINCTRL_PIN(12, "DBI_CSX"),
+       PINCTRL_PIN(13, "DBI_RESX"),
+       PINCTRL_PIN(14, "GP_INTD_DSI_TE1"),
+       PINCTRL_PIN(15, "GP_INTD_DSI_TE2"),
+       PINCTRL_PIN(16, "USB_OC0_B"),
+       PINCTRL_PIN(17, "USB_OC1_B"),
+       PINCTRL_PIN(18, "MEX_WAKE0_B"),
+       PINCTRL_PIN(19, "MEX_WAKE1_B"),
+};
+
+static const struct intel_community bxt_south_communities[] = {
+       BXT_COMMUNITY(0, 19),
+};
+
+static const struct intel_pinctrl_soc_data bxt_south_soc_data = {
+       .uid = "5",
+       .pins = bxt_south_pins,
+       .npins = ARRAY_SIZE(bxt_south_pins),
+       .communities = bxt_south_communities,
+       .ncommunities = ARRAY_SIZE(bxt_south_communities),
+};
+
+static const struct intel_pinctrl_soc_data *bxt_pinctrl_soc_data[] = {
+       &bxt_north_soc_data,
+       &bxt_northwest_soc_data,
+       &bxt_west_soc_data,
+       &bxt_southwest_soc_data,
+       &bxt_south_soc_data,
+       NULL,
+};
+
+/* APL */
+static const struct pinctrl_pin_desc apl_north_pins[] = {
+       PINCTRL_PIN(0, "GPIO_0"),
+       PINCTRL_PIN(1, "GPIO_1"),
+       PINCTRL_PIN(2, "GPIO_2"),
+       PINCTRL_PIN(3, "GPIO_3"),
+       PINCTRL_PIN(4, "GPIO_4"),
+       PINCTRL_PIN(5, "GPIO_5"),
+       PINCTRL_PIN(6, "GPIO_6"),
+       PINCTRL_PIN(7, "GPIO_7"),
+       PINCTRL_PIN(8, "GPIO_8"),
+       PINCTRL_PIN(9, "GPIO_9"),
+       PINCTRL_PIN(10, "GPIO_10"),
+       PINCTRL_PIN(11, "GPIO_11"),
+       PINCTRL_PIN(12, "GPIO_12"),
+       PINCTRL_PIN(13, "GPIO_13"),
+       PINCTRL_PIN(14, "GPIO_14"),
+       PINCTRL_PIN(15, "GPIO_15"),
+       PINCTRL_PIN(16, "GPIO_16"),
+       PINCTRL_PIN(17, "GPIO_17"),
+       PINCTRL_PIN(18, "GPIO_18"),
+       PINCTRL_PIN(19, "GPIO_19"),
+       PINCTRL_PIN(20, "GPIO_20"),
+       PINCTRL_PIN(21, "GPIO_21"),
+       PINCTRL_PIN(22, "GPIO_22"),
+       PINCTRL_PIN(23, "GPIO_23"),
+       PINCTRL_PIN(24, "GPIO_24"),
+       PINCTRL_PIN(25, "GPIO_25"),
+       PINCTRL_PIN(26, "GPIO_26"),
+       PINCTRL_PIN(27, "GPIO_27"),
+       PINCTRL_PIN(28, "GPIO_28"),
+       PINCTRL_PIN(29, "GPIO_29"),
+       PINCTRL_PIN(30, "GPIO_30"),
+       PINCTRL_PIN(31, "GPIO_31"),
+       PINCTRL_PIN(32, "GPIO_32"),
+       PINCTRL_PIN(33, "GPIO_33"),
+       PINCTRL_PIN(34, "PWM0"),
+       PINCTRL_PIN(35, "PWM1"),
+       PINCTRL_PIN(36, "PWM2"),
+       PINCTRL_PIN(37, "PWM3"),
+       PINCTRL_PIN(38, "LPSS_UART0_RXD"),
+       PINCTRL_PIN(39, "LPSS_UART0_TXD"),
+       PINCTRL_PIN(40, "LPSS_UART0_RTS_B"),
+       PINCTRL_PIN(41, "LPSS_UART0_CTS_B"),
+       PINCTRL_PIN(42, "LPSS_UART1_RXD"),
+       PINCTRL_PIN(43, "LPSS_UART1_TXD"),
+       PINCTRL_PIN(44, "LPSS_UART1_RTS_B"),
+       PINCTRL_PIN(45, "LPSS_UART1_CTS_B"),
+       PINCTRL_PIN(46, "LPSS_UART2_RXD"),
+       PINCTRL_PIN(47, "LPSS_UART2_TXD"),
+       PINCTRL_PIN(48, "LPSS_UART2_RTS_B"),
+       PINCTRL_PIN(49, "LPSS_UART2_CTS_B"),
+       PINCTRL_PIN(50, "GP_CAMERASB00"),
+       PINCTRL_PIN(51, "GP_CAMERASB01"),
+       PINCTRL_PIN(52, "GP_CAMERASB02"),
+       PINCTRL_PIN(53, "GP_CAMERASB03"),
+       PINCTRL_PIN(54, "GP_CAMERASB04"),
+       PINCTRL_PIN(55, "GP_CAMERASB05"),
+       PINCTRL_PIN(56, "GP_CAMERASB06"),
+       PINCTRL_PIN(57, "GP_CAMERASB07"),
+       PINCTRL_PIN(58, "GP_CAMERASB08"),
+       PINCTRL_PIN(59, "GP_CAMERASB09"),
+       PINCTRL_PIN(60, "GP_CAMERASB10"),
+       PINCTRL_PIN(61, "GP_CAMERASB11"),
+       PINCTRL_PIN(62, "TCK"),
+       PINCTRL_PIN(63, "TRST_B"),
+       PINCTRL_PIN(64, "TMS"),
+       PINCTRL_PIN(65, "TDI"),
+       PINCTRL_PIN(66, "CX_PMODE"),
+       PINCTRL_PIN(67, "CX_PREQ_B"),
+       PINCTRL_PIN(68, "JTAGX"),
+       PINCTRL_PIN(69, "CX_PRDY_B"),
+       PINCTRL_PIN(70, "TDO"),
+       PINCTRL_PIN(71, "CNV_BRI_DT"),
+       PINCTRL_PIN(72, "CNV_BRI_RSP"),
+       PINCTRL_PIN(73, "CNV_RGI_DT"),
+       PINCTRL_PIN(74, "CNV_RGI_RSP"),
+       PINCTRL_PIN(75, "SVID0_ALERT_B"),
+       PINCTRL_PIN(76, "SVID0_DATA"),
+       PINCTRL_PIN(77, "SVID0_CLK"),
+};
+
+static const unsigned apl_north_pwm0_pins[] = { 34 };
+static const unsigned apl_north_pwm1_pins[] = { 35 };
+static const unsigned apl_north_pwm2_pins[] = { 36 };
+static const unsigned apl_north_pwm3_pins[] = { 37 };
+static const unsigned apl_north_uart0_pins[] = { 38, 39, 40, 41 };
+static const unsigned apl_north_uart1_pins[] = { 42, 43, 44, 45 };
+static const unsigned apl_north_uart2_pins[] = { 46, 47, 48, 49 };
+
+static const struct intel_pingroup apl_north_groups[] = {
+       PIN_GROUP("pwm0_grp", apl_north_pwm0_pins, 1),
+       PIN_GROUP("pwm1_grp", apl_north_pwm1_pins, 1),
+       PIN_GROUP("pwm2_grp", apl_north_pwm2_pins, 1),
+       PIN_GROUP("pwm3_grp", apl_north_pwm3_pins, 1),
+       PIN_GROUP("uart0_grp", apl_north_uart0_pins, 1),
+       PIN_GROUP("uart1_grp", apl_north_uart1_pins, 1),
+       PIN_GROUP("uart2_grp", apl_north_uart2_pins, 1),
+};
+
+static const char * const apl_north_pwm0_groups[] = { "pwm0_grp" };
+static const char * const apl_north_pwm1_groups[] = { "pwm1_grp" };
+static const char * const apl_north_pwm2_groups[] = { "pwm2_grp" };
+static const char * const apl_north_pwm3_groups[] = { "pwm3_grp" };
+static const char * const apl_north_uart0_groups[] = { "uart0_grp" };
+static const char * const apl_north_uart1_groups[] = { "uart1_grp" };
+static const char * const apl_north_uart2_groups[] = { "uart2_grp" };
+
+static const struct intel_function apl_north_functions[] = {
+       FUNCTION("pwm0", apl_north_pwm0_groups),
+       FUNCTION("pwm1", apl_north_pwm1_groups),
+       FUNCTION("pwm2", apl_north_pwm2_groups),
+       FUNCTION("pwm3", apl_north_pwm3_groups),
+       FUNCTION("uart0", apl_north_uart0_groups),
+       FUNCTION("uart1", apl_north_uart1_groups),
+       FUNCTION("uart2", apl_north_uart2_groups),
+};
+
+static const struct intel_community apl_north_communities[] = {
+       BXT_COMMUNITY(0, 77),
+};
+
+static const struct intel_pinctrl_soc_data apl_north_soc_data = {
+       .uid = "1",
+       .pins = apl_north_pins,
+       .npins = ARRAY_SIZE(apl_north_pins),
+       .groups = apl_north_groups,
+       .ngroups = ARRAY_SIZE(apl_north_groups),
+       .functions = apl_north_functions,
+       .nfunctions = ARRAY_SIZE(apl_north_functions),
+       .communities = apl_north_communities,
+       .ncommunities = ARRAY_SIZE(apl_north_communities),
+};
+
+static const struct pinctrl_pin_desc apl_northwest_pins[] = {
+       PINCTRL_PIN(0, "HV_DDI0_DDC_SDA"),
+       PINCTRL_PIN(1, "HV_DDI0_DDC_SCL"),
+       PINCTRL_PIN(2, "HV_DDI1_DDC_SDA"),
+       PINCTRL_PIN(3, "HV_DDI1_DDC_SCL"),
+       PINCTRL_PIN(4, "DBI_SDA"),
+       PINCTRL_PIN(5, "DBI_SCL"),
+       PINCTRL_PIN(6, "PANEL0_VDDEN"),
+       PINCTRL_PIN(7, "PANEL0_BKLTEN"),
+       PINCTRL_PIN(8, "PANEL0_BKLTCTL"),
+       PINCTRL_PIN(9, "PANEL1_VDDEN"),
+       PINCTRL_PIN(10, "PANEL1_BKLTEN"),
+       PINCTRL_PIN(11, "PANEL1_BKLTCTL"),
+       PINCTRL_PIN(12, "DBI_CSX"),
+       PINCTRL_PIN(13, "DBI_RESX"),
+       PINCTRL_PIN(14, "GP_INTD_DSI_TE1"),
+       PINCTRL_PIN(15, "GP_INTD_DSI_TE2"),
+       PINCTRL_PIN(16, "USB_OC0_B"),
+       PINCTRL_PIN(17, "USB_OC1_B"),
+       PINCTRL_PIN(18, "PMC_SPI_FS0"),
+       PINCTRL_PIN(19, "PMC_SPI_FS1"),
+       PINCTRL_PIN(20, "PMC_SPI_FS2"),
+       PINCTRL_PIN(21, "PMC_SPI_RXD"),
+       PINCTRL_PIN(22, "PMC_SPI_TXD"),
+       PINCTRL_PIN(23, "PMC_SPI_CLK"),
+       PINCTRL_PIN(24, "PMIC_PWRGOOD"),
+       PINCTRL_PIN(25, "PMIC_RESET_B"),
+       PINCTRL_PIN(26, "PMIC_SDWN_B"),
+       PINCTRL_PIN(27, "PMIC_BCUDISW2"),
+       PINCTRL_PIN(28, "PMIC_BCUDISCRIT"),
+       PINCTRL_PIN(29, "PMIC_THERMTRIP_B"),
+       PINCTRL_PIN(30, "PMIC_STDBY"),
+       PINCTRL_PIN(31, "PROCHOT_B"),
+       PINCTRL_PIN(32, "PMIC_I2C_SCL"),
+       PINCTRL_PIN(33, "PMIC_I2C_SDA"),
+       PINCTRL_PIN(34, "AVS_I2S1_MCLK"),
+       PINCTRL_PIN(35, "AVS_I2S1_BCLK"),
+       PINCTRL_PIN(36, "AVS_I2S1_WS_SYNC"),
+       PINCTRL_PIN(37, "AVS_I2S1_SDI"),
+       PINCTRL_PIN(38, "AVS_I2S1_SDO"),
+       PINCTRL_PIN(39, "AVS_M_CLK_A1"),
+       PINCTRL_PIN(40, "AVS_M_CLK_B1"),
+       PINCTRL_PIN(41, "AVS_M_DATA_1"),
+       PINCTRL_PIN(42, "AVS_M_CLK_AB2"),
+       PINCTRL_PIN(43, "AVS_M_DATA_2"),
+       PINCTRL_PIN(44, "AVS_I2S2_MCLK"),
+       PINCTRL_PIN(45, "AVS_I2S2_BCLK"),
+       PINCTRL_PIN(46, "AVS_I2S2_WS_SYNC"),
+       PINCTRL_PIN(47, "AVS_I2S2_SDI"),
+       PINCTRL_PIN(48, "AVS_I2S2_SDO"),
+       PINCTRL_PIN(49, "AVS_I2S3_BCLK"),
+       PINCTRL_PIN(50, "AVS_I2S3_WS_SYNC"),
+       PINCTRL_PIN(51, "AVS_I2S3_SDI"),
+       PINCTRL_PIN(52, "AVS_I2S3_SDO"),
+       PINCTRL_PIN(53, "FST_SPI_CS0_B"),
+       PINCTRL_PIN(54, "FST_SPI_CS1_B"),
+       PINCTRL_PIN(55, "FST_SPI_MOSI_IO0"),
+       PINCTRL_PIN(56, "FST_SPI_MISO_IO1"),
+       PINCTRL_PIN(57, "FST_SPI_IO2"),
+       PINCTRL_PIN(58, "FST_SPI_IO3"),
+       PINCTRL_PIN(59, "FST_SPI_CLK"),
+       PINCTRL_PIN(60, "FST_SPI_CLK_FB"),
+       PINCTRL_PIN(61, "GP_SSP_0_CLK"),
+       PINCTRL_PIN(62, "GP_SSP_0_FS0"),
+       PINCTRL_PIN(63, "GP_SSP_0_FS1"),
+       PINCTRL_PIN(64, "GP_SSP_0_RXD"),
+       PINCTRL_PIN(65, "GP_SSP_0_TXD"),
+       PINCTRL_PIN(66, "GP_SSP_1_CLK"),
+       PINCTRL_PIN(67, "GP_SSP_1_FS0"),
+       PINCTRL_PIN(68, "GP_SSP_1_FS1"),
+       PINCTRL_PIN(69, "GP_SSP_1_RXD"),
+       PINCTRL_PIN(70, "GP_SSP_1_TXD"),
+       PINCTRL_PIN(71, "GP_SSP_2_CLK"),
+       PINCTRL_PIN(72, "GP_SSP_2_FS0"),
+       PINCTRL_PIN(73, "GP_SSP_2_FS1"),
+       PINCTRL_PIN(74, "GP_SSP_2_FS2"),
+       PINCTRL_PIN(75, "GP_SSP_2_RXD"),
+       PINCTRL_PIN(76, "GP_SSP_2_TXD"),
+};
+
+static const unsigned apl_northwest_ssp0_pins[] = { 61, 62, 63, 64, 65 };
+static const unsigned apl_northwest_ssp1_pins[] = { 66, 67, 68, 69, 70 };
+static const unsigned apl_northwest_ssp2_pins[] = { 71, 72, 73, 74, 75, 76 };
+static const unsigned apl_northwest_uart3_pins[] = { 67, 68, 69, 70 };
+
+static const struct intel_pingroup apl_northwest_groups[] = {
+       PIN_GROUP("ssp0_grp", apl_northwest_ssp0_pins, 1),
+       PIN_GROUP("ssp1_grp", apl_northwest_ssp1_pins, 1),
+       PIN_GROUP("ssp2_grp", apl_northwest_ssp2_pins, 1),
+       PIN_GROUP("uart3_grp", apl_northwest_uart3_pins, 2),
+};
+
+static const char * const apl_northwest_ssp0_groups[] = { "ssp0_grp" };
+static const char * const apl_northwest_ssp1_groups[] = { "ssp1_grp" };
+static const char * const apl_northwest_ssp2_groups[] = { "ssp2_grp" };
+static const char * const apl_northwest_uart3_groups[] = { "uart3_grp" };
+
+static const struct intel_function apl_northwest_functions[] = {
+       FUNCTION("ssp0", apl_northwest_ssp0_groups),
+       FUNCTION("ssp1", apl_northwest_ssp1_groups),
+       FUNCTION("ssp2", apl_northwest_ssp2_groups),
+       FUNCTION("uart3", apl_northwest_uart3_groups),
+};
+
+static const struct intel_community apl_northwest_communities[] = {
+       BXT_COMMUNITY(0, 76),
+};
+
+static const struct intel_pinctrl_soc_data apl_northwest_soc_data = {
+       .uid = "2",
+       .pins = apl_northwest_pins,
+       .npins = ARRAY_SIZE(apl_northwest_pins),
+       .groups = apl_northwest_groups,
+       .ngroups = ARRAY_SIZE(apl_northwest_groups),
+       .functions = apl_northwest_functions,
+       .nfunctions = ARRAY_SIZE(apl_northwest_functions),
+       .communities = apl_northwest_communities,
+       .ncommunities = ARRAY_SIZE(apl_northwest_communities),
+};
+
+static const struct pinctrl_pin_desc apl_west_pins[] = {
+       PINCTRL_PIN(0, "LPSS_I2C0_SDA"),
+       PINCTRL_PIN(1, "LPSS_I2C0_SCL"),
+       PINCTRL_PIN(2, "LPSS_I2C1_SDA"),
+       PINCTRL_PIN(3, "LPSS_I2C1_SCL"),
+       PINCTRL_PIN(4, "LPSS_I2C2_SDA"),
+       PINCTRL_PIN(5, "LPSS_I2C2_SCL"),
+       PINCTRL_PIN(6, "LPSS_I2C3_SDA"),
+       PINCTRL_PIN(7, "LPSS_I2C3_SCL"),
+       PINCTRL_PIN(8, "LPSS_I2C4_SDA"),
+       PINCTRL_PIN(9, "LPSS_I2C4_SCL"),
+       PINCTRL_PIN(10, "LPSS_I2C5_SDA"),
+       PINCTRL_PIN(11, "LPSS_I2C5_SCL"),
+       PINCTRL_PIN(12, "LPSS_I2C6_SDA"),
+       PINCTRL_PIN(13, "LPSS_I2C6_SCL"),
+       PINCTRL_PIN(14, "LPSS_I2C7_SDA"),
+       PINCTRL_PIN(15, "LPSS_I2C7_SCL"),
+       PINCTRL_PIN(16, "ISH_GPIO_0"),
+       PINCTRL_PIN(17, "ISH_GPIO_1"),
+       PINCTRL_PIN(18, "ISH_GPIO_2"),
+       PINCTRL_PIN(19, "ISH_GPIO_3"),
+       PINCTRL_PIN(20, "ISH_GPIO_4"),
+       PINCTRL_PIN(21, "ISH_GPIO_5"),
+       PINCTRL_PIN(22, "ISH_GPIO_6"),
+       PINCTRL_PIN(23, "ISH_GPIO_7"),
+       PINCTRL_PIN(24, "ISH_GPIO_8"),
+       PINCTRL_PIN(25, "ISH_GPIO_9"),
+       PINCTRL_PIN(26, "PCIE_CLKREQ0_B"),
+       PINCTRL_PIN(27, "PCIE_CLKREQ1_B"),
+       PINCTRL_PIN(28, "PCIE_CLKREQ2_B"),
+       PINCTRL_PIN(29, "PCIE_CLKREQ3_B"),
+       PINCTRL_PIN(30, "OSC_CLK_OUT_0"),
+       PINCTRL_PIN(31, "OSC_CLK_OUT_1"),
+       PINCTRL_PIN(32, "OSC_CLK_OUT_2"),
+       PINCTRL_PIN(33, "OSC_CLK_OUT_3"),
+       PINCTRL_PIN(34, "OSC_CLK_OUT_4"),
+       PINCTRL_PIN(35, "PMU_AC_PRESENT"),
+       PINCTRL_PIN(36, "PMU_BATLOW_B"),
+       PINCTRL_PIN(37, "PMU_PLTRST_B"),
+       PINCTRL_PIN(38, "PMU_PWRBTN_B"),
+       PINCTRL_PIN(39, "PMU_RESETBUTTON_B"),
+       PINCTRL_PIN(40, "PMU_SLP_S0_B"),
+       PINCTRL_PIN(41, "PMU_SLP_S3_B"),
+       PINCTRL_PIN(42, "PMU_SLP_S4_B"),
+       PINCTRL_PIN(43, "PMU_SUSCLK"),
+       PINCTRL_PIN(44, "PMU_WAKE_B"),
+       PINCTRL_PIN(45, "SUS_STAT_B"),
+       PINCTRL_PIN(46, "SUSPWRDNACK"),
+};
+
+static const unsigned apl_west_i2c0_pins[] = { 0, 1 };
+static const unsigned apl_west_i2c1_pins[] = { 2, 3 };
+static const unsigned apl_west_i2c2_pins[] = { 4, 5 };
+static const unsigned apl_west_i2c3_pins[] = { 6, 7 };
+static const unsigned apl_west_i2c4_pins[] = { 8, 9 };
+static const unsigned apl_west_i2c5_pins[] = { 10, 11 };
+static const unsigned apl_west_i2c6_pins[] = { 12, 13 };
+static const unsigned apl_west_i2c7_pins[] = { 14, 15 };
+static const unsigned apl_west_uart2_pins[] = { 20, 21, 22, 34 };
+
+static const struct intel_pingroup apl_west_groups[] = {
+       PIN_GROUP("i2c0_grp", apl_west_i2c0_pins, 1),
+       PIN_GROUP("i2c1_grp", apl_west_i2c1_pins, 1),
+       PIN_GROUP("i2c2_grp", apl_west_i2c2_pins, 1),
+       PIN_GROUP("i2c3_grp", apl_west_i2c3_pins, 1),
+       PIN_GROUP("i2c4_grp", apl_west_i2c4_pins, 1),
+       PIN_GROUP("i2c5_grp", apl_west_i2c5_pins, 1),
+       PIN_GROUP("i2c6_grp", apl_west_i2c6_pins, 1),
+       PIN_GROUP("i2c7_grp", apl_west_i2c7_pins, 1),
+       PIN_GROUP("uart2_grp", apl_west_uart2_pins, 3),
+};
+
+static const char * const apl_west_i2c0_groups[] = { "i2c0_grp" };
+static const char * const apl_west_i2c1_groups[] = { "i2c1_grp" };
+static const char * const apl_west_i2c2_groups[] = { "i2c2_grp" };
+static const char * const apl_west_i2c3_groups[] = { "i2c3_grp" };
+static const char * const apl_west_i2c4_groups[] = { "i2c4_grp" };
+static const char * const apl_west_i2c5_groups[] = { "i2c5_grp" };
+static const char * const apl_west_i2c6_groups[] = { "i2c6_grp" };
+static const char * const apl_west_i2c7_groups[] = { "i2c7_grp" };
+static const char * const apl_west_uart2_groups[] = { "uart2_grp" };
+
+static const struct intel_function apl_west_functions[] = {
+       FUNCTION("i2c0", apl_west_i2c0_groups),
+       FUNCTION("i2c1", apl_west_i2c1_groups),
+       FUNCTION("i2c2", apl_west_i2c2_groups),
+       FUNCTION("i2c3", apl_west_i2c3_groups),
+       FUNCTION("i2c4", apl_west_i2c4_groups),
+       FUNCTION("i2c5", apl_west_i2c5_groups),
+       FUNCTION("i2c6", apl_west_i2c6_groups),
+       FUNCTION("i2c7", apl_west_i2c7_groups),
+       FUNCTION("uart2", apl_west_uart2_groups),
+};
+
+static const struct intel_community apl_west_communities[] = {
+       BXT_COMMUNITY(0, 46),
+};
+
+static const struct intel_pinctrl_soc_data apl_west_soc_data = {
+       .uid = "3",
+       .pins = apl_west_pins,
+       .npins = ARRAY_SIZE(apl_west_pins),
+       .groups = apl_west_groups,
+       .ngroups = ARRAY_SIZE(apl_west_groups),
+       .functions = apl_west_functions,
+       .nfunctions = ARRAY_SIZE(apl_west_functions),
+       .communities = apl_west_communities,
+       .ncommunities = ARRAY_SIZE(apl_west_communities),
+};
+
+static const struct pinctrl_pin_desc apl_southwest_pins[] = {
+       PINCTRL_PIN(0, "PCIE_WAKE0_B"),
+       PINCTRL_PIN(1, "PCIE_WAKE1_B"),
+       PINCTRL_PIN(2, "PCIE_WAKE2_B"),
+       PINCTRL_PIN(3, "PCIE_WAKE3_B"),
+       PINCTRL_PIN(4, "EMMC0_CLK"),
+       PINCTRL_PIN(5, "EMMC0_D0"),
+       PINCTRL_PIN(6, "EMMC0_D1"),
+       PINCTRL_PIN(7, "EMMC0_D2"),
+       PINCTRL_PIN(8, "EMMC0_D3"),
+       PINCTRL_PIN(9, "EMMC0_D4"),
+       PINCTRL_PIN(10, "EMMC0_D5"),
+       PINCTRL_PIN(11, "EMMC0_D6"),
+       PINCTRL_PIN(12, "EMMC0_D7"),
+       PINCTRL_PIN(13, "EMMC0_CMD"),
+       PINCTRL_PIN(14, "SDIO_CLK"),
+       PINCTRL_PIN(15, "SDIO_D0"),
+       PINCTRL_PIN(16, "SDIO_D1"),
+       PINCTRL_PIN(17, "SDIO_D2"),
+       PINCTRL_PIN(18, "SDIO_D3"),
+       PINCTRL_PIN(19, "SDIO_CMD"),
+       PINCTRL_PIN(20, "SDCARD_CLK"),
+       PINCTRL_PIN(21, "SDCARD_CLK_FB"),
+       PINCTRL_PIN(22, "SDCARD_D0"),
+       PINCTRL_PIN(23, "SDCARD_D1"),
+       PINCTRL_PIN(24, "SDCARD_D2"),
+       PINCTRL_PIN(25, "SDCARD_D3"),
+       PINCTRL_PIN(26, "SDCARD_CD_B"),
+       PINCTRL_PIN(27, "SDCARD_CMD"),
+       PINCTRL_PIN(28, "SDCARD_LVL_WP"),
+       PINCTRL_PIN(29, "EMMC0_STROBE"),
+       PINCTRL_PIN(30, "SDIO_PWR_DOWN_B"),
+       PINCTRL_PIN(31, "SMB_ALERTB"),
+       PINCTRL_PIN(32, "SMB_CLK"),
+       PINCTRL_PIN(33, "SMB_DATA"),
+       PINCTRL_PIN(34, "LPC_ILB_SERIRQ"),
+       PINCTRL_PIN(35, "LPC_CLKOUT0"),
+       PINCTRL_PIN(36, "LPC_CLKOUT1"),
+       PINCTRL_PIN(37, "LPC_AD0"),
+       PINCTRL_PIN(38, "LPC_AD1"),
+       PINCTRL_PIN(39, "LPC_AD2"),
+       PINCTRL_PIN(40, "LPC_AD3"),
+       PINCTRL_PIN(41, "LPC_CLKRUNB"),
+       PINCTRL_PIN(42, "LPC_FRAMEB"),
+};
+
+static const unsigned apl_southwest_emmc0_pins[] = {
+       4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 29,
+};
+static const unsigned apl_southwest_sdio_pins[] = {
+       14, 15, 16, 17, 18, 19, 30,
+};
+static const unsigned apl_southwest_sdcard_pins[] = {
+       20, 21, 22, 23, 24, 25, 26, 27, 28,
+};
+static const unsigned apl_southwest_i2c7_pins[] = { 32, 33 };
+
+static const struct intel_pingroup apl_southwest_groups[] = {
+       PIN_GROUP("emmc0_grp", apl_southwest_emmc0_pins, 1),
+       PIN_GROUP("sdio_grp", apl_southwest_sdio_pins, 1),
+       PIN_GROUP("sdcard_grp", apl_southwest_sdcard_pins, 1),
+       PIN_GROUP("i2c7_grp", apl_southwest_i2c7_pins, 2),
+};
+
+static const char * const apl_southwest_emmc0_groups[] = { "emmc0_grp" };
+static const char * const apl_southwest_sdio_groups[] = { "sdio_grp" };
+static const char * const apl_southwest_sdcard_groups[] = { "sdcard_grp" };
+static const char * const apl_southwest_i2c7_groups[] = { "i2c7_grp" };
+
+static const struct intel_function apl_southwest_functions[] = {
+       FUNCTION("emmc0", apl_southwest_emmc0_groups),
+       FUNCTION("sdio", apl_southwest_sdio_groups),
+       FUNCTION("sdcard", apl_southwest_sdcard_groups),
+       FUNCTION("i2c7", apl_southwest_i2c7_groups),
+};
+
+static const struct intel_community apl_southwest_communities[] = {
+       BXT_COMMUNITY(0, 42),
+};
+
+static const struct intel_pinctrl_soc_data apl_southwest_soc_data = {
+       .uid = "4",
+       .pins = apl_southwest_pins,
+       .npins = ARRAY_SIZE(apl_southwest_pins),
+       .groups = apl_southwest_groups,
+       .ngroups = ARRAY_SIZE(apl_southwest_groups),
+       .functions = apl_southwest_functions,
+       .nfunctions = ARRAY_SIZE(apl_southwest_functions),
+       .communities = apl_southwest_communities,
+       .ncommunities = ARRAY_SIZE(apl_southwest_communities),
+};
+
+static const struct intel_pinctrl_soc_data *apl_pinctrl_soc_data[] = {
+       &apl_north_soc_data,
+       &apl_northwest_soc_data,
+       &apl_west_soc_data,
+       &apl_southwest_soc_data,
+       NULL,
+};
+
+static const struct acpi_device_id bxt_pinctrl_acpi_match[] = {
+       { "INT3452", (kernel_ulong_t)apl_pinctrl_soc_data },
+       { "INT34D1", (kernel_ulong_t)bxt_pinctrl_soc_data },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, bxt_pinctrl_acpi_match);
+
+static int bxt_pinctrl_probe(struct platform_device *pdev)
+{
+       const struct intel_pinctrl_soc_data *soc_data = NULL;
+       const struct intel_pinctrl_soc_data **soc_table;
+       const struct acpi_device_id *id;
+       struct acpi_device *adev;
+       int i;
+
+       adev = ACPI_COMPANION(&pdev->dev);
+       if (!adev)
+               return -ENODEV;
+
+       id = acpi_match_device(bxt_pinctrl_acpi_match, &pdev->dev);
+       if (!id)
+               return -ENODEV;
+
+       soc_table = (const struct intel_pinctrl_soc_data **)id->driver_data;
+
+       for (i = 0; soc_table[i]; i++) {
+               if (!strcmp(adev->pnp.unique_id, soc_table[i]->uid)) {
+                       soc_data = soc_table[i];
+                       break;
+               }
+       }
+
+       if (!soc_data)
+               return -ENODEV;
+
+       return intel_pinctrl_probe(pdev, soc_data);
+}
+
+static const struct dev_pm_ops bxt_pinctrl_pm_ops = {
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
+                                    intel_pinctrl_resume)
+};
+
+static struct platform_driver bxt_pinctrl_driver = {
+       .probe = bxt_pinctrl_probe,
+       .remove = intel_pinctrl_remove,
+       .driver = {
+               .name = "broxton-pinctrl",
+               .acpi_match_table = bxt_pinctrl_acpi_match,
+               .pm = &bxt_pinctrl_pm_ops,
+       },
+};
+
+static int __init bxt_pinctrl_init(void)
+{
+       return platform_driver_register(&bxt_pinctrl_driver);
+}
+subsys_initcall(bxt_pinctrl_init);
+
+static void __exit bxt_pinctrl_exit(void)
+{
+       platform_driver_unregister(&bxt_pinctrl_driver);
+}
+module_exit(bxt_pinctrl_exit);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Broxton SoC pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
index 270c127e03ea72f759d74758d3d81b00d60e52bb..84936bae6e5ee07f741477cd185509f86bb816a7 100644 (file)
@@ -1149,16 +1149,6 @@ static struct pinctrl_desc chv_pinctrl_desc = {
        .owner = THIS_MODULE,
 };
 
-static int chv_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void chv_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static unsigned chv_gpio_offset_to_pin(struct chv_pinctrl *pctrl,
                                       unsigned offset)
 {
@@ -1238,8 +1228,8 @@ static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
 
 static const struct gpio_chip chv_gpio_chip = {
        .owner = THIS_MODULE,
-       .request = chv_gpio_request,
-       .free = chv_gpio_free,
+       .request = gpiochip_generic_request,
+       .free = gpiochip_generic_free,
        .get_direction = chv_gpio_get_direction,
        .direction_input = chv_gpio_direction_input,
        .direction_output = chv_gpio_direction_output,
index 54848b8decef7735d8b009c6431a970f94204488..392e28d3f48d0bee1a4087154e904c7dfdc0faa7 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/acpi.h>
 #include <linux/gpio.h>
 #include <linux/gpio/driver.h>
@@ -159,8 +160,7 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
        return !(readl(padown) & PADOWN_MASK(padno));
 }
 
-static bool intel_pad_reserved_for_acpi(struct intel_pinctrl *pctrl,
-                                       unsigned pin)
+static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned pin)
 {
        const struct intel_community *community;
        unsigned padno, gpp, offset;
@@ -216,7 +216,6 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
 static bool intel_pad_usable(struct intel_pinctrl *pctrl, unsigned pin)
 {
        return intel_pad_owned_by_host(pctrl, pin) &&
-               !intel_pad_reserved_for_acpi(pctrl, pin) &&
                !intel_pad_locked(pctrl, pin);
 }
 
@@ -269,7 +268,7 @@ static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
        seq_printf(s, "0x%08x 0x%08x", cfg0, cfg1);
 
        locked = intel_pad_locked(pctrl, pin);
-       acpi = intel_pad_reserved_for_acpi(pctrl, pin);
+       acpi = intel_pad_acpi_mode(pctrl, pin);
 
        if (locked || acpi) {
                seq_puts(s, " [");
@@ -597,16 +596,6 @@ static const struct pinctrl_desc intel_pinctrl_desc = {
        .owner = THIS_MODULE,
 };
 
-static int intel_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void intel_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
        struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
@@ -654,8 +643,8 @@ static int intel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
 
 static const struct gpio_chip intel_gpio_chip = {
        .owner = THIS_MODULE,
-       .request = intel_gpio_request,
-       .free = intel_gpio_free,
+       .request = gpiochip_generic_request,
+       .free = gpiochip_generic_free,
        .direction_input = intel_gpio_direction_input,
        .direction_output = intel_gpio_direction_output,
        .get = intel_gpio_get,
@@ -736,6 +725,16 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
        if (!reg)
                return -EINVAL;
 
+       /*
+        * If the pin is in ACPI mode it is still usable as a GPIO but it
+        * cannot be used as IRQ because GPI_IS status bit will not be
+        * updated by the host controller hardware.
+        */
+       if (intel_pad_acpi_mode(pctrl, pin)) {
+               dev_warn(pctrl->dev, "pin %u cannot be used as IRQ\n", pin);
+               return -EPERM;
+       }
+
        spin_lock_irqsave(&pctrl->lock, flags);
 
        value = readl(reg);
@@ -803,9 +802,11 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
        return 0;
 }
 
-static void intel_gpio_community_irq_handler(struct gpio_chip *gc,
+static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
        const struct intel_community *community)
 {
+       struct gpio_chip *gc = &pctrl->chip;
+       irqreturn_t ret = IRQ_NONE;
        int gpp;
 
        for (gpp = 0; gpp < community->ngpps; gpp++) {
@@ -832,24 +833,28 @@ static void intel_gpio_community_irq_handler(struct gpio_chip *gc,
                        irq = irq_find_mapping(gc->irqdomain,
                                               community->pin_base + padno);
                        generic_handle_irq(irq);
+
+                       ret |= IRQ_HANDLED;
                }
        }
+
+       return ret;
 }
 
-static void intel_gpio_irq_handler(struct irq_desc *desc)
+static irqreturn_t intel_gpio_irq(int irq, void *data)
 {
-       struct gpio_chip *gc = irq_desc_get_handler_data(desc);
-       struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
-       struct irq_chip *chip = irq_desc_get_chip(desc);
+       const struct intel_community *community;
+       struct intel_pinctrl *pctrl = data;
+       irqreturn_t ret = IRQ_NONE;
        int i;
 
-       chained_irq_enter(chip, desc);
-
        /* Need to check all communities for pending interrupts */
-       for (i = 0; i < pctrl->ncommunities; i++)
-               intel_gpio_community_irq_handler(gc, &pctrl->communities[i]);
+       for (i = 0; i < pctrl->ncommunities; i++) {
+               community = &pctrl->communities[i];
+               ret |= intel_gpio_community_irq_handler(pctrl, community);
+       }
 
-       chained_irq_exit(chip, desc);
+       return ret;
 }
 
 static struct irq_chip intel_gpio_irqchip = {
@@ -861,26 +866,6 @@ static struct irq_chip intel_gpio_irqchip = {
        .irq_set_wake = intel_gpio_irq_wake,
 };
 
-static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
-{
-       size_t i;
-
-       for (i = 0; i < pctrl->ncommunities; i++) {
-               const struct intel_community *community;
-               void __iomem *base;
-               unsigned gpp;
-
-               community = &pctrl->communities[i];
-               base = community->regs;
-
-               for (gpp = 0; gpp < community->ngpps; gpp++) {
-                       /* Mask and clear all interrupts */
-                       writel(0, base + community->ie_offset + gpp * 4);
-                       writel(0xffff, base + GPI_IS + gpp * 4);
-               }
-       }
-}
-
 static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
 {
        int ret;
@@ -902,21 +887,36 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
                                     0, 0, pctrl->soc->npins);
        if (ret) {
                dev_err(pctrl->dev, "failed to add GPIO pin range\n");
-               gpiochip_remove(&pctrl->chip);
-               return ret;
+               goto fail;
+       }
+
+       /*
+        * We need to request the interrupt here (instead of providing chip
+        * to the irq directly) because on some platforms several GPIO
+        * controllers share the same interrupt line.
+        */
+       ret = devm_request_irq(pctrl->dev, irq, intel_gpio_irq, IRQF_SHARED,
+                              dev_name(pctrl->dev), pctrl);
+       if (ret) {
+               dev_err(pctrl->dev, "failed to request interrupt\n");
+               goto fail;
        }
 
        ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
                                   handle_simple_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(pctrl->dev, "failed to add irqchip\n");
-               gpiochip_remove(&pctrl->chip);
-               return ret;
+               goto fail;
        }
 
        gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
-                                    intel_gpio_irq_handler);
+                                    NULL);
        return 0;
+
+fail:
+       gpiochip_remove(&pctrl->chip);
+
+       return ret;
 }
 
 static int intel_pinctrl_pm_init(struct intel_pinctrl *pctrl)
@@ -1087,6 +1087,26 @@ int intel_pinctrl_suspend(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(intel_pinctrl_suspend);
 
+static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
+{
+       size_t i;
+
+       for (i = 0; i < pctrl->ncommunities; i++) {
+               const struct intel_community *community;
+               void __iomem *base;
+               unsigned gpp;
+
+               community = &pctrl->communities[i];
+               base = community->regs;
+
+               for (gpp = 0; gpp < community->ngpps; gpp++) {
+                       /* Mask and clear all interrupts */
+                       writel(0, base + community->ie_offset + gpp * 4);
+                       writel(0xffff, base + GPI_IS + gpp * 4);
+               }
+       }
+}
+
 int intel_pinctrl_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
index 1b22f96ba839af38d920d40f0f63309d2141a2b5..f307f1d27d646fcd3954a9d497528fffbf16dc2b 100644 (file)
@@ -723,16 +723,6 @@ static const struct pinmux_ops mtk_pmx_ops = {
        .gpio_set_direction     = mtk_pmx_gpio_set_direction,
 };
 
-static int mtk_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void mtk_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static int mtk_gpio_direction_input(struct gpio_chip *chip,
                                        unsigned offset)
 {
@@ -899,7 +889,7 @@ static int mtk_eint_flip_edge(struct mtk_pinctrl *pctl, int hwirq)
        int start_level, curr_level;
        unsigned int reg_offset;
        const struct mtk_eint_offsets *eint_offsets = &(pctl->devdata->eint_offsets);
-       u32 mask = 1 << (hwirq & 0x1f);
+       u32 mask = BIT(hwirq & 0x1f);
        u32 port = (hwirq >> 5) & eint_offsets->port_mask;
        void __iomem *reg = pctl->eint_reg_base + (port << 2);
        const struct mtk_desc_pin *pin;
@@ -1005,8 +995,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
 
 static struct gpio_chip mtk_gpio_chip = {
        .owner                  = THIS_MODULE,
-       .request                = mtk_gpio_request,
-       .free                   = mtk_gpio_free,
+       .request                = gpiochip_generic_request,
+       .free                   = gpiochip_generic_free,
        .direction_input        = mtk_gpio_direction_input,
        .direction_output       = mtk_gpio_direction_output,
        .get                    = mtk_gpio_get,
@@ -1436,7 +1426,7 @@ int mtk_pctrl_init(struct platform_device *pdev,
                irq_set_chip_and_handler(virq, &mtk_pinctrl_irq_chip,
                        handle_level_irq);
                irq_set_chip_data(virq, pctl);
-       };
+       }
 
        irq_set_chained_handler_and_data(irq, mtk_eint_irq_handler, pctl);
        return 0;
index 97681fac082e71f449874a4a5a21a68b07e27fc2..b59fbb4b1fb1e5ba4325eeae1a18ffb91e7f841e 100644 (file)
@@ -654,25 +654,11 @@ static inline void abx500_gpio_dbg_show_one(struct seq_file *s,
 #define abx500_gpio_dbg_show   NULL
 #endif
 
-static int abx500_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       int gpio = chip->base + offset;
-
-       return pinctrl_request_gpio(gpio);
-}
-
-static void abx500_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       int gpio = chip->base + offset;
-
-       pinctrl_free_gpio(gpio);
-}
-
 static struct gpio_chip abx500gpio_chip = {
        .label                  = "abx500-gpio",
        .owner                  = THIS_MODULE,
-       .request                = abx500_gpio_request,
-       .free                   = abx500_gpio_free,
+       .request                = gpiochip_generic_request,
+       .free                   = gpiochip_generic_free,
        .direction_input        = abx500_gpio_direction_input,
        .get                    = abx500_gpio_get,
        .direction_output       = abx500_gpio_direction_output,
index 96cf03908e93cc1462435b1aebc6c178c4a620f5..eebfae0c9b7c84a6eacc4ad450d0765e602a011f 100644 (file)
@@ -884,24 +884,6 @@ static void nmk_gpio_latent_irq_handler(struct irq_desc *desc)
 
 /* I/O Functions */
 
-static int nmk_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       /*
-        * Map back to global GPIO space and request muxing, the direction
-        * parameter does not matter for this controller.
-        */
-       int gpio = chip->base + offset;
-
-       return pinctrl_request_gpio(gpio);
-}
-
-static void nmk_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       int gpio = chip->base + offset;
-
-       pinctrl_free_gpio(gpio);
-}
-
 static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
 {
        struct nmk_gpio_chip *nmk_chip =
@@ -1267,8 +1249,8 @@ static int nmk_gpio_probe(struct platform_device *dev)
        spin_lock_init(&nmk_chip->lock);
 
        chip = &nmk_chip->chip;
-       chip->request = nmk_gpio_request;
-       chip->free = nmk_gpio_free;
+       chip->request = gpiochip_generic_request;
+       chip->free = gpiochip_generic_free;
        chip->direction_input = nmk_gpio_make_input;
        chip->get = nmk_gpio_get_input;
        chip->direction_output = nmk_gpio_make_output;
index e63ad9fbd388a79c1afe0f1b4a2ff99e19a11574..099a3442ff4214b2ab55497a93d49774e89d1c65 100644 (file)
 
 #ifdef CONFIG_DEBUG_FS
 static const struct pin_config_item conf_items[] = {
+       PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL, false),
        PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL, false),
        PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL, false),
-       PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL, false),
-       PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
        PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL, false),
        PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
                                "input bias pull to pin specific state", NULL, false),
-       PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
+       PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false),
+       PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_STRENGTH, "output drive strength", "mA", true),
+       PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "usec", true),
        PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL, false),
-       PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
        PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false),
-       PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "usec", true),
-       PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
-       PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
+       PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
        PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode", true),
        PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
+       PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
+       PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
 };
 
 static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
@@ -150,27 +150,28 @@ EXPORT_SYMBOL_GPL(pinconf_generic_dump_config);
 
 #ifdef CONFIG_OF
 static const struct pinconf_generic_params dt_params[] = {
+       { "bias-bus-hold", PIN_CONFIG_BIAS_BUS_HOLD, 0 },
        { "bias-disable", PIN_CONFIG_BIAS_DISABLE, 0 },
        { "bias-high-impedance", PIN_CONFIG_BIAS_HIGH_IMPEDANCE, 0 },
-       { "bias-bus-hold", PIN_CONFIG_BIAS_BUS_HOLD, 0 },
        { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 },
-       { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 },
        { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 },
-       { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 },
+       { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 },
        { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 },
        { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 },
+       { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 },
        { "drive-strength", PIN_CONFIG_DRIVE_STRENGTH, 0 },
-       { "input-enable", PIN_CONFIG_INPUT_ENABLE, 1 },
+       { "input-debounce", PIN_CONFIG_INPUT_DEBOUNCE, 0 },
        { "input-disable", PIN_CONFIG_INPUT_ENABLE, 0 },
-       { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
+       { "input-enable", PIN_CONFIG_INPUT_ENABLE, 1 },
+       { "input-schmitt", PIN_CONFIG_INPUT_SCHMITT, 0 },
        { "input-schmitt-disable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 0 },
-       { "input-debounce", PIN_CONFIG_INPUT_DEBOUNCE, 0 },
-       { "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
-       { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
+       { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
        { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
-       { "output-low", PIN_CONFIG_OUTPUT, 0, },
+       { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
        { "output-high", PIN_CONFIG_OUTPUT, 1, },
-       { "slew-rate", PIN_CONFIG_SLEW_RATE, 0},
+       { "output-low", PIN_CONFIG_OUTPUT, 0, },
+       { "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
+       { "slew-rate", PIN_CONFIG_SLEW_RATE, 0 },
 };
 
 /**
index 29a7bb17a42f52b85cf82bbb6dc01c2311ffa0b1..4dd7722f993552a4c8cd698cdc2f8d224dc337c4 100644 (file)
@@ -411,7 +411,7 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
        const struct pinctrl_map *found = NULL;
        struct pinctrl_dev *pctldev;
        struct dbg_cfg *dbg = &pinconf_dbg_conf;
-       int i, j;
+       int i;
 
        mutex_lock(&pinctrl_maps_mutex);
 
@@ -424,13 +424,10 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
                if (strcmp(map->name, dbg->state_name))
                        continue;
 
-               for (j = 0; j < map->data.configs.num_configs; j++) {
-                       if (!strcmp(map->data.configs.group_or_pin,
-                                       dbg->pin_name)) {
-                               /* We found the right pin / state */
-                               found = map;
-                               break;
-                       }
+               if (!strcmp(map->data.configs.group_or_pin, dbg->pin_name)) {
+                       /* We found the right pin */
+                       found = map;
+                       break;
                }
        }
 
index f6be68518c87d78195d3a9505b8eab5377804227..fd342dffe4dc766cefe93a29b3408476753b7316 100644 (file)
@@ -713,16 +713,6 @@ static struct pinctrl_desc adi_pinmux_desc = {
        .owner = THIS_MODULE,
 };
 
-static int adi_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void adi_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static int adi_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
        struct gpio_port *port;
@@ -994,8 +984,8 @@ static int adi_gpio_probe(struct platform_device *pdev)
        port->chip.get                  = adi_gpio_get_value;
        port->chip.direction_output     = adi_gpio_direction_output;
        port->chip.set                  = adi_gpio_set_value;
-       port->chip.request              = adi_gpio_request;
-       port->chip.free                 = adi_gpio_free;
+       port->chip.request              = gpiochip_generic_request,
+       port->chip.free                 = gpiochip_generic_free,
        port->chip.to_irq               = adi_gpio_to_irq;
        if (pdata->port_gpio_base > 0)
                port->chip.base         = pdata->port_gpio_base;
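
A recurring change in this pull: drivers drop their hand-rolled request/free callbacks in favour of the new gpiochip_generic_request()/gpiochip_generic_free() gpiolib helpers. Judging by the callbacks being deleted here and in the drivers below, the generic helpers are expected to boil down to the same two pinctrl calls; a minimal sketch, not taken from this diff:

/* Presumed shape of the gpiolib helpers the drivers now point at:
 * translate the chip-relative offset to a global GPIO number and
 * delegate the request/free to the pinctrl layer.
 */
int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
{
	return pinctrl_request_gpio(chip->base + offset);
}

void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
{
	pinctrl_free_gpio(chip->base + offset);
}

The per-driver versions removed in the hunks that follow (as3722, at91, u300, digicolor, pistachio, rockchip, st, xway, msm, qcom pmic) were all variations of this same pattern.
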
index 4747e08f5389c36698c3916453e5284d77f8e476..56af28b95a44f627d145c6f91a52765787f2ae52 100644 (file)
@@ -536,21 +536,11 @@ static int as3722_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
        return as3722_irq_get_virq(as_pci->as3722, offset);
 }
 
-static int as3722_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void as3722_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static const struct gpio_chip as3722_gpio_chip = {
        .label                  = "as3722-gpio",
        .owner                  = THIS_MODULE,
-       .request                = as3722_gpio_request,
-       .free                   = as3722_gpio_free,
+       .request                = gpiochip_generic_request,
+       .free                   = gpiochip_generic_free,
        .get                    = as3722_gpio_get,
        .set                    = as3722_gpio_set,
        .direction_input        = as3722_gpio_direction_input,
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
new file mode 100644 (file)
index 0000000..33edd07
--- /dev/null
@@ -0,0 +1,1094 @@
+/*
+ * Driver for the Atmel PIO4 controller
+ *
+ * Copyright (C) 2015 Atmel,
+ *               2015 Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/slab.h>
+#include "core.h"
+#include "pinconf.h"
+#include "pinctrl-utils.h"
+
+/*
+ * Warning:
+ * To avoid confusion between Atmel PIO groups and pinctrl framework groups,
+ * Atmel PIO groups are called banks; "line" designates the index of a pin
+ * within its bank.
+ */
+
+#define ATMEL_PIO_MSKR         0x0000
+#define ATMEL_PIO_CFGR         0x0004
+#define                ATMEL_PIO_CFGR_FUNC_MASK        GENMASK(2, 0)
+#define                ATMEL_PIO_DIR_MASK              BIT(8)
+#define                ATMEL_PIO_PUEN_MASK             BIT(9)
+#define                ATMEL_PIO_PDEN_MASK             BIT(10)
+#define                ATMEL_PIO_IFEN_MASK             BIT(12)
+#define                ATMEL_PIO_IFSCEN_MASK           BIT(13)
+#define                ATMEL_PIO_OPD_MASK              BIT(14)
+#define                ATMEL_PIO_SCHMITT_MASK          BIT(15)
+#define                ATMEL_PIO_CFGR_EVTSEL_MASK      GENMASK(26, 24)
+#define                ATMEL_PIO_CFGR_EVTSEL_FALLING   (0 << 24)
+#define                ATMEL_PIO_CFGR_EVTSEL_RISING    (1 << 24)
+#define                ATMEL_PIO_CFGR_EVTSEL_BOTH      (2 << 24)
+#define                ATMEL_PIO_CFGR_EVTSEL_LOW       (3 << 24)
+#define                ATMEL_PIO_CFGR_EVTSEL_HIGH      (4 << 24)
+#define ATMEL_PIO_PDSR         0x0008
+#define ATMEL_PIO_LOCKSR       0x000C
+#define ATMEL_PIO_SODR         0x0010
+#define ATMEL_PIO_CODR         0x0014
+#define ATMEL_PIO_ODSR         0x0018
+#define ATMEL_PIO_IER          0x0020
+#define ATMEL_PIO_IDR          0x0024
+#define ATMEL_PIO_IMR          0x0028
+#define ATMEL_PIO_ISR          0x002C
+#define ATMEL_PIO_IOFR         0x003C
+
+#define ATMEL_PIO_NPINS_PER_BANK       32
+#define ATMEL_PIO_BANK(pin_id)         (pin_id / ATMEL_PIO_NPINS_PER_BANK)
+#define ATMEL_PIO_LINE(pin_id)         (pin_id % ATMEL_PIO_NPINS_PER_BANK)
+#define ATMEL_PIO_BANK_OFFSET          0x40
+
+#define ATMEL_GET_PIN_NO(pinfunc)      ((pinfunc) & 0xff)
+#define ATMEL_GET_PIN_FUNC(pinfunc)    ((pinfunc >> 16) & 0xf)
+#define ATMEL_GET_PIN_IOSET(pinfunc)   ((pinfunc >> 20) & 0xf)
+
+struct atmel_pioctrl_data {
+       unsigned nbanks;
+};
+
+struct atmel_group {
+       const char *name;
+       u32 pin;
+};
+
+struct atmel_pin {
+       unsigned pin_id;
+       unsigned mux;
+       unsigned ioset;
+       unsigned bank;
+       unsigned line;
+       const char *device;
+};
+
+/**
+ * struct atmel_pioctrl - Atmel PIO controller (pinmux + gpio)
+ * @reg_base: base address of the controller.
+ * @clk: clock of the controller.
+ * @nbanks: number of PIO groups, it can vary depending on the SoC.
+ * @pinctrl_dev: pinctrl device registered.
+ * @groups: groups table to provide group name and pin in the group to pinctrl.
+ * @group_names: group names table to provide all the group/pin names to
+ *     pinctrl or gpio.
+ * @pins: pins table used for both pinctrl and gpio. pin_id, bank and line
+ *     fields are set at probe time. Other ones are set when parsing dt
+ *     pinctrl.
+ * @npins: number of pins.
+ * @gpio_chip: gpio chip registered.
+ * @irq_domain: irq domain for the gpio controller.
+ * @irqs: table containing the hw irq number of the bank. The index of the
+ *     table is the bank id.
+ * @dev: device entry for the Atmel PIO controller.
+ * @node: node of the Atmel PIO controller.
+ */
+struct atmel_pioctrl {
+       void __iomem            *reg_base;
+       struct clk              *clk;
+       unsigned                nbanks;
+       struct pinctrl_dev      *pinctrl_dev;
+       struct atmel_group      *groups;
+       const char * const      *group_names;
+       struct atmel_pin        **pins;
+       unsigned                npins;
+       struct gpio_chip        *gpio_chip;
+       struct irq_domain       *irq_domain;
+       int                     *irqs;
+       unsigned                *pm_wakeup_sources;
+       unsigned                *pm_suspend_backup;
+       struct device           *dev;
+       struct device_node      *node;
+};
+
+static const char * const atmel_functions[] = {
+       "GPIO", "A", "B", "C", "D", "E", "F", "G"
+};
+
+/* --- GPIO --- */
+static unsigned int atmel_gpio_read(struct atmel_pioctrl *atmel_pioctrl,
+                                   unsigned int bank, unsigned int reg)
+{
+       return readl_relaxed(atmel_pioctrl->reg_base
+                            + ATMEL_PIO_BANK_OFFSET * bank + reg);
+}
+
+static void atmel_gpio_write(struct atmel_pioctrl *atmel_pioctrl,
+                            unsigned int bank, unsigned int reg,
+                            unsigned int val)
+{
+       writel_relaxed(val, atmel_pioctrl->reg_base
+                      + ATMEL_PIO_BANK_OFFSET * bank + reg);
+}
+
+static void atmel_gpio_irq_ack(struct irq_data *d)
+{
+       /*
+        * Nothing to do, interrupt is cleared when reading the status
+        * register.
+        */
+}
+
+static int atmel_gpio_irq_set_type(struct irq_data *d, unsigned type)
+{
+       struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
+       struct atmel_pin *pin = atmel_pioctrl->pins[d->hwirq];
+       unsigned reg;
+
+       atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_MSKR,
+                        BIT(pin->line));
+       reg = atmel_gpio_read(atmel_pioctrl, pin->bank, ATMEL_PIO_CFGR);
+       reg &= (~ATMEL_PIO_CFGR_EVTSEL_MASK);
+
+       switch (type) {
+       case IRQ_TYPE_EDGE_RISING:
+               irq_set_handler_locked(d, handle_edge_irq);
+               reg |= ATMEL_PIO_CFGR_EVTSEL_RISING;
+               break;
+       case IRQ_TYPE_EDGE_FALLING:
+               irq_set_handler_locked(d, handle_edge_irq);
+               reg |= ATMEL_PIO_CFGR_EVTSEL_FALLING;
+               break;
+       case IRQ_TYPE_EDGE_BOTH:
+               irq_set_handler_locked(d, handle_edge_irq);
+               reg |= ATMEL_PIO_CFGR_EVTSEL_BOTH;
+               break;
+       case IRQ_TYPE_LEVEL_LOW:
+               irq_set_handler_locked(d, handle_level_irq);
+               reg |= ATMEL_PIO_CFGR_EVTSEL_LOW;
+               break;
+       case IRQ_TYPE_LEVEL_HIGH:
+               irq_set_handler_locked(d, handle_level_irq);
+               reg |= ATMEL_PIO_CFGR_EVTSEL_HIGH;
+               break;
+       case IRQ_TYPE_NONE:
+       default:
+               return -EINVAL;
+       }
+
+       atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_CFGR, reg);
+
+       return 0;
+}
+
+static void atmel_gpio_irq_mask(struct irq_data *d)
+{
+       struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
+       struct atmel_pin *pin = atmel_pioctrl->pins[d->hwirq];
+
+       atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_IDR,
+                        BIT(pin->line));
+}
+
+static void atmel_gpio_irq_unmask(struct irq_data *d)
+{
+       struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
+       struct atmel_pin *pin = atmel_pioctrl->pins[d->hwirq];
+
+       atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_IER,
+                        BIT(pin->line));
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int atmel_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+       struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
+       int bank = ATMEL_PIO_BANK(d->hwirq);
+       int line = ATMEL_PIO_LINE(d->hwirq);
+
+       /* The gpio controller has one interrupt line per bank. */
+       irq_set_irq_wake(atmel_pioctrl->irqs[bank], on);
+
+       if (on)
+               atmel_pioctrl->pm_wakeup_sources[bank] |= BIT(line);
+       else
+               atmel_pioctrl->pm_wakeup_sources[bank] &= ~(BIT(line));
+
+       return 0;
+}
+#else
+#define atmel_gpio_irq_set_wake NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct irq_chip atmel_gpio_irq_chip = {
+       .name           = "GPIO",
+       .irq_ack        = atmel_gpio_irq_ack,
+       .irq_mask       = atmel_gpio_irq_mask,
+       .irq_unmask     = atmel_gpio_irq_unmask,
+       .irq_set_type   = atmel_gpio_irq_set_type,
+       .irq_set_wake   = atmel_gpio_irq_set_wake,
+};
+
+static void atmel_gpio_irq_handler(struct irq_desc *desc)
+{
+       unsigned int irq = irq_desc_get_irq(desc);
+       struct atmel_pioctrl *atmel_pioctrl = irq_desc_get_handler_data(desc);
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+       unsigned long isr;
+       int n, bank = -1;
+
+       /* Find which bank the irq was received from. */
+       for (n = 0; n < atmel_pioctrl->nbanks; n++) {
+               if (atmel_pioctrl->irqs[n] == irq) {
+                       bank = n;
+                       break;
+               }
+       }
+
+       if (bank < 0) {
+               dev_err(atmel_pioctrl->dev,
+                       "no bank associated to irq %u\n", irq);
+               return;
+       }
+
+       chained_irq_enter(chip, desc);
+
+       for (;;) {
+               isr = (unsigned long)atmel_gpio_read(atmel_pioctrl, bank,
+                                                    ATMEL_PIO_ISR);
+               isr &= (unsigned long)atmel_gpio_read(atmel_pioctrl, bank,
+                                                     ATMEL_PIO_IMR);
+               if (!isr)
+                       break;
+
+               for_each_set_bit(n, &isr, BITS_PER_LONG)
+                       generic_handle_irq(gpio_to_irq(bank *
+                                       ATMEL_PIO_NPINS_PER_BANK + n));
+       }
+
+       chained_irq_exit(chip, desc);
+}
+
+static int atmel_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       struct atmel_pioctrl *atmel_pioctrl = dev_get_drvdata(chip->dev);
+       struct atmel_pin *pin = atmel_pioctrl->pins[offset];
+       unsigned reg;
+
+       atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_MSKR,
+                        BIT(pin->line));
+       reg = atmel_gpio_read(atmel_pioctrl, pin->bank, ATMEL_PIO_CFGR);
+       reg &= ~ATMEL_PIO_DIR_MASK;
+       atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_CFGR, reg);
+
+       return 0;
+}
+
+static int atmel_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+       struct atmel_pioctrl *atmel_pioctrl = dev_get_drvdata(chip->dev);
+       struct atmel_pin *pin = atmel_pioctrl->pins[offset];
+       unsigned reg;
+
+       reg = atmel_gpio_read(atmel_pioctrl, pin->bank, ATMEL_PIO_PDSR);
+
+       return !!(reg & BIT(pin->line));
+}
+
+static int atmel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+                                      int value)
+{
+       struct atmel_pioctrl *atmel_pioctrl = dev_get_drvdata(chip->dev);
+       struct atmel_pin *pin = atmel_pioctrl->pins[offset];
+       unsigned reg;
+
+       atmel_gpio_write(atmel_pioctrl, pin->bank,
+                        value ? ATMEL_PIO_SODR : ATMEL_PIO_CODR,
+                        BIT(pin->line));
+
+       atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_MSKR,
+                        BIT(pin->line));
+       reg = atmel_gpio_read(atmel_pioctrl, pin->bank, ATMEL_PIO_CFGR);
+       reg |= ATMEL_PIO_DIR_MASK;
+       atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_CFGR, reg);
+
+       return 0;
+}
+
+static void atmel_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+       struct atmel_pioctrl *atmel_pioctrl = dev_get_drvdata(chip->dev);
+       struct atmel_pin *pin = atmel_pioctrl->pins[offset];
+
+       atmel_gpio_write(atmel_pioctrl, pin->bank,
+                        val ? ATMEL_PIO_SODR : ATMEL_PIO_CODR,
+                        BIT(pin->line));
+}
+
+static int atmel_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+       struct atmel_pioctrl *atmel_pioctrl = dev_get_drvdata(chip->dev);
+
+       return irq_find_mapping(atmel_pioctrl->irq_domain, offset);
+}
+
+static struct gpio_chip atmel_gpio_chip = {
+       .direction_input        = atmel_gpio_direction_input,
+       .get                    = atmel_gpio_get,
+       .direction_output       = atmel_gpio_direction_output,
+       .set                    = atmel_gpio_set,
+       .to_irq                 = atmel_gpio_to_irq,
+       .base                   = 0,
+};
+
+/* --- PINCTRL --- */
+static unsigned int atmel_pin_config_read(struct pinctrl_dev *pctldev,
+                                         unsigned pin_id)
+{
+       struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
+       unsigned bank = atmel_pioctrl->pins[pin_id]->bank;
+       unsigned line = atmel_pioctrl->pins[pin_id]->line;
+       void __iomem *addr = atmel_pioctrl->reg_base
+                            + bank * ATMEL_PIO_BANK_OFFSET;
+
+       writel_relaxed(BIT(line), addr + ATMEL_PIO_MSKR);
+       /* Have to set MSKR first, to access the right pin CFGR. */
+       wmb();
+
+       return readl_relaxed(addr + ATMEL_PIO_CFGR);
+}
+
+static void atmel_pin_config_write(struct pinctrl_dev *pctldev,
+                                  unsigned pin_id, u32 conf)
+{
+       struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
+       unsigned bank = atmel_pioctrl->pins[pin_id]->bank;
+       unsigned line = atmel_pioctrl->pins[pin_id]->line;
+       void __iomem *addr = atmel_pioctrl->reg_base
+                            + bank * ATMEL_PIO_BANK_OFFSET;
+
+       writel_relaxed(BIT(line), addr + ATMEL_PIO_MSKR);
+       /* Have to set MSKR first, to access the right pin CFGR. */
+       wmb();
+       writel_relaxed(conf, addr + ATMEL_PIO_CFGR);
+}
+
+static int atmel_pctl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+       struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
+
+       return atmel_pioctrl->npins;
+}
+
+static const char *atmel_pctl_get_group_name(struct pinctrl_dev *pctldev,
+                                            unsigned selector)
+{
+       struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
+
+       return atmel_pioctrl->groups[selector].name;
+}
+
+static int atmel_pctl_get_group_pins(struct pinctrl_dev *pctldev,
+                                    unsigned selector, const unsigned **pins,
+                                    unsigned *num_pins)
+{
+       struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
+
+       *pins = (unsigned *)&atmel_pioctrl->groups[selector].pin;
+       *num_pins = 1;
+
+       return 0;
+}
+
+struct atmel_group *atmel_pctl_find_group_by_pin(struct pinctrl_dev *pctldev,
+                                                unsigned pin)
+{
+       struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
+       int i;
+
+       for (i = 0; i < atmel_pioctrl->npins; i++) {
+               struct atmel_group *grp = atmel_pioctrl->groups + i;
+
+               if (grp->pin == pin)
+                       return grp;
+       }
+
+       return NULL;
+}
+
+static int atmel_pctl_xlate_pinfunc(struct pinctrl_dev *pctldev,
+                                   struct device_node *np,
+                                   u32 pinfunc, const char **grp_name,
+                                   const char **func_name)
+{
+       struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
+       unsigned pin_id, func_id;
+       struct atmel_group *grp;
+
+       pin_id = ATMEL_GET_PIN_NO(pinfunc);
+       func_id = ATMEL_GET_PIN_FUNC(pinfunc);
+
+       if (func_id >= ARRAY_SIZE(atmel_functions))
+               return -EINVAL;
+
+       *func_name = atmel_functions[func_id];
+
+       grp = atmel_pctl_find_group_by_pin(pctldev, pin_id);
+       if (!grp)
+               return -EINVAL;
+       *grp_name = grp->name;
+
+       atmel_pioctrl->pins[pin_id]->mux = func_id;
+       atmel_pioctrl->pins[pin_id]->ioset = ATMEL_GET_PIN_IOSET(pinfunc);
+       /* Want the device name not the group one. */
+       if (np->parent == atmel_pioctrl->node)
+               atmel_pioctrl->pins[pin_id]->device = np->name;
+       else
+               atmel_pioctrl->pins[pin_id]->device = np->parent->name;
+
+       return 0;
+}
+
+static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+                                       struct device_node *np,
+                                       struct pinctrl_map **map,
+                                       unsigned *reserved_maps,
+                                       unsigned *num_maps)
+{
+       unsigned num_pins, num_configs, reserve;
+       unsigned long *configs;
+       struct property *pins;
+       bool has_config = false;
+       u32 pinfunc;
+       int ret, i;
+
+       pins = of_find_property(np, "pinmux", NULL);
+       if (!pins)
+               return -EINVAL;
+
+       ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
+                                             &num_configs);
+       if (ret < 0) {
+               dev_err(pctldev->dev, "%s: could not parse node property\n",
+                       of_node_full_name(np));
+               return ret;
+       }
+
+       if (num_configs)
+               has_config = true;
+
+       num_pins = pins->length / sizeof(u32);
+       if (!num_pins) {
+               dev_err(pctldev->dev, "no pins found in node %s\n",
+                       of_node_full_name(np));
+               return -EINVAL;
+       }
+
+       /*
+        * Reserve maps: each pin needs at least a mux map plus an optional
+        * configuration map.
+        */
+       reserve = 1;
+       if (has_config && num_pins >= 1)
+               reserve++;
+       reserve *= num_pins;
+       ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps,
+                                       reserve);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < num_pins; i++) {
+               const char *group, *func;
+
+               ret = of_property_read_u32_index(np, "pinmux", i, &pinfunc);
+               if (ret)
+                       return ret;
+
+               ret = atmel_pctl_xlate_pinfunc(pctldev, np, pinfunc, &group,
+                                              &func);
+               if (ret)
+                       return ret;
+
+               pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps,
+                                         group, func);
+
+               if (has_config) {
+                       ret = pinctrl_utils_add_map_configs(pctldev, map,
+                                       reserved_maps, num_maps, group,
+                                       configs, num_configs,
+                                       PIN_MAP_TYPE_CONFIGS_GROUP);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
+                                    struct device_node *np_config,
+                                    struct pinctrl_map **map,
+                                    unsigned *num_maps)
+{
+       struct device_node *np;
+       unsigned reserved_maps;
+       int ret;
+
+       *map = NULL;
+       *num_maps = 0;
+       reserved_maps = 0;
+
+       /*
+        * If all the pins of a device share the same configuration (or have
+        * none), there is no need for a subnode, so directly parse the node
+        * referenced by the phandle.
+        */
+       ret = atmel_pctl_dt_subnode_to_map(pctldev, np_config, map,
+                                          &reserved_maps, num_maps);
+       if (ret) {
+               for_each_child_of_node(np_config, np) {
+                       ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map,
+                                                   &reserved_maps, num_maps);
+                       if (ret < 0)
+                               break;
+               }
+       }
+
+       if (ret < 0) {
+               pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+               dev_err(pctldev->dev, "can't create maps for node %s\n",
+                       np_config->full_name);
+       }
+
+       return ret;
+}
+
+static const struct pinctrl_ops atmel_pctlops = {
+       .get_groups_count       = atmel_pctl_get_groups_count,
+       .get_group_name         = atmel_pctl_get_group_name,
+       .get_group_pins         = atmel_pctl_get_group_pins,
+       .dt_node_to_map         = atmel_pctl_dt_node_to_map,
+       .dt_free_map            = pinctrl_utils_dt_free_map,
+};
+
+static int atmel_pmx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+       return ARRAY_SIZE(atmel_functions);
+}
+
+static const char *atmel_pmx_get_function_name(struct pinctrl_dev *pctldev,
+                                              unsigned selector)
+{
+       return atmel_functions[selector];
+}
+
+static int atmel_pmx_get_function_groups(struct pinctrl_dev *pctldev,
+                                        unsigned selector,
+                                        const char * const **groups,
+                                        unsigned * const num_groups)
+{
+       struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
+
+       *groups = atmel_pioctrl->group_names;
+       *num_groups = atmel_pioctrl->npins;
+
+       return 0;
+}
+
+static int atmel_pmx_set_mux(struct pinctrl_dev *pctldev,
+                            unsigned function,
+                            unsigned group)
+{
+       struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
+       unsigned pin;
+       u32 conf;
+
+       dev_dbg(pctldev->dev, "enable function %s group %s\n",
+               atmel_functions[function], atmel_pioctrl->groups[group].name);
+
+       pin = atmel_pioctrl->groups[group].pin;
+       conf = atmel_pin_config_read(pctldev, pin);
+       conf &= (~ATMEL_PIO_CFGR_FUNC_MASK);
+       conf |= (function & ATMEL_PIO_CFGR_FUNC_MASK);
+       dev_dbg(pctldev->dev, "pin: %u, conf: 0x%08x\n", pin, conf);
+       atmel_pin_config_write(pctldev, pin, conf);
+
+       return 0;
+}
+
+static const struct pinmux_ops atmel_pmxops = {
+       .get_functions_count    = atmel_pmx_get_functions_count,
+       .get_function_name      = atmel_pmx_get_function_name,
+       .get_function_groups    = atmel_pmx_get_function_groups,
+       .set_mux                = atmel_pmx_set_mux,
+};
+
+static int atmel_conf_pin_config_group_get(struct pinctrl_dev *pctldev,
+                                          unsigned group,
+                                          unsigned long *config)
+{
+       struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
+       unsigned param = pinconf_to_config_param(*config), arg = 0;
+       struct atmel_group *grp = atmel_pioctrl->groups + group;
+       unsigned pin_id = grp->pin;
+       u32 res;
+
+       res = atmel_pin_config_read(pctldev, pin_id);
+
+       switch (param) {
+       case PIN_CONFIG_BIAS_PULL_UP:
+               if (!(res & ATMEL_PIO_PUEN_MASK))
+                       return -EINVAL;
+               arg = 1;
+               break;
+       case PIN_CONFIG_BIAS_PULL_DOWN:
+               if ((res & ATMEL_PIO_PUEN_MASK) ||
+                   (!(res & ATMEL_PIO_PDEN_MASK)))
+                       return -EINVAL;
+               arg = 1;
+               break;
+       case PIN_CONFIG_BIAS_DISABLE:
+               if ((res & ATMEL_PIO_PUEN_MASK) ||
+                   ((res & ATMEL_PIO_PDEN_MASK)))
+                       return -EINVAL;
+               arg = 1;
+               break;
+       case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+               if (!(res & ATMEL_PIO_OPD_MASK))
+                       return -EINVAL;
+               arg = 1;
+               break;
+       case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+               if (!(res & ATMEL_PIO_SCHMITT_MASK))
+                       return -EINVAL;
+               arg = 1;
+               break;
+       default:
+               return -ENOTSUPP;
+       }
+
+       *config = pinconf_to_config_packed(param, arg);
+       return 0;
+}
+
+static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
+                                          unsigned group,
+                                          unsigned long *configs,
+                                          unsigned num_configs)
+{
+       struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
+       struct atmel_group *grp = atmel_pioctrl->groups + group;
+       unsigned bank, pin, pin_id = grp->pin;
+       u32 mask, conf = 0;
+       int i;
+
+       conf = atmel_pin_config_read(pctldev, pin_id);
+
+       for (i = 0; i < num_configs; i++) {
+               unsigned param = pinconf_to_config_param(configs[i]);
+               unsigned arg = pinconf_to_config_argument(configs[i]);
+
+               dev_dbg(pctldev->dev, "%s: pin=%u, config=0x%lx\n",
+                       __func__, pin_id, configs[i]);
+
+               switch (param) {
+               case PIN_CONFIG_BIAS_DISABLE:
+                       conf &= (~ATMEL_PIO_PUEN_MASK);
+                       conf &= (~ATMEL_PIO_PDEN_MASK);
+                       break;
+               case PIN_CONFIG_BIAS_PULL_UP:
+                       conf |= ATMEL_PIO_PUEN_MASK;
+                       break;
+               case PIN_CONFIG_BIAS_PULL_DOWN:
+                       conf |= ATMEL_PIO_PDEN_MASK;
+                       break;
+               case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+                       if (arg == 0)
+                               conf &= (~ATMEL_PIO_OPD_MASK);
+                       else
+                               conf |= ATMEL_PIO_OPD_MASK;
+                       break;
+               case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+                       if (arg == 0)
+                               conf |= ATMEL_PIO_SCHMITT_MASK;
+                       else
+                               conf &= (~ATMEL_PIO_SCHMITT_MASK);
+                       break;
+               case PIN_CONFIG_INPUT_DEBOUNCE:
+                       if (arg == 0) {
+                               conf &= (~ATMEL_PIO_IFEN_MASK);
+                               conf &= (~ATMEL_PIO_IFSCEN_MASK);
+                       } else {
+                               /*
+                                * The debounce value itself is ignored for two reasons:
+                                * - pins in the same group cannot have different debounce periods,
+                                * - the register configuring this period is a secure register.
+                                * The debounce filter rejects pulses shorter than half a
+                                * slow clock period.
+                                */
+                               conf |= ATMEL_PIO_IFEN_MASK;
+                               conf |= ATMEL_PIO_IFSCEN_MASK;
+                       }
+                       break;
+               case PIN_CONFIG_OUTPUT:
+                       conf |= ATMEL_PIO_DIR_MASK;
+                       bank = ATMEL_PIO_BANK(pin_id);
+                       pin = ATMEL_PIO_LINE(pin_id);
+                       mask = 1 << pin;
+
+                       if (arg == 0) {
+                               writel_relaxed(mask, atmel_pioctrl->reg_base +
+                                       bank * ATMEL_PIO_BANK_OFFSET +
+                                       ATMEL_PIO_CODR);
+                       } else {
+                               writel_relaxed(mask, atmel_pioctrl->reg_base +
+                                       bank * ATMEL_PIO_BANK_OFFSET +
+                                       ATMEL_PIO_SODR);
+                       }
+                       break;
+               default:
+                       dev_warn(pctldev->dev,
+                                "unsupported configuration parameter: %u\n",
+                                param);
+                       continue;
+               }
+       }
+
+       dev_dbg(pctldev->dev, "%s: reg=0x%08x\n", __func__, conf);
+       atmel_pin_config_write(pctldev, pin_id, conf);
+
+       return 0;
+}
+
+static void atmel_conf_pin_config_dbg_show(struct pinctrl_dev *pctldev,
+                                          struct seq_file *s, unsigned pin_id)
+{
+       struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
+       u32 conf;
+
+       if (!atmel_pioctrl->pins[pin_id]->device)
+               return;
+
+       if (atmel_pioctrl->pins[pin_id])
+               seq_printf(s, " (%s, ioset %u) ",
+                          atmel_pioctrl->pins[pin_id]->device,
+                          atmel_pioctrl->pins[pin_id]->ioset);
+
+       conf = atmel_pin_config_read(pctldev, pin_id);
+       if (conf & ATMEL_PIO_PUEN_MASK)
+               seq_printf(s, "%s ", "pull-up");
+       if (conf & ATMEL_PIO_PDEN_MASK)
+               seq_printf(s, "%s ", "pull-down");
+       if (conf & ATMEL_PIO_IFEN_MASK)
+               seq_printf(s, "%s ", "debounce");
+       if (conf & ATMEL_PIO_OPD_MASK)
+               seq_printf(s, "%s ", "open-drain");
+       if (conf & ATMEL_PIO_SCHMITT_MASK)
+               seq_printf(s, "%s ", "schmitt");
+}
+
+static const struct pinconf_ops atmel_confops = {
+       .pin_config_group_get   = atmel_conf_pin_config_group_get,
+       .pin_config_group_set   = atmel_conf_pin_config_group_set,
+       .pin_config_dbg_show    = atmel_conf_pin_config_dbg_show,
+};
+
+static struct pinctrl_desc atmel_pinctrl_desc = {
+       .name           = "atmel_pinctrl",
+       .confops        = &atmel_confops,
+       .pctlops        = &atmel_pctlops,
+       .pmxops         = &atmel_pmxops,
+};
+
+static int atmel_pctrl_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct atmel_pioctrl *atmel_pioctrl = platform_get_drvdata(pdev);
+       int i;
+
+       /*
+        * For each bank, save the IMR so it can be restored later and disable
+        * all GPIO interrupts except the ones marked as wakeup sources.
+        */
+       for (i = 0; i < atmel_pioctrl->nbanks; i++) {
+               atmel_pioctrl->pm_suspend_backup[i] =
+                       atmel_gpio_read(atmel_pioctrl, i, ATMEL_PIO_IMR);
+               atmel_gpio_write(atmel_pioctrl, i, ATMEL_PIO_IDR,
+                                ~atmel_pioctrl->pm_wakeup_sources[i]);
+       }
+
+       return 0;
+}
+
+static int atmel_pctrl_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct atmel_pioctrl *atmel_pioctrl = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < atmel_pioctrl->nbanks; i++)
+               atmel_gpio_write(atmel_pioctrl, i, ATMEL_PIO_IER,
+                                atmel_pioctrl->pm_suspend_backup[i]);
+
+       return 0;
+}
+
+static const struct dev_pm_ops atmel_pctrl_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(atmel_pctrl_suspend, atmel_pctrl_resume)
+};
+
+/*
+ * The number of banks can differ from one SoC to another;
+ * there can be up to 16 banks.
+ */
+static const struct atmel_pioctrl_data atmel_sama5d2_pioctrl_data = {
+       .nbanks         = 4,
+};
+
+static const struct of_device_id atmel_pctrl_of_match[] = {
+       {
+               .compatible = "atmel,sama5d2-pinctrl",
+               .data = &atmel_sama5d2_pioctrl_data,
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(of, atmel_pctrl_of_match);
+
+static int atmel_pinctrl_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct pinctrl_pin_desc *pin_desc;
+       const char **group_names;
+       const struct of_device_id *match;
+       int i, ret;
+       struct resource *res;
+       struct atmel_pioctrl *atmel_pioctrl;
+       struct atmel_pioctrl_data *atmel_pioctrl_data;
+
+       atmel_pioctrl = devm_kzalloc(dev, sizeof(*atmel_pioctrl), GFP_KERNEL);
+       if (!atmel_pioctrl)
+               return -ENOMEM;
+       atmel_pioctrl->dev = dev;
+       atmel_pioctrl->node = dev->of_node;
+       platform_set_drvdata(pdev, atmel_pioctrl);
+
+       match = of_match_node(atmel_pctrl_of_match, dev->of_node);
+       if (!match) {
+               dev_err(dev, "unknown compatible string\n");
+               return -ENODEV;
+       }
+       atmel_pioctrl_data = (struct atmel_pioctrl_data *)match->data;
+       atmel_pioctrl->nbanks = atmel_pioctrl_data->nbanks;
+       atmel_pioctrl->npins = atmel_pioctrl->nbanks * ATMEL_PIO_NPINS_PER_BANK;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(dev, "unable to get atmel pinctrl resource\n");
+               return -EINVAL;
+       }
+       atmel_pioctrl->reg_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(atmel_pioctrl->reg_base))
+               return -EINVAL;
+
+       atmel_pioctrl->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(atmel_pioctrl->clk)) {
+               dev_err(dev, "failed to get clock\n");
+               return PTR_ERR(atmel_pioctrl->clk);
+       }
+
+       atmel_pioctrl->pins = devm_kzalloc(dev, sizeof(*atmel_pioctrl->pins)
+                       * atmel_pioctrl->npins, GFP_KERNEL);
+       if (!atmel_pioctrl->pins)
+               return -ENOMEM;
+
+       pin_desc = devm_kzalloc(dev, sizeof(*pin_desc)
+                       * atmel_pioctrl->npins, GFP_KERNEL);
+       if (!pin_desc)
+               return -ENOMEM;
+       atmel_pinctrl_desc.pins = pin_desc;
+       atmel_pinctrl_desc.npins = atmel_pioctrl->npins;
+
+       /* One pin is one group since a pin can achieve all functions. */
+       group_names = devm_kzalloc(dev, sizeof(*group_names)
+                       * atmel_pioctrl->npins, GFP_KERNEL);
+       if (!group_names)
+               return -ENOMEM;
+       atmel_pioctrl->group_names = group_names;
+
+       atmel_pioctrl->groups = devm_kzalloc(&pdev->dev,
+                       sizeof(*atmel_pioctrl->groups) * atmel_pioctrl->npins,
+                       GFP_KERNEL);
+       if (!atmel_pioctrl->groups)
+               return -ENOMEM;
+       for (i = 0 ; i < atmel_pioctrl->npins; i++) {
+               struct atmel_group *group = atmel_pioctrl->groups + i;
+               unsigned bank = ATMEL_PIO_BANK(i);
+               unsigned line = ATMEL_PIO_LINE(i);
+
+               atmel_pioctrl->pins[i] = devm_kzalloc(dev,
+                               sizeof(**atmel_pioctrl->pins), GFP_KERNEL);
+               if (!atmel_pioctrl->pins[i])
+                       return -ENOMEM;
+
+               atmel_pioctrl->pins[i]->pin_id = i;
+               atmel_pioctrl->pins[i]->bank = bank;
+               atmel_pioctrl->pins[i]->line = line;
+
+               pin_desc[i].number = i;
+               /* Pin naming convention: P(bank_name)(bank_pin_number). */
+               pin_desc[i].name = kasprintf(GFP_KERNEL, "P%c%d",
+                                            bank + 'A', line);
+
+               group->name = group_names[i] = pin_desc[i].name;
+               group->pin = pin_desc[i].number;
+
+               dev_dbg(dev, "pin_id=%u, bank=%u, line=%u", i, bank, line);
+       }
+
+       atmel_pioctrl->gpio_chip = &atmel_gpio_chip;
+       atmel_pioctrl->gpio_chip->of_node = dev->of_node;
+       atmel_pioctrl->gpio_chip->ngpio = atmel_pioctrl->npins;
+       atmel_pioctrl->gpio_chip->label = dev_name(dev);
+       atmel_pioctrl->gpio_chip->dev = dev;
+       atmel_pioctrl->gpio_chip->names = atmel_pioctrl->group_names;
+
+       atmel_pioctrl->pm_wakeup_sources = devm_kzalloc(dev,
+                       sizeof(*atmel_pioctrl->pm_wakeup_sources)
+                       * atmel_pioctrl->nbanks, GFP_KERNEL);
+       if (!atmel_pioctrl->pm_wakeup_sources)
+               return -ENOMEM;
+
+       atmel_pioctrl->pm_suspend_backup = devm_kzalloc(dev,
+                       sizeof(*atmel_pioctrl->pm_suspend_backup)
+                       * atmel_pioctrl->nbanks, GFP_KERNEL);
+       if (!atmel_pioctrl->pm_suspend_backup)
+               return -ENOMEM;
+
+       atmel_pioctrl->irqs = devm_kzalloc(dev, sizeof(*atmel_pioctrl->irqs)
+                       * atmel_pioctrl->nbanks, GFP_KERNEL);
+       if (!atmel_pioctrl->irqs)
+               return -ENOMEM;
+
+       /* There is one controller but each bank has its own irq line. */
+       for (i = 0; i < atmel_pioctrl->nbanks; i++) {
+               res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+               if (!res) {
+                       dev_err(dev, "missing irq resource for group %c\n",
+                               'A' + i);
+                       return -EINVAL;
+               }
+               atmel_pioctrl->irqs[i] = res->start;
+               irq_set_chained_handler(res->start, atmel_gpio_irq_handler);
+               irq_set_handler_data(res->start, atmel_pioctrl);
+               dev_dbg(dev, "bank %i: hwirq=%u\n", i, res->start);
+       }
+
+       atmel_pioctrl->irq_domain = irq_domain_add_linear(dev->of_node,
+                       atmel_pioctrl->gpio_chip->ngpio,
+                       &irq_domain_simple_ops, NULL);
+       if (!atmel_pioctrl->irq_domain) {
+               dev_err(dev, "can't add the irq domain\n");
+               return -ENODEV;
+       }
+       atmel_pioctrl->irq_domain->name = "atmel gpio";
+
+       for (i = 0; i < atmel_pioctrl->npins; i++) {
+               int irq = irq_create_mapping(atmel_pioctrl->irq_domain, i);
+
+               irq_set_chip_and_handler(irq, &atmel_gpio_irq_chip,
+                                        handle_simple_irq);
+               irq_set_chip_data(irq, atmel_pioctrl);
+               dev_dbg(dev,
+                       "atmel gpio irq domain: hwirq: %d, linux irq: %d\n",
+                       i, irq);
+       }
+
+       ret = clk_prepare_enable(atmel_pioctrl->clk);
+       if (ret) {
+               dev_err(dev, "failed to prepare and enable clock\n");
+               goto clk_prepare_enable_error;
+       }
+
+       atmel_pioctrl->pinctrl_dev = pinctrl_register(&atmel_pinctrl_desc,
+                                                     &pdev->dev,
+                                                     atmel_pioctrl);
+       if (!atmel_pioctrl->pinctrl_dev) {
+               dev_err(dev, "pinctrl registration failed\n");
+               goto pinctrl_register_error;
+       }
+
+       ret = gpiochip_add(atmel_pioctrl->gpio_chip);
+       if (ret) {
+               dev_err(dev, "failed to add gpiochip\n");
+               goto gpiochip_add_error;
+       }
+
+       ret = gpiochip_add_pin_range(atmel_pioctrl->gpio_chip, dev_name(dev),
+                                    0, 0, atmel_pioctrl->gpio_chip->ngpio);
+       if (ret) {
+               dev_err(dev, "failed to add gpio pin range\n");
+               goto gpiochip_add_pin_range_error;
+       }
+
+       dev_info(&pdev->dev, "atmel pinctrl initialized\n");
+
+       return 0;
+
+gpiochip_add_pin_range_error:
+       gpiochip_remove(atmel_pioctrl->gpio_chip);
+gpiochip_add_error:
+       pinctrl_unregister(atmel_pioctrl->pinctrl_dev);
+pinctrl_register_error:
+       clk_disable_unprepare(atmel_pioctrl->clk);
+clk_prepare_enable_error:
+       irq_domain_remove(atmel_pioctrl->irq_domain);
+
+       return ret;
+}
+
+int atmel_pinctrl_remove(struct platform_device *pdev)
+{
+       struct atmel_pioctrl *atmel_pioctrl = platform_get_drvdata(pdev);
+
+       irq_domain_remove(atmel_pioctrl->irq_domain);
+       clk_disable_unprepare(atmel_pioctrl->clk);
+       pinctrl_unregister(atmel_pioctrl->pinctrl_dev);
+       gpiochip_remove(atmel_pioctrl->gpio_chip);
+
+       return 0;
+}
+
+static struct platform_driver atmel_pinctrl_driver = {
+       .driver = {
+               .name = "pinctrl-at91-pio4",
+               .of_match_table = atmel_pctrl_of_match,
+               .pm = &atmel_pctrl_pm_ops,
+       },
+       .probe = atmel_pinctrl_probe,
+       .remove = atmel_pinctrl_remove,
+};
+module_platform_driver(atmel_pinctrl_driver);
+
+MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
+MODULE_DESCRIPTION("Atmel PIO4 pinctrl driver");
+MODULE_LICENSE("GPL v2");
index b0fde0f385e6864c4fd962751fab9bb51dfbfa13..0d2fc0cff35ee6216ef5e62a77376e63cb2675e7 100644 (file)
@@ -1122,8 +1122,10 @@ static int at91_pinctrl_parse_functions(struct device_node *np,
                func->groups[i] = child->name;
                grp = &info->groups[grp_index++];
                ret = at91_pinctrl_parse_groups(child, grp, info, i++);
-               if (ret)
+               if (ret) {
+                       of_node_put(child);
                        return ret;
+               }
        }
 
        return 0;
@@ -1196,6 +1198,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
                ret = at91_pinctrl_parse_functions(child, info, i++);
                if (ret) {
                        dev_err(&pdev->dev, "failed to parse function\n");
+                       of_node_put(child);
                        return ret;
                }
        }
@@ -1277,28 +1280,6 @@ static int at91_pinctrl_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int at91_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       /*
-        * Map back to global GPIO space and request muxing, the direction
-        * parameter does not matter for this controller.
-        */
-       int gpio = chip->base + offset;
-       int bank = chip->base / chip->ngpio;
-
-       dev_dbg(chip->dev, "%s:%d pio%c%d(%d)\n", __func__, __LINE__,
-                'A' + bank, offset, gpio);
-
-       return pinctrl_request_gpio(gpio);
-}
-
-static void at91_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       int gpio = chip->base + offset;
-
-       pinctrl_free_gpio(gpio);
-}
-
 static int at91_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
 {
        struct at91_gpio_chip *at91_gpio = to_at91_gpio_chip(chip);
@@ -1684,8 +1665,8 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
 
 /* This structure is replicated for each GPIO block allocated at probe time */
 static struct gpio_chip at91_gpio_template = {
-       .request                = at91_gpio_request,
-       .free                   = at91_gpio_free,
+       .request                = gpiochip_generic_request,
+       .free                   = gpiochip_generic_free,
        .get_direction          = at91_gpio_get_direction,
        .direction_input        = at91_gpio_direction_input,
        .get                    = at91_gpio_get,
index 9c9b88934bcc904de8b63f20778ef12985ad7ed5..813eb7c771ecb5a38e33eedcb5d0a92d4c9edde4 100644 (file)
@@ -217,24 +217,6 @@ static inline struct u300_gpio *to_u300_gpio(struct gpio_chip *chip)
        return container_of(chip, struct u300_gpio, chip);
 }
 
-static int u300_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       /*
-        * Map back to global GPIO space and request muxing, the direction
-        * parameter does not matter for this controller.
-        */
-       int gpio = chip->base + offset;
-
-       return pinctrl_request_gpio(gpio);
-}
-
-static void u300_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       int gpio = chip->base + offset;
-
-       pinctrl_free_gpio(gpio);
-}
-
 static int u300_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
        struct u300_gpio *gpio = to_u300_gpio(chip);
@@ -417,8 +399,8 @@ int u300_gpio_config_set(struct gpio_chip *chip, unsigned offset,
 static struct gpio_chip u300_gpio_chip = {
        .label                  = "u300-gpio-chip",
        .owner                  = THIS_MODULE,
-       .request                = u300_gpio_request,
-       .free                   = u300_gpio_free,
+       .request                = gpiochip_generic_request,
+       .free                   = gpiochip_generic_free,
        .get                    = u300_gpio_get,
        .set                    = u300_gpio_set,
        .direction_input        = u300_gpio_direction_input,
index 11f8b835d3b64f61fde81bc77c2b5ecf931fd351..38a7799f8257e1fe18756d208b7dfb776775f39b 100644 (file)
@@ -169,16 +169,6 @@ static struct pinmux_ops dc_pmxops = {
        .gpio_request_enable    = dc_pmx_request_gpio,
 };
 
-static int dc_gpio_request(struct gpio_chip *chip, unsigned gpio)
-{
-       return pinctrl_request_gpio(chip->base + gpio);
-}
-
-static void dc_gpio_free(struct gpio_chip *chip, unsigned gpio)
-{
-       pinctrl_free_gpio(chip->base + gpio);
-}
-
 static int dc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
 {
        struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
@@ -255,8 +245,8 @@ static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
 
        chip->label             = DRIVER_NAME;
        chip->dev               = pmap->dev;
-       chip->request           = dc_gpio_request;
-       chip->free              = dc_gpio_free;
+       chip->request           = gpiochip_generic_request;
+       chip->free              = gpiochip_generic_free;
        chip->direction_input   = dc_gpio_direction_input;
        chip->direction_output  = dc_gpio_direction_output;
        chip->get               = dc_gpio_get;
index 952b1c62388773eb5594b03c7b0a639061c8f4b9..85c9046c690e2c44ab0da44571ac75a252da9708 100644 (file)
@@ -1171,16 +1171,6 @@ static struct pinctrl_desc pistachio_pinctrl_desc = {
        .confops = &pistachio_pinconf_ops,
 };
 
-static int pistachio_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void pistachio_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static int pistachio_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
 {
        struct pistachio_gpio_bank *bank = gc_to_bank(chip);
@@ -1332,8 +1322,8 @@ static void pistachio_gpio_irq_handler(struct irq_desc *desc)
                .npins = _npins,                                        \
                .gpio_chip = {                                          \
                        .label = "GPIO" #_bank,                         \
-                       .request = pistachio_gpio_request,              \
-                       .free = pistachio_gpio_free,                    \
+                       .request = gpiochip_generic_request,            \
+                       .free = gpiochip_generic_free,                  \
                        .get_direction = pistachio_gpio_get_direction,  \
                        .direction_input = pistachio_gpio_direction_input, \
                        .direction_output = pistachio_gpio_direction_output, \
index 88bb707e107ad8d9b8f14832d5edd4788a8b5cf6..a0651128e23acc68fbe139dc10da3e859da2b9a1 100644 (file)
@@ -1374,16 +1374,6 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
  * GPIO handling
  */
 
-static int rockchip_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void rockchip_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
 {
        struct rockchip_pin_bank *bank = gc_to_pin_bank(gc);
@@ -1461,8 +1451,8 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 }
 
 static const struct gpio_chip rockchip_gpiolib_chip = {
-       .request = rockchip_gpio_request,
-       .free = rockchip_gpio_free,
+       .request = gpiochip_generic_request,
+       .free = gpiochip_generic_free,
        .set = rockchip_gpio_set,
        .get = rockchip_gpio_get,
        .direction_input = rockchip_gpio_direction_input,
@@ -2089,6 +2079,21 @@ static struct rockchip_pin_ctrl rk2928_pin_ctrl = {
                .pull_calc_reg          = rk2928_calc_pull_reg_and_bit,
 };
 
+static struct rockchip_pin_bank rk3036_pin_banks[] = {
+       PIN_BANK(0, 32, "gpio0"),
+       PIN_BANK(1, 32, "gpio1"),
+       PIN_BANK(2, 32, "gpio2"),
+};
+
+static struct rockchip_pin_ctrl rk3036_pin_ctrl = {
+               .pin_banks              = rk3036_pin_banks,
+               .nr_banks               = ARRAY_SIZE(rk3036_pin_banks),
+               .label                  = "RK3036-GPIO",
+               .type                   = RK2928,
+               .grf_mux_offset         = 0xa8,
+               .pull_calc_reg          = rk2928_calc_pull_reg_and_bit,
+};
+
 static struct rockchip_pin_bank rk3066a_pin_banks[] = {
        PIN_BANK(0, 32, "gpio0"),
        PIN_BANK(1, 32, "gpio1"),
@@ -2207,6 +2212,8 @@ static struct rockchip_pin_ctrl rk3368_pin_ctrl = {
 static const struct of_device_id rockchip_pinctrl_dt_match[] = {
        { .compatible = "rockchip,rk2928-pinctrl",
                .data = (void *)&rk2928_pin_ctrl },
+       { .compatible = "rockchip,rk3036-pinctrl",
+               .data = (void *)&rk3036_pin_ctrl },
        { .compatible = "rockchip,rk3066a-pinctrl",
                .data = (void *)&rk3066a_pin_ctrl },
        { .compatible = "rockchip,rk3066b-pinctrl",
index 389526e704fb0eb68125e31cee1363d78c3caf8b..b58d3f29148a402120a2993cc57a2d0eee1df563 100644 (file)
@@ -742,16 +742,6 @@ static void st_gpio_direction(struct st_gpio_bank *bank,
        }
 }
 
-static int st_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void st_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static int st_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
        struct st_gpio_bank *bank = gpio_chip_to_bank(chip);
@@ -1490,8 +1480,8 @@ static void st_gpio_irqmux_handler(struct irq_desc *desc)
 }
 
 static struct gpio_chip st_gpio_template = {
-       .request                = st_gpio_request,
-       .free                   = st_gpio_free,
+       .request                = gpiochip_generic_request,
+       .free                   = gpiochip_generic_free,
        .get                    = st_gpio_get,
        .set                    = st_gpio_set,
        .direction_input        = st_gpio_direction_input,
index 2651d04bd1beb8a59b9487fad6d2da1acc4ac18c..84a43e61295267aa5dfe07fc155095ac5ef390d3 100644 (file)
@@ -760,24 +760,15 @@ static const char * const tegra124_pcie_groups[] = {
        "pcie-2",
        "pcie-3",
        "pcie-4",
-       "sata-0",
 };
 
 static const char * const tegra124_usb3_groups[] = {
        "pcie-0",
        "pcie-1",
-       "pcie-2",
-       "pcie-3",
-       "pcie-4",
        "sata-0",
 };
 
 static const char * const tegra124_sata_groups[] = {
-       "pcie-0",
-       "pcie-1",
-       "pcie-2",
-       "pcie-3",
-       "pcie-4",
        "sata-0",
 };
 
index c349911708ef8e98b868dc0ed39ceec9b72f3605..b89ad3c0c731318ca6d5ce4896d1cbab0ef2a771 100644 (file)
@@ -668,7 +668,7 @@ static int tz1090_pdc_pinconf_reg(struct pinctrl_dev *pctldev,
                break;
        default:
                return -ENOTSUPP;
-       };
+       }
 
        /* Only input bias parameters supported */
        *reg = REG_GPIO_CONTROL2;
@@ -801,7 +801,7 @@ static int tz1090_pdc_pinconf_group_reg(struct pinctrl_dev *pctldev,
                break;
        default:
                return -ENOTSUPP;
-       };
+       }
 
        /* Calculate field information */
        *mask = (BIT(*width) - 1) << *shift;
index 6d07a2f64d978439d06ec15e47f5c65ab83945b6..5425299d759dcb044d2fbc9220d9628f8a15651d 100644 (file)
@@ -1661,7 +1661,7 @@ static int tz1090_pinconf_reg(struct pinctrl_dev *pctldev,
                break;
        default:
                return -ENOTSUPP;
-       };
+       }
 
        /* Only input bias parameters supported */
        pu = &tz1090_pinconf_pullup[pin];
@@ -1790,7 +1790,7 @@ static int tz1090_pinconf_group_reg(struct pinctrl_dev *pctldev,
                break;
        default:
                return -ENOTSUPP;
-       };
+       }
 
        /* Calculate field information */
        *shift = g->slw_bit * *width;
index 779950c62e53d238e7a4c35d7290ca66475fed86..ae724bdab3d324cbf9018e8f2bdaf6fc44103f02 100644 (file)
@@ -682,28 +682,14 @@ static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val)
        return 0;
 }
 
-static int xway_gpio_req(struct gpio_chip *chip, unsigned offset)
-{
-       int gpio = chip->base + offset;
-
-       return pinctrl_request_gpio(gpio);
-}
-
-static void xway_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       int gpio = chip->base + offset;
-
-       pinctrl_free_gpio(gpio);
-}
-
 static struct gpio_chip xway_chip = {
        .label = "gpio-xway",
        .direction_input = xway_gpio_dir_in,
        .direction_output = xway_gpio_dir_out,
        .get = xway_gpio_get,
        .set = xway_gpio_set,
-       .request = xway_gpio_req,
-       .free = xway_gpio_free,
+       .request = gpiochip_generic_request,
+       .free = gpiochip_generic_free,
        .base = -1,
 };
 
index 5aafea8c6590a9625930e784f027e61c43b7a0ba..d57b5eca7b98350ecc2e9b708f05e7457a26db16 100644 (file)
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2014 Xilinx
  *
- *  SÃ\83¶ren Brinkmann <soren.brinkmann@xilinx.com>
+ *  Sören Brinkmann <soren.brinkmann@xilinx.com>
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1230,8 +1230,18 @@ static struct platform_driver zynq_pinctrl_driver = {
        .remove = zynq_pinctrl_remove,
 };
 
-module_platform_driver(zynq_pinctrl_driver);
+static int __init zynq_pinctrl_init(void)
+{
+       return platform_driver_register(&zynq_pinctrl_driver);
+}
+arch_initcall(zynq_pinctrl_init);
+
+static void __exit zynq_pinctrl_exit(void)
+{
+       platform_driver_unregister(&zynq_pinctrl_driver);
+}
+module_exit(zynq_pinctrl_exit);
 
-MODULE_AUTHOR("SÃ\83¶ren Brinkmann <soren.brinkmann@xilinx.com>");
+MODULE_AUTHOR("Sören Brinkmann <soren.brinkmann@xilinx.com>");
 MODULE_DESCRIPTION("Xilinx Zynq pinctrl driver");
 MODULE_LICENSE("GPL");
index a0c7407c1cac486b8609283f0b06350e124468da..146264a41ec8844876e81ecd77eda16588c80950 100644 (file)
@@ -458,18 +458,6 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
        spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
-static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       int gpio = chip->base + offset;
-       return pinctrl_request_gpio(gpio);
-}
-
-static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       int gpio = chip->base + offset;
-       return pinctrl_free_gpio(gpio);
-}
-
 #ifdef CONFIG_DEBUG_FS
 #include <linux/seq_file.h>
 
@@ -527,8 +515,8 @@ static struct gpio_chip msm_gpio_template = {
        .direction_output = msm_gpio_direction_output,
        .get              = msm_gpio_get,
        .set              = msm_gpio_set,
-       .request          = msm_gpio_request,
-       .free             = msm_gpio_free,
+       .request          = gpiochip_generic_request,
+       .free             = gpiochip_generic_free,
        .dbg_show         = msm_gpio_dbg_show,
 };
 
index bd1e24598e12b88b69f329cf574c63c156217153..6c42ca14d2fd315d9e43faca2a48315815d53bd8 100644 (file)
@@ -546,16 +546,6 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
        pmic_gpio_config_set(state->ctrl, pin, &config, 1);
 }
 
-static int pmic_gpio_request(struct gpio_chip *chip, unsigned base)
-{
-       return pinctrl_request_gpio(chip->base + base);
-}
-
-static void pmic_gpio_free(struct gpio_chip *chip, unsigned base)
-{
-       pinctrl_free_gpio(chip->base + base);
-}
-
 static int pmic_gpio_of_xlate(struct gpio_chip *chip,
                              const struct of_phandle_args *gpio_desc,
                              u32 *flags)
@@ -595,8 +585,8 @@ static const struct gpio_chip pmic_gpio_gpio_template = {
        .direction_output       = pmic_gpio_direction_output,
        .get                    = pmic_gpio_get,
        .set                    = pmic_gpio_set,
-       .request                = pmic_gpio_request,
-       .free                   = pmic_gpio_free,
+       .request                = gpiochip_generic_request,
+       .free                   = gpiochip_generic_free,
        .of_xlate               = pmic_gpio_of_xlate,
        .to_irq                 = pmic_gpio_to_irq,
        .dbg_show               = pmic_gpio_dbg_show,
index e3be3ce2cada2ee50b9ca0f64e93b266c26ea5ba..9ce0e30e33e81b024ecee8169d86b165ce1d69d7 100644 (file)
@@ -604,16 +604,6 @@ static void pmic_mpp_set(struct gpio_chip *chip, unsigned pin, int value)
        pmic_mpp_config_set(state->ctrl, pin, &config, 1);
 }
 
-static int pmic_mpp_request(struct gpio_chip *chip, unsigned base)
-{
-       return pinctrl_request_gpio(chip->base + base);
-}
-
-static void pmic_mpp_free(struct gpio_chip *chip, unsigned base)
-{
-       pinctrl_free_gpio(chip->base + base);
-}
-
 static int pmic_mpp_of_xlate(struct gpio_chip *chip,
                             const struct of_phandle_args *gpio_desc,
                             u32 *flags)
@@ -653,8 +643,8 @@ static const struct gpio_chip pmic_mpp_gpio_template = {
        .direction_output       = pmic_mpp_direction_output,
        .get                    = pmic_mpp_get,
        .set                    = pmic_mpp_set,
-       .request                = pmic_mpp_request,
-       .free                   = pmic_mpp_free,
+       .request                = gpiochip_generic_request,
+       .free                   = gpiochip_generic_free,
        .of_xlate               = pmic_mpp_of_xlate,
        .to_irq                 = pmic_mpp_to_irq,
        .dbg_show               = pmic_mpp_dbg_show,
index e1a3721bc8e5814fbef5a39184170407436cb193..d809c9eaa3231817512bdda858ad208d0f1138e4 100644 (file)
@@ -584,7 +584,7 @@ static void pm8xxx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 }
 
 #else
-#define msm_gpio_dbg_show NULL
+#define pm8xxx_gpio_dbg_show NULL
 #endif
 
 static struct gpio_chip pm8xxx_gpio_template = {
index 6652b8d7f707aefc5656edd348f95e0d139c21da..8982027de8e8b528f026f38d58ccdf692901635c 100644 (file)
@@ -639,7 +639,7 @@ static void pm8xxx_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 }
 
 #else
-#define msm_mpp_dbg_show NULL
+#define pm8xxx_mpp_dbg_show NULL
 #endif
 
 static struct gpio_chip pm8xxx_mpp_template = {
index 9ce0b8619d4c6eb90715dd06ae50921266074307..82dc109f7ed46629405d7663f2b853c50ed647e5 100644 (file)
@@ -284,7 +284,7 @@ static void exynos5440_dt_free_map(struct pinctrl_dev *pctldev,
                        if (!idx)
                                kfree(map[idx].data.configs.group_or_pin);
                }
-       };
+       }
 
        kfree(map);
 }
index c760bf43d116cfaa83370d640deefd38abb9f407..3f622ccd8eabd74a3f0df889298254dbcdee1510 100644 (file)
@@ -888,19 +888,9 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
        return 0;
 }
 
-static int samsung_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void samsung_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static const struct gpio_chip samsung_gpiolib_chip = {
-       .request = samsung_gpio_request,
-       .free = samsung_gpio_free,
+       .request = gpiochip_generic_request,
+       .free = gpiochip_generic_free,
        .set = samsung_gpio_set,
        .get = samsung_gpio_get,
        .direction_input = samsung_gpio_direction_input,
index 8e024c9c9115c448fbcd2d7b8bfbb7a413620c7d..35d6e95fa21fbbdd6cdf8bb9847bffad13ad88ce 100644 (file)
@@ -65,6 +65,11 @@ config PINCTRL_PFC_R8A7794
        depends on ARCH_R8A7794
        select PINCTRL_SH_PFC
 
+config PINCTRL_PFC_R8A7795
+       def_bool y
+       depends on ARCH_R8A7795
+       select PINCTRL_SH_PFC
+
 config PINCTRL_PFC_SH7203
        def_bool y
        depends on CPU_SUBTYPE_SH7203
index ea2a60ef122a664a30fa5921c0916e5152067c6c..173305fa3811755a845ae39a81a11f2610ea1552 100644 (file)
@@ -12,6 +12,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7790)     += pfc-r8a7790.o
 obj-$(CONFIG_PINCTRL_PFC_R8A7791)      += pfc-r8a7791.o
 obj-$(CONFIG_PINCTRL_PFC_R8A7793)      += pfc-r8a7791.o
 obj-$(CONFIG_PINCTRL_PFC_R8A7794)      += pfc-r8a7794.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7795)      += pfc-r8a7795.o
 obj-$(CONFIG_PINCTRL_PFC_SH7203)       += pfc-sh7203.o
 obj-$(CONFIG_PINCTRL_PFC_SH7264)       += pfc-sh7264.o
 obj-$(CONFIG_PINCTRL_PFC_SH7269)       += pfc-sh7269.o
index fb9c4480523431c878a1098df47c92b019ddefd1..181ea98a63b7ab02d367581b4027997ea0f6a574 100644 (file)
@@ -272,7 +272,7 @@ static int sh_pfc_get_config_reg(struct sh_pfc *pfc, u16 enum_id,
 static int sh_pfc_mark_to_enum(struct sh_pfc *pfc, u16 mark, int pos,
                              u16 *enum_idp)
 {
-       const u16 *data = pfc->info->gpio_data;
+       const u16 *data = pfc->info->pinmux_data;
        unsigned int k;
 
        if (pos) {
@@ -280,7 +280,7 @@ static int sh_pfc_mark_to_enum(struct sh_pfc *pfc, u16 mark, int pos,
                return pos + 1;
        }
 
-       for (k = 0; k < pfc->info->gpio_data_size; k++) {
+       for (k = 0; k < pfc->info->pinmux_data_size; k++) {
                if (data[k] == mark) {
                        *enum_idp = data[k + 1];
                        return k + 1;
@@ -489,6 +489,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
                .data = &r8a7794_pinmux_info,
        },
 #endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7795
+       {
+               .compatible = "renesas,pfc-r8a7795",
+               .data = &r8a7795_pinmux_info,
+       },
+#endif
 #ifdef CONFIG_PINCTRL_PFC_SH73A0
        {
                .compatible = "renesas,pfc-sh73a0",
@@ -587,12 +593,6 @@ static int sh_pfc_remove(struct platform_device *pdev)
 }
 
 static const struct platform_device_id sh_pfc_id_table[] = {
-#ifdef CONFIG_PINCTRL_PFC_R8A7778
-       { "pfc-r8a7778", (kernel_ulong_t)&r8a7778_pinmux_info },
-#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7779
-       { "pfc-r8a7779", (kernel_ulong_t)&r8a7779_pinmux_info },
-#endif
 #ifdef CONFIG_PINCTRL_PFC_SH7203
        { "pfc-sh7203", (kernel_ulong_t)&sh7203_pinmux_info },
 #endif
index 4c3c37bf7161804d4481f1860f8d0c7c6687eddc..62f53b22ae85004628672533a446f6fade5a1a32 100644 (file)
@@ -46,7 +46,9 @@ struct sh_pfc {
        unsigned int nr_gpio_pins;
 
        struct sh_pfc_chip *gpio;
+#ifdef CONFIG_SUPERH
        struct sh_pfc_chip *func;
+#endif
 
        struct sh_pfc_pinctrl *pinctrl;
 };
@@ -73,6 +75,7 @@ extern const struct sh_pfc_soc_info r8a7790_pinmux_info;
 extern const struct sh_pfc_soc_info r8a7791_pinmux_info;
 extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
 extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
 extern const struct sh_pfc_soc_info sh7203_pinmux_info;
 extern const struct sh_pfc_soc_info sh7264_pinmux_info;
 extern const struct sh_pfc_soc_info sh7269_pinmux_info;
index ba353735ecf2be9a1ca39c29e516c850beaa1ce1..db3f09aa8993cdda59a2cec8369d2d13047a230e 100644 (file)
@@ -219,10 +219,7 @@ static int gpio_pin_to_irq(struct gpio_chip *gc, unsigned offset)
        return -ENOSYS;
 
 found:
-       if (pfc->num_irqs)
-               return pfc->irqs[i];
-       else
-               return pfc->info->gpio_irq[i].irq;
+       return pfc->irqs[i];
 }
 
 static int gpio_pin_setup(struct sh_pfc_chip *chip)
@@ -261,6 +258,7 @@ static int gpio_pin_setup(struct sh_pfc_chip *chip)
  * Function GPIOs
  */
 
+#ifdef CONFIG_SUPERH
 static int gpio_function_request(struct gpio_chip *gc, unsigned offset)
 {
        static bool __print_once;
@@ -286,17 +284,12 @@ static int gpio_function_request(struct gpio_chip *gc, unsigned offset)
        return ret;
 }
 
-static void gpio_function_free(struct gpio_chip *gc, unsigned offset)
-{
-}
-
 static int gpio_function_setup(struct sh_pfc_chip *chip)
 {
        struct sh_pfc *pfc = chip->pfc;
        struct gpio_chip *gc = &chip->gpio_chip;
 
        gc->request = gpio_function_request;
-       gc->free = gpio_function_free;
 
        gc->label = pfc->info->name;
        gc->owner = THIS_MODULE;
@@ -305,6 +298,7 @@ static int gpio_function_setup(struct sh_pfc_chip *chip)
 
        return 0;
 }
+#endif
 
 /* -----------------------------------------------------------------------------
  * Register/unregister
@@ -344,7 +338,6 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
        struct sh_pfc_chip *chip;
        phys_addr_t address;
        unsigned int i;
-       int ret;
 
        if (pfc->info->data_regs == NULL)
                return 0;
@@ -367,7 +360,7 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
                return 0;
 
        /* If we have IRQ resources make sure their number is correct. */
-       if (pfc->num_irqs && pfc->num_irqs != pfc->info->gpio_irq_size) {
+       if (pfc->num_irqs != pfc->info->gpio_irq_size) {
                dev_err(pfc->dev, "invalid number of IRQ resources\n");
                return -EINVAL;
        }
@@ -379,20 +372,26 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
 
        pfc->gpio = chip;
 
-       /* Register the GPIO to pin mappings. As pins with GPIO ports must come
-        * first in the ranges, skip the pins without GPIO ports by stopping at
-        * the first range that contains such a pin.
+       if (IS_ENABLED(CONFIG_OF) && pfc->dev->of_node)
+               return 0;
+
+#ifdef CONFIG_SUPERH
+       /*
+        * Register the GPIO to pin mappings. As pins with GPIO ports
+        * must come first in the ranges, skip the pins without GPIO
+        * ports by stopping at the first range that contains such a
+        * pin.
         */
        for (i = 0; i < pfc->nr_ranges; ++i) {
                const struct sh_pfc_pin_range *range = &pfc->ranges[i];
+               int ret;
 
                if (range->start >= pfc->nr_gpio_pins)
                        break;
 
                ret = gpiochip_add_pin_range(&chip->gpio_chip,
-                                            dev_name(pfc->dev),
-                                            range->start, range->start,
-                                            range->end - range->start + 1);
+                       dev_name(pfc->dev), range->start, range->start,
+                       range->end - range->start + 1);
                if (ret < 0)
                        return ret;
        }
@@ -406,6 +405,7 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
                return PTR_ERR(chip);
 
        pfc->func = chip;
+#endif /* CONFIG_SUPERH */
 
        return 0;
 }
@@ -413,7 +413,8 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
 int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc)
 {
        gpiochip_remove(&pfc->gpio->gpio_chip);
+#ifdef CONFIG_SUPERH
        gpiochip_remove(&pfc->func->gpio_chip);
-
+#endif
        return 0;
 }
index 849c6943ed308ee00ee15784bb3454667575a914..02118ab336fcbd6c7b91bc7058e0460a0834e5c1 100644 (file)
@@ -1706,6 +1706,6 @@ const struct sh_pfc_soc_info emev2_pinmux_info = {
 
        .cfg_regs       = pinmux_config_regs,
 
-       .gpio_data      = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data    = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index ba18d2e65e6745630811fb8110cfb34efd5637e0..d9d9228b15faf6f32c8677c1316abfc1d3a0af5b 100644 (file)
@@ -2603,64 +2603,64 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
 };
 
 static const struct pinmux_irq pinmux_irqs[] = {
-       PINMUX_IRQ(irq_pin(0), 0),
-       PINMUX_IRQ(irq_pin(1), 1),
-       PINMUX_IRQ(irq_pin(2), 2),
-       PINMUX_IRQ(irq_pin(3), 3),
-       PINMUX_IRQ(irq_pin(4), 4),
-       PINMUX_IRQ(irq_pin(5), 5),
-       PINMUX_IRQ(irq_pin(6), 6),
-       PINMUX_IRQ(irq_pin(7), 7),
-       PINMUX_IRQ(irq_pin(8), 8),
-       PINMUX_IRQ(irq_pin(9), 9),
-       PINMUX_IRQ(irq_pin(10), 10),
-       PINMUX_IRQ(irq_pin(11), 11),
-       PINMUX_IRQ(irq_pin(12), 12),
-       PINMUX_IRQ(irq_pin(13), 13),
-       PINMUX_IRQ(irq_pin(14), 14),
-       PINMUX_IRQ(irq_pin(15), 15),
-       PINMUX_IRQ(irq_pin(16), 320),
-       PINMUX_IRQ(irq_pin(17), 321),
-       PINMUX_IRQ(irq_pin(18), 85),
-       PINMUX_IRQ(irq_pin(19), 84),
-       PINMUX_IRQ(irq_pin(20), 160),
-       PINMUX_IRQ(irq_pin(21), 161),
-       PINMUX_IRQ(irq_pin(22), 162),
-       PINMUX_IRQ(irq_pin(23), 163),
-       PINMUX_IRQ(irq_pin(24), 175),
-       PINMUX_IRQ(irq_pin(25), 176),
-       PINMUX_IRQ(irq_pin(26), 177),
-       PINMUX_IRQ(irq_pin(27), 178),
-       PINMUX_IRQ(irq_pin(28), 322),
-       PINMUX_IRQ(irq_pin(29), 323),
-       PINMUX_IRQ(irq_pin(30), 324),
-       PINMUX_IRQ(irq_pin(31), 192),
-       PINMUX_IRQ(irq_pin(32), 193),
-       PINMUX_IRQ(irq_pin(33), 194),
-       PINMUX_IRQ(irq_pin(34), 195),
-       PINMUX_IRQ(irq_pin(35), 196),
-       PINMUX_IRQ(irq_pin(36), 197),
-       PINMUX_IRQ(irq_pin(37), 198),
-       PINMUX_IRQ(irq_pin(38), 199),
-       PINMUX_IRQ(irq_pin(39), 200),
-       PINMUX_IRQ(irq_pin(40), 66),
-       PINMUX_IRQ(irq_pin(41), 102),
-       PINMUX_IRQ(irq_pin(42), 103),
-       PINMUX_IRQ(irq_pin(43), 109),
-       PINMUX_IRQ(irq_pin(44), 110),
-       PINMUX_IRQ(irq_pin(45), 111),
-       PINMUX_IRQ(irq_pin(46), 112),
-       PINMUX_IRQ(irq_pin(47), 113),
-       PINMUX_IRQ(irq_pin(48), 114),
-       PINMUX_IRQ(irq_pin(49), 115),
-       PINMUX_IRQ(irq_pin(50), 301),
-       PINMUX_IRQ(irq_pin(51), 290),
-       PINMUX_IRQ(irq_pin(52), 296),
-       PINMUX_IRQ(irq_pin(53), 325),
-       PINMUX_IRQ(irq_pin(54), 326),
-       PINMUX_IRQ(irq_pin(55), 327),
-       PINMUX_IRQ(irq_pin(56), 328),
-       PINMUX_IRQ(irq_pin(57), 329),
+       PINMUX_IRQ(0),          /* IRQ0 */
+       PINMUX_IRQ(1),          /* IRQ1 */
+       PINMUX_IRQ(2),          /* IRQ2 */
+       PINMUX_IRQ(3),          /* IRQ3 */
+       PINMUX_IRQ(4),          /* IRQ4 */
+       PINMUX_IRQ(5),          /* IRQ5 */
+       PINMUX_IRQ(6),          /* IRQ6 */
+       PINMUX_IRQ(7),          /* IRQ7 */
+       PINMUX_IRQ(8),          /* IRQ8 */
+       PINMUX_IRQ(9),          /* IRQ9 */
+       PINMUX_IRQ(10),         /* IRQ10 */
+       PINMUX_IRQ(11),         /* IRQ11 */
+       PINMUX_IRQ(12),         /* IRQ12 */
+       PINMUX_IRQ(13),         /* IRQ13 */
+       PINMUX_IRQ(14),         /* IRQ14 */
+       PINMUX_IRQ(15),         /* IRQ15 */
+       PINMUX_IRQ(320),        /* IRQ16 */
+       PINMUX_IRQ(321),        /* IRQ17 */
+       PINMUX_IRQ(85),         /* IRQ18 */
+       PINMUX_IRQ(84),         /* IRQ19 */
+       PINMUX_IRQ(160),        /* IRQ20 */
+       PINMUX_IRQ(161),        /* IRQ21 */
+       PINMUX_IRQ(162),        /* IRQ22 */
+       PINMUX_IRQ(163),        /* IRQ23 */
+       PINMUX_IRQ(175),        /* IRQ24 */
+       PINMUX_IRQ(176),        /* IRQ25 */
+       PINMUX_IRQ(177),        /* IRQ26 */
+       PINMUX_IRQ(178),        /* IRQ27 */
+       PINMUX_IRQ(322),        /* IRQ28 */
+       PINMUX_IRQ(323),        /* IRQ29 */
+       PINMUX_IRQ(324),        /* IRQ30 */
+       PINMUX_IRQ(192),        /* IRQ31 */
+       PINMUX_IRQ(193),        /* IRQ32 */
+       PINMUX_IRQ(194),        /* IRQ33 */
+       PINMUX_IRQ(195),        /* IRQ34 */
+       PINMUX_IRQ(196),        /* IRQ35 */
+       PINMUX_IRQ(197),        /* IRQ36 */
+       PINMUX_IRQ(198),        /* IRQ37 */
+       PINMUX_IRQ(199),        /* IRQ38 */
+       PINMUX_IRQ(200),        /* IRQ39 */
+       PINMUX_IRQ(66),         /* IRQ40 */
+       PINMUX_IRQ(102),        /* IRQ41 */
+       PINMUX_IRQ(103),        /* IRQ42 */
+       PINMUX_IRQ(109),        /* IRQ43 */
+       PINMUX_IRQ(110),        /* IRQ44 */
+       PINMUX_IRQ(111),        /* IRQ45 */
+       PINMUX_IRQ(112),        /* IRQ46 */
+       PINMUX_IRQ(113),        /* IRQ47 */
+       PINMUX_IRQ(114),        /* IRQ48 */
+       PINMUX_IRQ(115),        /* IRQ49 */
+       PINMUX_IRQ(301),        /* IRQ50 */
+       PINMUX_IRQ(290),        /* IRQ51 */
+       PINMUX_IRQ(296),        /* IRQ52 */
+       PINMUX_IRQ(325),        /* IRQ53 */
+       PINMUX_IRQ(326),        /* IRQ54 */
+       PINMUX_IRQ(327),        /* IRQ55 */
+       PINMUX_IRQ(328),        /* IRQ56 */
+       PINMUX_IRQ(329),        /* IRQ57 */
 };
 
 #define PORTCR_PULMD_OFF (0 << 6)
@@ -2734,11 +2734,11 @@ const struct sh_pfc_soc_info r8a73a4_pinmux_info = {
        .functions = pinmux_functions,
        .nr_functions = ARRAY_SIZE(pinmux_functions),
 
-       .cfg_regs       = pinmux_config_regs,
-       .data_regs      = pinmux_data_regs,
+       .cfg_regs = pinmux_config_regs,
+       .data_regs = pinmux_data_regs,
 
-       .gpio_data      = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 
        .gpio_irq = pinmux_irqs,
        .gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
index 82ef1862dd1ba808f22a951419efca45945b3422..279e9dd442e4472989e32cdc9126d23c88593693 100644 (file)
@@ -3651,38 +3651,38 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
 };
 
 static const struct pinmux_irq pinmux_irqs[] = {
-       PINMUX_IRQ(irq_pin(0), 2,   13),        /* IRQ0A */
-       PINMUX_IRQ(irq_pin(1), 20),             /* IRQ1A */
-       PINMUX_IRQ(irq_pin(2), 11,  12),        /* IRQ2A */
-       PINMUX_IRQ(irq_pin(3), 10,  14),        /* IRQ3A */
-       PINMUX_IRQ(irq_pin(4), 15,  172),       /* IRQ4A */
-       PINMUX_IRQ(irq_pin(5), 0,   1),         /* IRQ5A */
-       PINMUX_IRQ(irq_pin(6), 121, 173),       /* IRQ6A */
-       PINMUX_IRQ(irq_pin(7), 120, 209),       /* IRQ7A */
-       PINMUX_IRQ(irq_pin(8), 119),            /* IRQ8A */
-       PINMUX_IRQ(irq_pin(9), 118, 210),       /* IRQ9A */
-       PINMUX_IRQ(irq_pin(10), 19),            /* IRQ10A */
-       PINMUX_IRQ(irq_pin(11), 104),           /* IRQ11A */
-       PINMUX_IRQ(irq_pin(12), 42,  97),       /* IRQ12A */
-       PINMUX_IRQ(irq_pin(13), 64,  98),       /* IRQ13A */
-       PINMUX_IRQ(irq_pin(14), 63,  99),       /* IRQ14A */
-       PINMUX_IRQ(irq_pin(15), 62,  100),      /* IRQ15A */
-       PINMUX_IRQ(irq_pin(16), 68,  211),      /* IRQ16A */
-       PINMUX_IRQ(irq_pin(17), 69),            /* IRQ17A */
-       PINMUX_IRQ(irq_pin(18), 70),            /* IRQ18A */
-       PINMUX_IRQ(irq_pin(19), 71),            /* IRQ19A */
-       PINMUX_IRQ(irq_pin(20), 67),            /* IRQ20A */
-       PINMUX_IRQ(irq_pin(21), 202),           /* IRQ21A */
-       PINMUX_IRQ(irq_pin(22), 95),            /* IRQ22A */
-       PINMUX_IRQ(irq_pin(23), 96),            /* IRQ23A */
-       PINMUX_IRQ(irq_pin(24), 180),           /* IRQ24A */
-       PINMUX_IRQ(irq_pin(25), 38),            /* IRQ25A */
-       PINMUX_IRQ(irq_pin(26), 58,  81),       /* IRQ26A */
-       PINMUX_IRQ(irq_pin(27), 57,  168),      /* IRQ27A */
-       PINMUX_IRQ(irq_pin(28), 56,  169),      /* IRQ28A */
-       PINMUX_IRQ(irq_pin(29), 50,  170),      /* IRQ29A */
-       PINMUX_IRQ(irq_pin(30), 49,  171),      /* IRQ30A */
-       PINMUX_IRQ(irq_pin(31), 41,  167),      /* IRQ31A */
+       PINMUX_IRQ(2,   13),    /* IRQ0A */
+       PINMUX_IRQ(20),         /* IRQ1A */
+       PINMUX_IRQ(11,  12),    /* IRQ2A */
+       PINMUX_IRQ(10,  14),    /* IRQ3A */
+       PINMUX_IRQ(15,  172),   /* IRQ4A */
+       PINMUX_IRQ(0,   1),     /* IRQ5A */
+       PINMUX_IRQ(121, 173),   /* IRQ6A */
+       PINMUX_IRQ(120, 209),   /* IRQ7A */
+       PINMUX_IRQ(119),        /* IRQ8A */
+       PINMUX_IRQ(118, 210),   /* IRQ9A */
+       PINMUX_IRQ(19),         /* IRQ10A */
+       PINMUX_IRQ(104),        /* IRQ11A */
+       PINMUX_IRQ(42,  97),    /* IRQ12A */
+       PINMUX_IRQ(64,  98),    /* IRQ13A */
+       PINMUX_IRQ(63,  99),    /* IRQ14A */
+       PINMUX_IRQ(62,  100),   /* IRQ15A */
+       PINMUX_IRQ(68,  211),   /* IRQ16A */
+       PINMUX_IRQ(69),         /* IRQ17A */
+       PINMUX_IRQ(70),         /* IRQ18A */
+       PINMUX_IRQ(71),         /* IRQ19A */
+       PINMUX_IRQ(67),         /* IRQ20A */
+       PINMUX_IRQ(202),        /* IRQ21A */
+       PINMUX_IRQ(95),         /* IRQ22A */
+       PINMUX_IRQ(96),         /* IRQ23A */
+       PINMUX_IRQ(180),        /* IRQ24A */
+       PINMUX_IRQ(38),         /* IRQ25A */
+       PINMUX_IRQ(58,  81),    /* IRQ26A */
+       PINMUX_IRQ(57,  168),   /* IRQ27A */
+       PINMUX_IRQ(56,  169),   /* IRQ28A */
+       PINMUX_IRQ(50,  170),   /* IRQ29A */
+       PINMUX_IRQ(49,  171),   /* IRQ30A */
+       PINMUX_IRQ(41,  167),   /* IRQ31A */
 };
 
 #define PORTnCR_PULMD_OFF      (0 << 6)
@@ -3774,8 +3774,8 @@ const struct sh_pfc_soc_info r8a7740_pinmux_info = {
        .cfg_regs       = pinmux_config_regs,
        .data_regs      = pinmux_data_regs,
 
-       .gpio_data      = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data    = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 
        .gpio_irq       = pinmux_irqs,
        .gpio_irq_size  = ARRAY_SIZE(pinmux_irqs),
index c7d610d1f3efb8054a3358a2f6c087795d5705f7..bbd35dc1a0c4c35a512a501d5f7d3d3a3b67ea8c 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2013  Renesas Solutions Corp.
  * Copyright (C) 2013  Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
  * Copyright (C) 2013  Cogent Embedded, Inc.
+ * Copyright (C) 2015  Ulrich Hecht
  *
  * based on
  * Copyright (C) 2011  Renesas Solutions Corp.
  * GNU General Public License for more details.
  */
 
-#include <linux/platform_data/gpio-rcar.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include "core.h"
 #include "sh_pfc.h"
 
-#define PORT_GP_27(bank, fn, sfx)                                      \
-       PORT_GP_1(bank, 0,  fn, sfx), PORT_GP_1(bank, 1,  fn, sfx),     \
-       PORT_GP_1(bank, 2,  fn, sfx), PORT_GP_1(bank, 3,  fn, sfx),     \
-       PORT_GP_1(bank, 4,  fn, sfx), PORT_GP_1(bank, 5,  fn, sfx),     \
-       PORT_GP_1(bank, 6,  fn, sfx), PORT_GP_1(bank, 7,  fn, sfx),     \
-       PORT_GP_1(bank, 8,  fn, sfx), PORT_GP_1(bank, 9,  fn, sfx),     \
-       PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx),     \
-       PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx),     \
-       PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx),     \
-       PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx),     \
-       PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx),     \
-       PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx),     \
-       PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx),     \
-       PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx),     \
-       PORT_GP_1(bank, 26, fn, sfx)
+#define PORT_GP_PUP_1(bank, pin, fn, sfx)      \
+       PORT_GP_CFG_1(bank, pin, fn, sfx, SH_PFC_PIN_CFG_PULL_UP)
+
+#define PORT_GP_PUP_27(bank, fn, sfx)                                  \
+       PORT_GP_PUP_1(bank, 0,  fn, sfx), PORT_GP_PUP_1(bank, 1,  fn, sfx),     \
+       PORT_GP_PUP_1(bank, 2,  fn, sfx), PORT_GP_PUP_1(bank, 3,  fn, sfx),     \
+       PORT_GP_PUP_1(bank, 4,  fn, sfx), PORT_GP_PUP_1(bank, 5,  fn, sfx),     \
+       PORT_GP_PUP_1(bank, 6,  fn, sfx), PORT_GP_PUP_1(bank, 7,  fn, sfx),     \
+       PORT_GP_PUP_1(bank, 8,  fn, sfx), PORT_GP_PUP_1(bank, 9,  fn, sfx),     \
+       PORT_GP_PUP_1(bank, 10, fn, sfx), PORT_GP_PUP_1(bank, 11, fn, sfx),     \
+       PORT_GP_PUP_1(bank, 12, fn, sfx), PORT_GP_PUP_1(bank, 13, fn, sfx),     \
+       PORT_GP_PUP_1(bank, 14, fn, sfx), PORT_GP_PUP_1(bank, 15, fn, sfx),     \
+       PORT_GP_PUP_1(bank, 16, fn, sfx), PORT_GP_PUP_1(bank, 17, fn, sfx),     \
+       PORT_GP_PUP_1(bank, 18, fn, sfx), PORT_GP_PUP_1(bank, 19, fn, sfx),     \
+       PORT_GP_PUP_1(bank, 20, fn, sfx), PORT_GP_PUP_1(bank, 21, fn, sfx),     \
+       PORT_GP_PUP_1(bank, 22, fn, sfx), PORT_GP_PUP_1(bank, 23, fn, sfx),     \
+       PORT_GP_PUP_1(bank, 24, fn, sfx), PORT_GP_PUP_1(bank, 25, fn, sfx),     \
+       PORT_GP_PUP_1(bank, 26, fn, sfx)
 
 #define CPU_ALL_PORT(fn, sfx)          \
-       PORT_GP_32(0, fn, sfx),         \
-       PORT_GP_32(1, fn, sfx),         \
-       PORT_GP_32(2, fn, sfx),         \
-       PORT_GP_32(3, fn, sfx),         \
-       PORT_GP_27(4, fn, sfx)
+       PORT_GP_CFG_32(0, fn, sfx, SH_PFC_PIN_CFG_PULL_UP),             \
+       PORT_GP_CFG_32(1, fn, sfx, SH_PFC_PIN_CFG_PULL_UP),             \
+       PORT_GP_CFG_32(2, fn, sfx, SH_PFC_PIN_CFG_PULL_UP),             \
+       PORT_GP_CFG_32(3, fn, sfx, SH_PFC_PIN_CFG_PULL_UP),             \
+       PORT_GP_PUP_27(4, fn, sfx)
 
 enum {
        PINMUX_RESERVED = 0,
@@ -2905,8 +2911,222 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
        { },
 };
 
+#define PUPR0  0x100
+#define PUPR1  0x104
+#define PUPR2  0x108
+#define PUPR3  0x10c
+#define PUPR4  0x110
+#define PUPR5  0x114
+
+static const struct {
+       u16 reg : 11;
+       u16 bit : 5;
+} pullups[] = {
+       [RCAR_GP_PIN(0,  6)] = { PUPR0,  0 },   /* A0 */
+       [RCAR_GP_PIN(0,  7)] = { PUPR0,  1 },   /* A1 */
+       [RCAR_GP_PIN(0,  8)] = { PUPR0,  2 },   /* A2 */
+       [RCAR_GP_PIN(0,  9)] = { PUPR0,  3 },   /* A3 */
+       [RCAR_GP_PIN(0, 10)] = { PUPR0,  4 },   /* A4 */
+       [RCAR_GP_PIN(0, 11)] = { PUPR0,  5 },   /* A5 */
+       [RCAR_GP_PIN(0, 12)] = { PUPR0,  6 },   /* A6 */
+       [RCAR_GP_PIN(0, 13)] = { PUPR0,  7 },   /* A7 */
+       [RCAR_GP_PIN(0, 14)] = { PUPR0,  8 },   /* A8 */
+       [RCAR_GP_PIN(0, 15)] = { PUPR0,  9 },   /* A9 */
+       [RCAR_GP_PIN(0, 16)] = { PUPR0, 10 },   /* A10 */
+       [RCAR_GP_PIN(0, 17)] = { PUPR0, 11 },   /* A11 */
+       [RCAR_GP_PIN(0, 18)] = { PUPR0, 12 },   /* A12 */
+       [RCAR_GP_PIN(0, 19)] = { PUPR0, 13 },   /* A13 */
+       [RCAR_GP_PIN(0, 20)] = { PUPR0, 14 },   /* A14 */
+       [RCAR_GP_PIN(0, 21)] = { PUPR0, 15 },   /* A15 */
+       [RCAR_GP_PIN(0, 22)] = { PUPR0, 16 },   /* A16 */
+       [RCAR_GP_PIN(0, 23)] = { PUPR0, 17 },   /* A17 */
+       [RCAR_GP_PIN(0, 24)] = { PUPR0, 18 },   /* A18 */
+       [RCAR_GP_PIN(0, 25)] = { PUPR0, 19 },   /* A19 */
+       [RCAR_GP_PIN(0, 26)] = { PUPR0, 20 },   /* A20 */
+       [RCAR_GP_PIN(0, 27)] = { PUPR0, 21 },   /* A21 */
+       [RCAR_GP_PIN(0, 28)] = { PUPR0, 22 },   /* A22 */
+       [RCAR_GP_PIN(0, 29)] = { PUPR0, 23 },   /* A23 */
+       [RCAR_GP_PIN(0, 30)] = { PUPR0, 24 },   /* A24 */
+       [RCAR_GP_PIN(0, 31)] = { PUPR0, 25 },   /* A25 */
+       [RCAR_GP_PIN(1,  3)] = { PUPR0, 26 },   /* /EX_CS0 */
+       [RCAR_GP_PIN(1,  4)] = { PUPR0, 27 },   /* /EX_CS1 */
+       [RCAR_GP_PIN(1,  5)] = { PUPR0, 28 },   /* /EX_CS2 */
+       [RCAR_GP_PIN(1,  6)] = { PUPR0, 29 },   /* /EX_CS3 */
+       [RCAR_GP_PIN(1,  7)] = { PUPR0, 30 },   /* /EX_CS4 */
+       [RCAR_GP_PIN(1,  8)] = { PUPR0, 31 },   /* /EX_CS5 */
+
+       [RCAR_GP_PIN(0,  0)] = { PUPR1,  0 },   /* /PRESETOUT   */
+       [RCAR_GP_PIN(0,  5)] = { PUPR1,  1 },   /* /BS          */
+       [RCAR_GP_PIN(1,  0)] = { PUPR1,  2 },   /* RD//WR       */
+       [RCAR_GP_PIN(1,  1)] = { PUPR1,  3 },   /* /WE0         */
+       [RCAR_GP_PIN(1,  2)] = { PUPR1,  4 },   /* /WE1         */
+       [RCAR_GP_PIN(1, 11)] = { PUPR1,  5 },   /* EX_WAIT0     */
+       [RCAR_GP_PIN(1,  9)] = { PUPR1,  6 },   /* DREQ0        */
+       [RCAR_GP_PIN(1, 10)] = { PUPR1,  7 },   /* DACK0        */
+       [RCAR_GP_PIN(1, 12)] = { PUPR1,  8 },   /* IRQ0         */
+       [RCAR_GP_PIN(1, 13)] = { PUPR1,  9 },   /* IRQ1         */
+
+       [RCAR_GP_PIN(1, 22)] = { PUPR2,  0 },   /* DU0_DR0      */
+       [RCAR_GP_PIN(1, 23)] = { PUPR2,  1 },   /* DU0_DR1      */
+       [RCAR_GP_PIN(1, 24)] = { PUPR2,  2 },   /* DU0_DR2      */
+       [RCAR_GP_PIN(1, 25)] = { PUPR2,  3 },   /* DU0_DR3      */
+       [RCAR_GP_PIN(1, 26)] = { PUPR2,  4 },   /* DU0_DR4      */
+       [RCAR_GP_PIN(1, 27)] = { PUPR2,  5 },   /* DU0_DR5      */
+       [RCAR_GP_PIN(1, 28)] = { PUPR2,  6 },   /* DU0_DR6      */
+       [RCAR_GP_PIN(1, 29)] = { PUPR2,  7 },   /* DU0_DR7      */
+       [RCAR_GP_PIN(1, 30)] = { PUPR2,  8 },   /* DU0_DG0      */
+       [RCAR_GP_PIN(1, 31)] = { PUPR2,  9 },   /* DU0_DG1      */
+       [RCAR_GP_PIN(2,  0)] = { PUPR2, 10 },   /* DU0_DG2      */
+       [RCAR_GP_PIN(2,  1)] = { PUPR2, 11 },   /* DU0_DG3      */
+       [RCAR_GP_PIN(2,  2)] = { PUPR2, 12 },   /* DU0_DG4      */
+       [RCAR_GP_PIN(2,  3)] = { PUPR2, 13 },   /* DU0_DG5      */
+       [RCAR_GP_PIN(2,  4)] = { PUPR2, 14 },   /* DU0_DG6      */
+       [RCAR_GP_PIN(2,  5)] = { PUPR2, 15 },   /* DU0_DG7      */
+       [RCAR_GP_PIN(2,  6)] = { PUPR2, 16 },   /* DU0_DB0      */
+       [RCAR_GP_PIN(2,  7)] = { PUPR2, 17 },   /* DU0_DB1      */
+       [RCAR_GP_PIN(2,  8)] = { PUPR2, 18 },   /* DU0_DB2      */
+       [RCAR_GP_PIN(2,  9)] = { PUPR2, 19 },   /* DU0_DB3      */
+       [RCAR_GP_PIN(2, 10)] = { PUPR2, 20 },   /* DU0_DB4      */
+       [RCAR_GP_PIN(2, 11)] = { PUPR2, 21 },   /* DU0_DB5      */
+       [RCAR_GP_PIN(2, 12)] = { PUPR2, 22 },   /* DU0_DB6      */
+       [RCAR_GP_PIN(2, 13)] = { PUPR2, 23 },   /* DU0_DB7      */
+       [RCAR_GP_PIN(2, 14)] = { PUPR2, 24 },   /* DU0_DOTCLKIN */
+       [RCAR_GP_PIN(2, 15)] = { PUPR2, 25 },   /* DU0_DOTCLKOUT0 */
+       [RCAR_GP_PIN(2, 17)] = { PUPR2, 26 },   /* DU0_HSYNC    */
+       [RCAR_GP_PIN(2, 18)] = { PUPR2, 27 },   /* DU0_VSYNC    */
+       [RCAR_GP_PIN(2, 19)] = { PUPR2, 28 },   /* DU0_EXODDF   */
+       [RCAR_GP_PIN(2, 20)] = { PUPR2, 29 },   /* DU0_DISP     */
+       [RCAR_GP_PIN(2, 21)] = { PUPR2, 30 },   /* DU0_CDE      */
+       [RCAR_GP_PIN(2, 16)] = { PUPR2, 31 },   /* DU0_DOTCLKOUT1 */
+
+       [RCAR_GP_PIN(3, 24)] = { PUPR3,  0 },   /* VI0_CLK      */
+       [RCAR_GP_PIN(3, 25)] = { PUPR3,  1 },   /* VI0_CLKENB   */
+       [RCAR_GP_PIN(3, 26)] = { PUPR3,  2 },   /* VI0_FIELD    */
+       [RCAR_GP_PIN(3, 27)] = { PUPR3,  3 },   /* /VI0_HSYNC   */
+       [RCAR_GP_PIN(3, 28)] = { PUPR3,  4 },   /* /VI0_VSYNC   */
+       [RCAR_GP_PIN(3, 29)] = { PUPR3,  5 },   /* VI0_DATA0    */
+       [RCAR_GP_PIN(3, 30)] = { PUPR3,  6 },   /* VI0_DATA1    */
+       [RCAR_GP_PIN(3, 31)] = { PUPR3,  7 },   /* VI0_DATA2    */
+       [RCAR_GP_PIN(4,  0)] = { PUPR3,  8 },   /* VI0_DATA3    */
+       [RCAR_GP_PIN(4,  1)] = { PUPR3,  9 },   /* VI0_DATA4    */
+       [RCAR_GP_PIN(4,  2)] = { PUPR3, 10 },   /* VI0_DATA5    */
+       [RCAR_GP_PIN(4,  3)] = { PUPR3, 11 },   /* VI0_DATA6    */
+       [RCAR_GP_PIN(4,  4)] = { PUPR3, 12 },   /* VI0_DATA7    */
+       [RCAR_GP_PIN(4,  5)] = { PUPR3, 13 },   /* VI0_G2       */
+       [RCAR_GP_PIN(4,  6)] = { PUPR3, 14 },   /* VI0_G3       */
+       [RCAR_GP_PIN(4,  7)] = { PUPR3, 15 },   /* VI0_G4       */
+       [RCAR_GP_PIN(4,  8)] = { PUPR3, 16 },   /* VI0_G5       */
+       [RCAR_GP_PIN(4, 21)] = { PUPR3, 17 },   /* VI1_DATA12   */
+       [RCAR_GP_PIN(4, 22)] = { PUPR3, 18 },   /* VI1_DATA13   */
+       [RCAR_GP_PIN(4, 23)] = { PUPR3, 19 },   /* VI1_DATA14   */
+       [RCAR_GP_PIN(4, 24)] = { PUPR3, 20 },   /* VI1_DATA15   */
+       [RCAR_GP_PIN(4,  9)] = { PUPR3, 21 },   /* ETH_REF_CLK  */
+       [RCAR_GP_PIN(4, 10)] = { PUPR3, 22 },   /* ETH_TXD0     */
+       [RCAR_GP_PIN(4, 11)] = { PUPR3, 23 },   /* ETH_TXD1     */
+       [RCAR_GP_PIN(4, 12)] = { PUPR3, 24 },   /* ETH_CRS_DV   */
+       [RCAR_GP_PIN(4, 13)] = { PUPR3, 25 },   /* ETH_TX_EN    */
+       [RCAR_GP_PIN(4, 14)] = { PUPR3, 26 },   /* ETH_RX_ER    */
+       [RCAR_GP_PIN(4, 15)] = { PUPR3, 27 },   /* ETH_RXD0     */
+       [RCAR_GP_PIN(4, 16)] = { PUPR3, 28 },   /* ETH_RXD1     */
+       [RCAR_GP_PIN(4, 17)] = { PUPR3, 29 },   /* ETH_MDC      */
+       [RCAR_GP_PIN(4, 18)] = { PUPR3, 30 },   /* ETH_MDIO     */
+       [RCAR_GP_PIN(4, 19)] = { PUPR3, 31 },   /* ETH_LINK     */
+
+       [RCAR_GP_PIN(3,  6)] = { PUPR4,  0 },   /* SSI_SCK012   */
+       [RCAR_GP_PIN(3,  7)] = { PUPR4,  1 },   /* SSI_WS012    */
+       [RCAR_GP_PIN(3, 10)] = { PUPR4,  2 },   /* SSI_SDATA0   */
+       [RCAR_GP_PIN(3,  9)] = { PUPR4,  3 },   /* SSI_SDATA1   */
+       [RCAR_GP_PIN(3,  8)] = { PUPR4,  4 },   /* SSI_SDATA2   */
+       [RCAR_GP_PIN(3,  2)] = { PUPR4,  5 },   /* SSI_SCK34    */
+       [RCAR_GP_PIN(3,  3)] = { PUPR4,  6 },   /* SSI_WS34     */
+       [RCAR_GP_PIN(3,  5)] = { PUPR4,  7 },   /* SSI_SDATA3   */
+       [RCAR_GP_PIN(3,  4)] = { PUPR4,  8 },   /* SSI_SDATA4   */
+       [RCAR_GP_PIN(2, 31)] = { PUPR4,  9 },   /* SSI_SCK5     */
+       [RCAR_GP_PIN(3,  0)] = { PUPR4, 10 },   /* SSI_WS5      */
+       [RCAR_GP_PIN(3,  1)] = { PUPR4, 11 },   /* SSI_SDATA5   */
+       [RCAR_GP_PIN(2, 28)] = { PUPR4, 12 },   /* SSI_SCK6     */
+       [RCAR_GP_PIN(2, 29)] = { PUPR4, 13 },   /* SSI_WS6      */
+       [RCAR_GP_PIN(2, 30)] = { PUPR4, 14 },   /* SSI_SDATA6   */
+       [RCAR_GP_PIN(2, 24)] = { PUPR4, 15 },   /* SSI_SCK78    */
+       [RCAR_GP_PIN(2, 25)] = { PUPR4, 16 },   /* SSI_WS78     */
+       [RCAR_GP_PIN(2, 27)] = { PUPR4, 17 },   /* SSI_SDATA7   */
+       [RCAR_GP_PIN(2, 26)] = { PUPR4, 18 },   /* SSI_SDATA8   */
+       [RCAR_GP_PIN(3, 23)] = { PUPR4, 19 },   /* TCLK0        */
+       [RCAR_GP_PIN(3, 11)] = { PUPR4, 20 },   /* SD0_CLK      */
+       [RCAR_GP_PIN(3, 12)] = { PUPR4, 21 },   /* SD0_CMD      */
+       [RCAR_GP_PIN(3, 13)] = { PUPR4, 22 },   /* SD0_DAT0     */
+       [RCAR_GP_PIN(3, 14)] = { PUPR4, 23 },   /* SD0_DAT1     */
+       [RCAR_GP_PIN(3, 15)] = { PUPR4, 24 },   /* SD0_DAT2     */
+       [RCAR_GP_PIN(3, 16)] = { PUPR4, 25 },   /* SD0_DAT3     */
+       [RCAR_GP_PIN(3, 17)] = { PUPR4, 26 },   /* SD0_CD       */
+       [RCAR_GP_PIN(3, 18)] = { PUPR4, 27 },   /* SD0_WP       */
+       [RCAR_GP_PIN(2, 22)] = { PUPR4, 28 },   /* AUDIO_CLKA   */
+       [RCAR_GP_PIN(2, 23)] = { PUPR4, 29 },   /* AUDIO_CLKB   */
+       [RCAR_GP_PIN(1, 14)] = { PUPR4, 30 },   /* IRQ2         */
+       [RCAR_GP_PIN(1, 15)] = { PUPR4, 31 },   /* IRQ3         */
+
+       [RCAR_GP_PIN(0,  1)] = { PUPR5,  0 },   /* PENC0        */
+       [RCAR_GP_PIN(0,  2)] = { PUPR5,  1 },   /* PENC1        */
+       [RCAR_GP_PIN(0,  3)] = { PUPR5,  2 },   /* USB_OVC0     */
+       [RCAR_GP_PIN(0,  4)] = { PUPR5,  3 },   /* USB_OVC1     */
+       [RCAR_GP_PIN(1, 16)] = { PUPR5,  4 },   /* SCIF_CLK     */
+       [RCAR_GP_PIN(1, 17)] = { PUPR5,  5 },   /* TX0          */
+       [RCAR_GP_PIN(1, 18)] = { PUPR5,  6 },   /* RX0          */
+       [RCAR_GP_PIN(1, 19)] = { PUPR5,  7 },   /* SCK0         */
+       [RCAR_GP_PIN(1, 20)] = { PUPR5,  8 },   /* /CTS0        */
+       [RCAR_GP_PIN(1, 21)] = { PUPR5,  9 },   /* /RTS0        */
+       [RCAR_GP_PIN(3, 19)] = { PUPR5, 10 },   /* HSPI_CLK0    */
+       [RCAR_GP_PIN(3, 20)] = { PUPR5, 11 },   /* /HSPI_CS0    */
+       [RCAR_GP_PIN(3, 21)] = { PUPR5, 12 },   /* HSPI_RX0     */
+       [RCAR_GP_PIN(3, 22)] = { PUPR5, 13 },   /* HSPI_TX0     */
+       [RCAR_GP_PIN(4, 20)] = { PUPR5, 14 },   /* ETH_MAGIC    */
+       [RCAR_GP_PIN(4, 25)] = { PUPR5, 15 },   /* AVS1         */
+       [RCAR_GP_PIN(4, 26)] = { PUPR5, 16 },   /* AVS2         */
+};
+
+static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc,
+                                           unsigned int pin)
+{
+       void __iomem *addr;
+
+       if (WARN_ON_ONCE(!pullups[pin].reg))
+               return PIN_CONFIG_BIAS_DISABLE;
+
+       addr = pfc->windows->virt + pullups[pin].reg;
+
+       if (ioread32(addr) & BIT(pullups[pin].bit))
+               return PIN_CONFIG_BIAS_PULL_UP;
+       else
+               return PIN_CONFIG_BIAS_DISABLE;
+}
+
+static void r8a7778_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+                                  unsigned int bias)
+{
+       void __iomem *addr;
+       u32 value;
+       u32 bit;
+
+       if (WARN_ON_ONCE(!pullups[pin].reg))
+               return;
+
+       addr = pfc->windows->virt + pullups[pin].reg;
+       bit = BIT(pullups[pin].bit);
+
+       value = ioread32(addr) & ~bit;
+       if (bias == PIN_CONFIG_BIAS_PULL_UP)
+               value |= bit;
+       iowrite32(value, addr);
+}
+
+static const struct sh_pfc_soc_operations r8a7778_pfc_ops = {
+       .get_bias = r8a7778_pinmux_get_bias,
+       .set_bias = r8a7778_pinmux_set_bias,
+};
+
 const struct sh_pfc_soc_info r8a7778_pinmux_info = {
        .name = "r8a7778_pfc",
+       .ops  = &r8a7778_pfc_ops,
 
        .unlock_reg = 0xfffc0000, /* PMMR */
 
@@ -2923,6 +3143,6 @@ const struct sh_pfc_soc_info r8a7778_pinmux_info = {
 
        .cfg_regs = pinmux_config_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index f5c01e1e2615f3af907d1a9b2ef64d2ccd72acd7..ed4e0788035c50b4f51985e1d4eb91e0345c2452 100644 (file)
@@ -20,7 +20,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/platform_data/gpio-rcar.h>
 
 #include "sh_pfc.h"
 
@@ -620,18 +619,18 @@ static const u16 pinmux_data[] = {
        PINMUX_DATA(USB_PENC1_MARK, FN_USB_PENC1),
 
        PINMUX_IPSR_DATA(IP0_2_0, USB_PENC2),
-       PINMUX_IPSR_MODSEL_DATA(IP0_2_0, SCK0, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP0_2_0, SCK0, SEL_SCIF0_0),
        PINMUX_IPSR_DATA(IP0_2_0, PWM1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_2_0, PWMFSW0, SEL_PWMFSW_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_2_0, SCIF_CLK, SEL_SCIF_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_2_0, TCLK0_C, SEL_TMU0_2),
+       PINMUX_IPSR_MSEL(IP0_2_0, PWMFSW0, SEL_PWMFSW_0),
+       PINMUX_IPSR_MSEL(IP0_2_0, SCIF_CLK, SEL_SCIF_0),
+       PINMUX_IPSR_MSEL(IP0_2_0, TCLK0_C, SEL_TMU0_2),
        PINMUX_IPSR_DATA(IP0_5_3, BS),
        PINMUX_IPSR_DATA(IP0_5_3, SD1_DAT2),
        PINMUX_IPSR_DATA(IP0_5_3, MMC0_D2),
        PINMUX_IPSR_DATA(IP0_5_3, FD2),
        PINMUX_IPSR_DATA(IP0_5_3, ATADIR0),
        PINMUX_IPSR_DATA(IP0_5_3, SDSELF),
-       PINMUX_IPSR_MODSEL_DATA(IP0_5_3, HCTS1, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP0_5_3, HCTS1, SEL_HSCIF1_0),
        PINMUX_IPSR_DATA(IP0_5_3, TX4_C),
        PINMUX_IPSR_DATA(IP0_7_6, A0),
        PINMUX_IPSR_DATA(IP0_7_6, SD1_DAT3),
@@ -641,37 +640,37 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP0_9_8, TX5_D),
        PINMUX_IPSR_DATA(IP0_9_8, HSPI_TX2_B),
        PINMUX_IPSR_DATA(IP0_11_10, A21),
-       PINMUX_IPSR_MODSEL_DATA(IP0_11_10, SCK5_D, SEL_SCIF5_3),
-       PINMUX_IPSR_MODSEL_DATA(IP0_11_10, HSPI_CLK2_B, SEL_HSPI2_1),
+       PINMUX_IPSR_MSEL(IP0_11_10, SCK5_D, SEL_SCIF5_3),
+       PINMUX_IPSR_MSEL(IP0_11_10, HSPI_CLK2_B, SEL_HSPI2_1),
        PINMUX_IPSR_DATA(IP0_13_12, A22),
-       PINMUX_IPSR_MODSEL_DATA(IP0_13_12, RX5_D, SEL_SCIF5_3),
-       PINMUX_IPSR_MODSEL_DATA(IP0_13_12, HSPI_RX2_B, SEL_HSPI2_1),
+       PINMUX_IPSR_MSEL(IP0_13_12, RX5_D, SEL_SCIF5_3),
+       PINMUX_IPSR_MSEL(IP0_13_12, HSPI_RX2_B, SEL_HSPI2_1),
        PINMUX_IPSR_DATA(IP0_13_12, VI1_R0),
        PINMUX_IPSR_DATA(IP0_15_14, A23),
        PINMUX_IPSR_DATA(IP0_15_14, FCLE),
-       PINMUX_IPSR_MODSEL_DATA(IP0_15_14, HSPI_CLK2, SEL_HSPI2_0),
+       PINMUX_IPSR_MSEL(IP0_15_14, HSPI_CLK2, SEL_HSPI2_0),
        PINMUX_IPSR_DATA(IP0_15_14, VI1_R1),
        PINMUX_IPSR_DATA(IP0_18_16, A24),
        PINMUX_IPSR_DATA(IP0_18_16, SD1_CD),
        PINMUX_IPSR_DATA(IP0_18_16, MMC0_D4),
        PINMUX_IPSR_DATA(IP0_18_16, FD4),
-       PINMUX_IPSR_MODSEL_DATA(IP0_18_16, HSPI_CS2, SEL_HSPI2_0),
+       PINMUX_IPSR_MSEL(IP0_18_16, HSPI_CS2, SEL_HSPI2_0),
        PINMUX_IPSR_DATA(IP0_18_16, VI1_R2),
-       PINMUX_IPSR_MODSEL_DATA(IP0_18_16, SSI_WS78_B, SEL_SSI7_1),
+       PINMUX_IPSR_MSEL(IP0_18_16, SSI_WS78_B, SEL_SSI7_1),
        PINMUX_IPSR_DATA(IP0_22_19, A25),
        PINMUX_IPSR_DATA(IP0_22_19, SD1_WP),
        PINMUX_IPSR_DATA(IP0_22_19, MMC0_D5),
        PINMUX_IPSR_DATA(IP0_22_19, FD5),
-       PINMUX_IPSR_MODSEL_DATA(IP0_22_19, HSPI_RX2, SEL_HSPI2_0),
+       PINMUX_IPSR_MSEL(IP0_22_19, HSPI_RX2, SEL_HSPI2_0),
        PINMUX_IPSR_DATA(IP0_22_19, VI1_R3),
        PINMUX_IPSR_DATA(IP0_22_19, TX5_B),
-       PINMUX_IPSR_MODSEL_DATA(IP0_22_19, SSI_SDATA7_B, SEL_SSI7_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_22_19, CTS0_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP0_22_19, SSI_SDATA7_B, SEL_SSI7_1),
+       PINMUX_IPSR_MSEL(IP0_22_19, CTS0_B, SEL_SCIF0_1),
        PINMUX_IPSR_DATA(IP0_24_23, CLKOUT),
        PINMUX_IPSR_DATA(IP0_24_23, TX3C_IRDA_TX_C),
        PINMUX_IPSR_DATA(IP0_24_23, PWM0_B),
        PINMUX_IPSR_DATA(IP0_25, CS0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_25, HSPI_CS2_B, SEL_HSPI2_1),
+       PINMUX_IPSR_MSEL(IP0_25, HSPI_CS2_B, SEL_HSPI2_1),
        PINMUX_IPSR_DATA(IP0_27_26, CS1_A26),
        PINMUX_IPSR_DATA(IP0_27_26, HSPI_TX2),
        PINMUX_IPSR_DATA(IP0_27_26, SDSELF_B),
@@ -679,11 +678,11 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP0_30_28, FWE),
        PINMUX_IPSR_DATA(IP0_30_28, ATAG0),
        PINMUX_IPSR_DATA(IP0_30_28, VI1_R7),
-       PINMUX_IPSR_MODSEL_DATA(IP0_30_28, HRTS1, SEL_HSCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_30_28, RX4_C, SEL_SCIF4_2),
+       PINMUX_IPSR_MSEL(IP0_30_28, HRTS1, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP0_30_28, RX4_C, SEL_SCIF4_2),
 
        PINMUX_IPSR_DATA(IP1_1_0, EX_CS0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_1_0, RX3_C_IRDA_RX_C, SEL_SCIF3_2),
+       PINMUX_IPSR_MSEL(IP1_1_0, RX3_C_IRDA_RX_C, SEL_SCIF3_2),
        PINMUX_IPSR_DATA(IP1_1_0, MMC0_D6),
        PINMUX_IPSR_DATA(IP1_1_0, FD6),
        PINMUX_IPSR_DATA(IP1_3_2, EX_CS1),
@@ -700,45 +699,45 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP1_10_7, FRE),
        PINMUX_IPSR_DATA(IP1_10_7, ATACS10),
        PINMUX_IPSR_DATA(IP1_10_7, VI1_R4),
-       PINMUX_IPSR_MODSEL_DATA(IP1_10_7, RX5_B, SEL_SCIF5_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_10_7, HSCK1, SEL_HSCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_10_7, SSI_SDATA8_B, SEL_SSI8_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_10_7, RTS0_B_TANS_B, SEL_SCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_10_7, SSI_SDATA9, SEL_SSI9_0),
+       PINMUX_IPSR_MSEL(IP1_10_7, RX5_B, SEL_SCIF5_1),
+       PINMUX_IPSR_MSEL(IP1_10_7, HSCK1, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP1_10_7, SSI_SDATA8_B, SEL_SSI8_1),
+       PINMUX_IPSR_MSEL(IP1_10_7, RTS0_B_TANS_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP1_10_7, SSI_SDATA9, SEL_SSI9_0),
        PINMUX_IPSR_DATA(IP1_14_11, EX_CS4),
        PINMUX_IPSR_DATA(IP1_14_11, SD1_DAT0),
        PINMUX_IPSR_DATA(IP1_14_11, MMC0_D0),
        PINMUX_IPSR_DATA(IP1_14_11, FD0),
        PINMUX_IPSR_DATA(IP1_14_11, ATARD0),
        PINMUX_IPSR_DATA(IP1_14_11, VI1_R5),
-       PINMUX_IPSR_MODSEL_DATA(IP1_14_11, SCK5_B, SEL_SCIF5_1),
+       PINMUX_IPSR_MSEL(IP1_14_11, SCK5_B, SEL_SCIF5_1),
        PINMUX_IPSR_DATA(IP1_14_11, HTX1),
        PINMUX_IPSR_DATA(IP1_14_11, TX2_E),
        PINMUX_IPSR_DATA(IP1_14_11, TX0_B),
-       PINMUX_IPSR_MODSEL_DATA(IP1_14_11, SSI_SCK9, SEL_SSI9_0),
+       PINMUX_IPSR_MSEL(IP1_14_11, SSI_SCK9, SEL_SSI9_0),
        PINMUX_IPSR_DATA(IP1_18_15, EX_CS5),
        PINMUX_IPSR_DATA(IP1_18_15, SD1_DAT1),
        PINMUX_IPSR_DATA(IP1_18_15, MMC0_D1),
        PINMUX_IPSR_DATA(IP1_18_15, FD1),
        PINMUX_IPSR_DATA(IP1_18_15, ATAWR0),
        PINMUX_IPSR_DATA(IP1_18_15, VI1_R6),
-       PINMUX_IPSR_MODSEL_DATA(IP1_18_15, HRX1, SEL_HSCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_18_15, RX2_E, SEL_SCIF2_4),
-       PINMUX_IPSR_MODSEL_DATA(IP1_18_15, RX0_B, SEL_SCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_18_15, SSI_WS9, SEL_SSI9_0),
+       PINMUX_IPSR_MSEL(IP1_18_15, HRX1, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP1_18_15, RX2_E, SEL_SCIF2_4),
+       PINMUX_IPSR_MSEL(IP1_18_15, RX0_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP1_18_15, SSI_WS9, SEL_SSI9_0),
        PINMUX_IPSR_DATA(IP1_20_19, MLB_CLK),
        PINMUX_IPSR_DATA(IP1_20_19, PWM2),
-       PINMUX_IPSR_MODSEL_DATA(IP1_20_19, SCK4, SEL_SCIF4_0),
+       PINMUX_IPSR_MSEL(IP1_20_19, SCK4, SEL_SCIF4_0),
        PINMUX_IPSR_DATA(IP1_22_21, MLB_SIG),
        PINMUX_IPSR_DATA(IP1_22_21, PWM3),
        PINMUX_IPSR_DATA(IP1_22_21, TX4),
        PINMUX_IPSR_DATA(IP1_24_23, MLB_DAT),
        PINMUX_IPSR_DATA(IP1_24_23, PWM4),
-       PINMUX_IPSR_MODSEL_DATA(IP1_24_23, RX4, SEL_SCIF4_0),
+       PINMUX_IPSR_MSEL(IP1_24_23, RX4, SEL_SCIF4_0),
        PINMUX_IPSR_DATA(IP1_28_25, HTX0),
        PINMUX_IPSR_DATA(IP1_28_25, TX1),
        PINMUX_IPSR_DATA(IP1_28_25, SDATA),
-       PINMUX_IPSR_MODSEL_DATA(IP1_28_25, CTS0_C, SEL_SCIF0_2),
+       PINMUX_IPSR_MSEL(IP1_28_25, CTS0_C, SEL_SCIF0_2),
        PINMUX_IPSR_DATA(IP1_28_25, SUB_TCK),
        PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE2),
        PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE10),
@@ -746,39 +745,39 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE26),
        PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE34),
 
-       PINMUX_IPSR_MODSEL_DATA(IP2_3_0, HRX0, SEL_HSCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_3_0, RX1, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP2_3_0, HRX0, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP2_3_0, RX1, SEL_SCIF1_0),
        PINMUX_IPSR_DATA(IP2_3_0, SCKZ),
-       PINMUX_IPSR_MODSEL_DATA(IP2_3_0, RTS0_C_TANS_C, SEL_SCIF0_2),
+       PINMUX_IPSR_MSEL(IP2_3_0, RTS0_C_TANS_C, SEL_SCIF0_2),
        PINMUX_IPSR_DATA(IP2_3_0, SUB_TDI),
        PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE3),
        PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE11),
        PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE19),
        PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE27),
        PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE35),
-       PINMUX_IPSR_MODSEL_DATA(IP2_7_4, HSCK0, SEL_HSCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_7_4, SCK1, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP2_7_4, HSCK0, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP2_7_4, SCK1, SEL_SCIF1_0),
        PINMUX_IPSR_DATA(IP2_7_4, MTS),
        PINMUX_IPSR_DATA(IP2_7_4, PWM5),
-       PINMUX_IPSR_MODSEL_DATA(IP2_7_4, SCK0_C, SEL_SCIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP2_7_4, SSI_SDATA9_B, SEL_SSI9_1),
+       PINMUX_IPSR_MSEL(IP2_7_4, SCK0_C, SEL_SCIF0_2),
+       PINMUX_IPSR_MSEL(IP2_7_4, SSI_SDATA9_B, SEL_SSI9_1),
        PINMUX_IPSR_DATA(IP2_7_4, SUB_TDO),
        PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE0),
        PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE8),
        PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE16),
        PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE24),
        PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE32),
-       PINMUX_IPSR_MODSEL_DATA(IP2_11_8, HCTS0, SEL_HSCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_11_8, CTS1, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP2_11_8, HCTS0, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP2_11_8, CTS1, SEL_SCIF1_0),
        PINMUX_IPSR_DATA(IP2_11_8, STM),
        PINMUX_IPSR_DATA(IP2_11_8, PWM0_D),
-       PINMUX_IPSR_MODSEL_DATA(IP2_11_8, RX0_C, SEL_SCIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP2_11_8, SCIF_CLK_C, SEL_SCIF_2),
+       PINMUX_IPSR_MSEL(IP2_11_8, RX0_C, SEL_SCIF0_2),
+       PINMUX_IPSR_MSEL(IP2_11_8, SCIF_CLK_C, SEL_SCIF_2),
        PINMUX_IPSR_DATA(IP2_11_8, SUB_TRST),
-       PINMUX_IPSR_MODSEL_DATA(IP2_11_8, TCLK1_B, SEL_TMU1_1),
+       PINMUX_IPSR_MSEL(IP2_11_8, TCLK1_B, SEL_TMU1_1),
        PINMUX_IPSR_DATA(IP2_11_8, CC5_OSCOUT),
-       PINMUX_IPSR_MODSEL_DATA(IP2_15_12, HRTS0, SEL_HSCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_15_12, RTS1_TANS, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP2_15_12, HRTS0, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP2_15_12, RTS1_TANS, SEL_SCIF1_0),
        PINMUX_IPSR_DATA(IP2_15_12, MDATA),
        PINMUX_IPSR_DATA(IP2_15_12, TX0_C),
        PINMUX_IPSR_DATA(IP2_15_12, SUB_TMS),
@@ -789,17 +788,17 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE33),
        PINMUX_IPSR_DATA(IP2_18_16, DU0_DR0),
        PINMUX_IPSR_DATA(IP2_18_16, LCDOUT0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_18_16, DREQ0, SEL_EXBUS0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_18_16, GPS_CLK_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP2_18_16, DREQ0, SEL_EXBUS0_0),
+       PINMUX_IPSR_MSEL(IP2_18_16, GPS_CLK_B, SEL_GPS_1),
        PINMUX_IPSR_DATA(IP2_18_16, AUDATA0),
        PINMUX_IPSR_DATA(IP2_18_16, TX5_C),
        PINMUX_IPSR_DATA(IP2_21_19, DU0_DR1),
        PINMUX_IPSR_DATA(IP2_21_19, LCDOUT1),
        PINMUX_IPSR_DATA(IP2_21_19, DACK0),
        PINMUX_IPSR_DATA(IP2_21_19, DRACK0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_21_19, GPS_SIGN_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP2_21_19, GPS_SIGN_B, SEL_GPS_1),
        PINMUX_IPSR_DATA(IP2_21_19, AUDATA1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_21_19, RX5_C, SEL_SCIF5_2),
+       PINMUX_IPSR_MSEL(IP2_21_19, RX5_C, SEL_SCIF5_2),
        PINMUX_IPSR_DATA(IP2_22, DU0_DR2),
        PINMUX_IPSR_DATA(IP2_22, LCDOUT2),
        PINMUX_IPSR_DATA(IP2_23, DU0_DR3),
@@ -814,14 +813,14 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP2_27, LCDOUT7),
        PINMUX_IPSR_DATA(IP2_30_28, DU0_DG0),
        PINMUX_IPSR_DATA(IP2_30_28, LCDOUT8),
-       PINMUX_IPSR_MODSEL_DATA(IP2_30_28, DREQ1, SEL_EXBUS1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_30_28, SCL2, SEL_I2C2_0),
+       PINMUX_IPSR_MSEL(IP2_30_28, DREQ1, SEL_EXBUS1_0),
+       PINMUX_IPSR_MSEL(IP2_30_28, SCL2, SEL_I2C2_0),
        PINMUX_IPSR_DATA(IP2_30_28, AUDATA2),
 
        PINMUX_IPSR_DATA(IP3_2_0, DU0_DG1),
        PINMUX_IPSR_DATA(IP3_2_0, LCDOUT9),
        PINMUX_IPSR_DATA(IP3_2_0, DACK1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_2_0, SDA2, SEL_I2C2_0),
+       PINMUX_IPSR_MSEL(IP3_2_0, SDA2, SEL_I2C2_0),
        PINMUX_IPSR_DATA(IP3_2_0, AUDATA3),
        PINMUX_IPSR_DATA(IP3_3, DU0_DG2),
        PINMUX_IPSR_DATA(IP3_3, LCDOUT10),
@@ -838,16 +837,16 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP3_11_9, DU0_DB0),
        PINMUX_IPSR_DATA(IP3_11_9, LCDOUT16),
        PINMUX_IPSR_DATA(IP3_11_9, EX_WAIT1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_9, SCL1, SEL_I2C1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_9, TCLK1, SEL_TMU1_0),
+       PINMUX_IPSR_MSEL(IP3_11_9, SCL1, SEL_I2C1_0),
+       PINMUX_IPSR_MSEL(IP3_11_9, TCLK1, SEL_TMU1_0),
        PINMUX_IPSR_DATA(IP3_11_9, AUDATA4),
        PINMUX_IPSR_DATA(IP3_14_12, DU0_DB1),
        PINMUX_IPSR_DATA(IP3_14_12, LCDOUT17),
        PINMUX_IPSR_DATA(IP3_14_12, EX_WAIT2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SDA1, SEL_I2C1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_14_12, GPS_MAG_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP3_14_12, SDA1, SEL_I2C1_0),
+       PINMUX_IPSR_MSEL(IP3_14_12, GPS_MAG_B, SEL_GPS_1),
        PINMUX_IPSR_DATA(IP3_14_12, AUDATA5),
-       PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SCK5_C, SEL_SCIF5_2),
+       PINMUX_IPSR_MSEL(IP3_14_12, SCK5_C, SEL_SCIF5_2),
        PINMUX_IPSR_DATA(IP3_15, DU0_DB2),
        PINMUX_IPSR_DATA(IP3_15, LCDOUT18),
        PINMUX_IPSR_DATA(IP3_16, DU0_DB3),
@@ -863,14 +862,14 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP3_22_21, DU0_DOTCLKIN),
        PINMUX_IPSR_DATA(IP3_22_21, QSTVA_QVS),
        PINMUX_IPSR_DATA(IP3_22_21, TX3_D_IRDA_TX_D),
-       PINMUX_IPSR_MODSEL_DATA(IP3_22_21, SCL3_B, SEL_I2C3_1),
+       PINMUX_IPSR_MSEL(IP3_22_21, SCL3_B, SEL_I2C3_1),
        PINMUX_IPSR_DATA(IP3_23, DU0_DOTCLKOUT0),
        PINMUX_IPSR_DATA(IP3_23, QCLK),
        PINMUX_IPSR_DATA(IP3_26_24, DU0_DOTCLKOUT1),
        PINMUX_IPSR_DATA(IP3_26_24, QSTVB_QVE),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, RX3_D_IRDA_RX_D, SEL_SCIF3_3),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SDA3_B, SEL_I2C3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SDA2_C, SEL_I2C2_2),
+       PINMUX_IPSR_MSEL(IP3_26_24, RX3_D_IRDA_RX_D, SEL_SCIF3_3),
+       PINMUX_IPSR_MSEL(IP3_26_24, SDA3_B, SEL_I2C3_1),
+       PINMUX_IPSR_MSEL(IP3_26_24, SDA2_C, SEL_I2C2_2),
        PINMUX_IPSR_DATA(IP3_26_24, DACK0_B),
        PINMUX_IPSR_DATA(IP3_26_24, DRACK0_B),
        PINMUX_IPSR_DATA(IP3_27, DU0_EXHSYNC_DU0_HSYNC),
@@ -881,34 +880,34 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP3_31_29, QCPV_QDE),
        PINMUX_IPSR_DATA(IP3_31_29, CAN1_TX),
        PINMUX_IPSR_DATA(IP3_31_29, TX2_C),
-       PINMUX_IPSR_MODSEL_DATA(IP3_31_29, SCL2_C, SEL_I2C2_2),
+       PINMUX_IPSR_MSEL(IP3_31_29, SCL2_C, SEL_I2C2_2),
        PINMUX_IPSR_DATA(IP3_31_29, REMOCON),
 
        PINMUX_IPSR_DATA(IP4_1_0, DU0_DISP),
        PINMUX_IPSR_DATA(IP4_1_0, QPOLA),
-       PINMUX_IPSR_MODSEL_DATA(IP4_1_0, CAN_CLK_C, SEL_CANCLK_2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SCK2_C, SEL_SCIF2_2),
+       PINMUX_IPSR_MSEL(IP4_1_0, CAN_CLK_C, SEL_CANCLK_2),
+       PINMUX_IPSR_MSEL(IP4_1_0, SCK2_C, SEL_SCIF2_2),
        PINMUX_IPSR_DATA(IP4_4_2, DU0_CDE),
        PINMUX_IPSR_DATA(IP4_4_2, QPOLB),
        PINMUX_IPSR_DATA(IP4_4_2, CAN1_RX),
-       PINMUX_IPSR_MODSEL_DATA(IP4_4_2, RX2_C, SEL_SCIF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_4_2, DREQ0_B, SEL_EXBUS0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SSI_SCK78_B, SEL_SSI7_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SCK0_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP4_4_2, RX2_C, SEL_SCIF2_2),
+       PINMUX_IPSR_MSEL(IP4_4_2, DREQ0_B, SEL_EXBUS0_1),
+       PINMUX_IPSR_MSEL(IP4_4_2, SSI_SCK78_B, SEL_SSI7_1),
+       PINMUX_IPSR_MSEL(IP4_4_2, SCK0_B, SEL_SCIF0_1),
        PINMUX_IPSR_DATA(IP4_7_5, DU1_DR0),
        PINMUX_IPSR_DATA(IP4_7_5, VI2_DATA0_VI2_B0),
        PINMUX_IPSR_DATA(IP4_7_5, PWM6),
        PINMUX_IPSR_DATA(IP4_7_5, SD3_CLK),
        PINMUX_IPSR_DATA(IP4_7_5, TX3_E_IRDA_TX_E),
        PINMUX_IPSR_DATA(IP4_7_5, AUDCK),
-       PINMUX_IPSR_MODSEL_DATA(IP4_7_5, PWMFSW0_B, SEL_PWMFSW_1),
+       PINMUX_IPSR_MSEL(IP4_7_5, PWMFSW0_B, SEL_PWMFSW_1),
        PINMUX_IPSR_DATA(IP4_10_8, DU1_DR1),
        PINMUX_IPSR_DATA(IP4_10_8, VI2_DATA1_VI2_B1),
        PINMUX_IPSR_DATA(IP4_10_8, PWM0),
        PINMUX_IPSR_DATA(IP4_10_8, SD3_CMD),
-       PINMUX_IPSR_MODSEL_DATA(IP4_10_8, RX3_E_IRDA_RX_E, SEL_SCIF3_4),
+       PINMUX_IPSR_MSEL(IP4_10_8, RX3_E_IRDA_RX_E, SEL_SCIF3_4),
        PINMUX_IPSR_DATA(IP4_10_8, AUDSYNC),
-       PINMUX_IPSR_MODSEL_DATA(IP4_10_8, CTS0_D, SEL_SCIF0_3),
+       PINMUX_IPSR_MSEL(IP4_10_8, CTS0_D, SEL_SCIF0_3),
        PINMUX_IPSR_DATA(IP4_11, DU1_DR2),
        PINMUX_IPSR_DATA(IP4_11, VI2_G0),
        PINMUX_IPSR_DATA(IP4_12, DU1_DR3),
@@ -923,18 +922,18 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP4_16, VI2_G5),
        PINMUX_IPSR_DATA(IP4_19_17, DU1_DG0),
        PINMUX_IPSR_DATA(IP4_19_17, VI2_DATA2_VI2_B2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_19_17, SCL1_B, SEL_I2C1_1),
+       PINMUX_IPSR_MSEL(IP4_19_17, SCL1_B, SEL_I2C1_1),
        PINMUX_IPSR_DATA(IP4_19_17, SD3_DAT2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_19_17, SCK3_E, SEL_SCIF3_4),
+       PINMUX_IPSR_MSEL(IP4_19_17, SCK3_E, SEL_SCIF3_4),
        PINMUX_IPSR_DATA(IP4_19_17, AUDATA6),
        PINMUX_IPSR_DATA(IP4_19_17, TX0_D),
        PINMUX_IPSR_DATA(IP4_22_20, DU1_DG1),
        PINMUX_IPSR_DATA(IP4_22_20, VI2_DATA3_VI2_B3),
-       PINMUX_IPSR_MODSEL_DATA(IP4_22_20, SDA1_B, SEL_I2C1_1),
+       PINMUX_IPSR_MSEL(IP4_22_20, SDA1_B, SEL_I2C1_1),
        PINMUX_IPSR_DATA(IP4_22_20, SD3_DAT3),
-       PINMUX_IPSR_MODSEL_DATA(IP4_22_20, SCK5, SEL_SCIF5_0),
+       PINMUX_IPSR_MSEL(IP4_22_20, SCK5, SEL_SCIF5_0),
        PINMUX_IPSR_DATA(IP4_22_20, AUDATA7),
-       PINMUX_IPSR_MODSEL_DATA(IP4_22_20, RX0_D, SEL_SCIF0_3),
+       PINMUX_IPSR_MSEL(IP4_22_20, RX0_D, SEL_SCIF0_3),
        PINMUX_IPSR_DATA(IP4_23, DU1_DG2),
        PINMUX_IPSR_DATA(IP4_23, VI2_G6),
        PINMUX_IPSR_DATA(IP4_24, DU1_DG3),
@@ -949,17 +948,17 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP4_28, VI2_R3),
        PINMUX_IPSR_DATA(IP4_31_29, DU1_DB0),
        PINMUX_IPSR_DATA(IP4_31_29, VI2_DATA4_VI2_B4),
-       PINMUX_IPSR_MODSEL_DATA(IP4_31_29, SCL2_B, SEL_I2C2_1),
+       PINMUX_IPSR_MSEL(IP4_31_29, SCL2_B, SEL_I2C2_1),
        PINMUX_IPSR_DATA(IP4_31_29, SD3_DAT0),
        PINMUX_IPSR_DATA(IP4_31_29, TX5),
-       PINMUX_IPSR_MODSEL_DATA(IP4_31_29, SCK0_D, SEL_SCIF0_3),
+       PINMUX_IPSR_MSEL(IP4_31_29, SCK0_D, SEL_SCIF0_3),
 
        PINMUX_IPSR_DATA(IP5_2_0, DU1_DB1),
        PINMUX_IPSR_DATA(IP5_2_0, VI2_DATA5_VI2_B5),
-       PINMUX_IPSR_MODSEL_DATA(IP5_2_0, SDA2_B, SEL_I2C2_1),
+       PINMUX_IPSR_MSEL(IP5_2_0, SDA2_B, SEL_I2C2_1),
        PINMUX_IPSR_DATA(IP5_2_0, SD3_DAT1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_2_0, RX5, SEL_SCIF5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_2_0, RTS0_D_TANS_D, SEL_SCIF0_3),
+       PINMUX_IPSR_MSEL(IP5_2_0, RX5, SEL_SCIF5_0),
+       PINMUX_IPSR_MSEL(IP5_2_0, RTS0_D_TANS_D, SEL_SCIF0_3),
        PINMUX_IPSR_DATA(IP5_3, DU1_DB2),
        PINMUX_IPSR_DATA(IP5_3, VI2_R4),
        PINMUX_IPSR_DATA(IP5_4, DU1_DB3),
@@ -969,16 +968,16 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP5_6, DU1_DB5),
        PINMUX_IPSR_DATA(IP5_6, VI2_R7),
        PINMUX_IPSR_DATA(IP5_7, DU1_DB6),
-       PINMUX_IPSR_MODSEL_DATA(IP5_7, SCL2_D, SEL_I2C2_3),
+       PINMUX_IPSR_MSEL(IP5_7, SCL2_D, SEL_I2C2_3),
        PINMUX_IPSR_DATA(IP5_8, DU1_DB7),
-       PINMUX_IPSR_MODSEL_DATA(IP5_8, SDA2_D, SEL_I2C2_3),
+       PINMUX_IPSR_MSEL(IP5_8, SDA2_D, SEL_I2C2_3),
        PINMUX_IPSR_DATA(IP5_10_9, DU1_DOTCLKIN),
        PINMUX_IPSR_DATA(IP5_10_9, VI2_CLKENB),
-       PINMUX_IPSR_MODSEL_DATA(IP5_10_9, HSPI_CS1, SEL_HSPI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_10_9, SCL1_D, SEL_I2C1_3),
+       PINMUX_IPSR_MSEL(IP5_10_9, HSPI_CS1, SEL_HSPI1_0),
+       PINMUX_IPSR_MSEL(IP5_10_9, SCL1_D, SEL_I2C1_3),
        PINMUX_IPSR_DATA(IP5_12_11, DU1_DOTCLKOUT),
        PINMUX_IPSR_DATA(IP5_12_11, VI2_FIELD),
-       PINMUX_IPSR_MODSEL_DATA(IP5_12_11, SDA1_D, SEL_I2C1_3),
+       PINMUX_IPSR_MSEL(IP5_12_11, SDA1_D, SEL_I2C1_3),
        PINMUX_IPSR_DATA(IP5_14_13, DU1_EXHSYNC_DU1_HSYNC),
        PINMUX_IPSR_DATA(IP5_14_13, VI2_HSYNC),
        PINMUX_IPSR_DATA(IP5_14_13, VI3_HSYNC),
@@ -995,26 +994,26 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP5_20_17, AUDIO_CLKC),
        PINMUX_IPSR_DATA(IP5_20_17, TX2_D),
        PINMUX_IPSR_DATA(IP5_20_17, SPEEDIN),
-       PINMUX_IPSR_MODSEL_DATA(IP5_20_17, GPS_SIGN_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP5_20_17, GPS_SIGN_D, SEL_GPS_3),
        PINMUX_IPSR_DATA(IP5_23_21, DU1_DISP),
        PINMUX_IPSR_DATA(IP5_23_21, VI2_DATA6_VI2_B6),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_21, TCLK0, SEL_TMU0_0),
+       PINMUX_IPSR_MSEL(IP5_23_21, TCLK0, SEL_TMU0_0),
        PINMUX_IPSR_DATA(IP5_23_21, QSTVA_B_QVS_B),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_21, HSPI_CLK1, SEL_HSPI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_21, SCK2_D, SEL_SCIF2_3),
+       PINMUX_IPSR_MSEL(IP5_23_21, HSPI_CLK1, SEL_HSPI1_0),
+       PINMUX_IPSR_MSEL(IP5_23_21, SCK2_D, SEL_SCIF2_3),
        PINMUX_IPSR_DATA(IP5_23_21, AUDIO_CLKOUT_B),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_21, GPS_MAG_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP5_23_21, GPS_MAG_D, SEL_GPS_3),
        PINMUX_IPSR_DATA(IP5_27_24, DU1_CDE),
        PINMUX_IPSR_DATA(IP5_27_24, VI2_DATA7_VI2_B7),
-       PINMUX_IPSR_MODSEL_DATA(IP5_27_24, RX3_B_IRDA_RX_B, SEL_SCIF3_1),
+       PINMUX_IPSR_MSEL(IP5_27_24, RX3_B_IRDA_RX_B, SEL_SCIF3_1),
        PINMUX_IPSR_DATA(IP5_27_24, SD3_WP),
-       PINMUX_IPSR_MODSEL_DATA(IP5_27_24, HSPI_RX1, SEL_HSPI1_0),
+       PINMUX_IPSR_MSEL(IP5_27_24, HSPI_RX1, SEL_HSPI1_0),
        PINMUX_IPSR_DATA(IP5_27_24, VI1_FIELD),
        PINMUX_IPSR_DATA(IP5_27_24, VI3_FIELD),
        PINMUX_IPSR_DATA(IP5_27_24, AUDIO_CLKOUT),
-       PINMUX_IPSR_MODSEL_DATA(IP5_27_24, RX2_D, SEL_SCIF2_3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_27_24, GPS_CLK_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP5_27_24, GPS_CLK_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP5_27_24, RX2_D, SEL_SCIF2_3),
+       PINMUX_IPSR_MSEL(IP5_27_24, GPS_CLK_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP5_27_24, GPS_CLK_D, SEL_GPS_3),
        PINMUX_IPSR_DATA(IP5_28, AUDIO_CLKA),
        PINMUX_IPSR_DATA(IP5_28, CAN_TXCLK),
        PINMUX_IPSR_DATA(IP5_30_29, AUDIO_CLKB),
@@ -1039,82 +1038,82 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP6_11_9, SSI_SCK34),
        PINMUX_IPSR_DATA(IP6_11_9, CAN_DEBUGOUT6),
        PINMUX_IPSR_DATA(IP6_11_9, CAN0_TX_B),
-       PINMUX_IPSR_MODSEL_DATA(IP6_11_9, IERX, SEL_IE_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_11_9, SSI_SCK9_C, SEL_SSI9_2),
+       PINMUX_IPSR_MSEL(IP6_11_9, IERX, SEL_IE_0),
+       PINMUX_IPSR_MSEL(IP6_11_9, SSI_SCK9_C, SEL_SSI9_2),
        PINMUX_IPSR_DATA(IP6_14_12, SSI_WS34),
        PINMUX_IPSR_DATA(IP6_14_12, CAN_DEBUGOUT7),
-       PINMUX_IPSR_MODSEL_DATA(IP6_14_12, CAN0_RX_B, SEL_CAN0_1),
+       PINMUX_IPSR_MSEL(IP6_14_12, CAN0_RX_B, SEL_CAN0_1),
        PINMUX_IPSR_DATA(IP6_14_12, IETX),
-       PINMUX_IPSR_MODSEL_DATA(IP6_14_12, SSI_WS9_C, SEL_SSI9_2),
+       PINMUX_IPSR_MSEL(IP6_14_12, SSI_WS9_C, SEL_SSI9_2),
        PINMUX_IPSR_DATA(IP6_17_15, SSI_SDATA3),
        PINMUX_IPSR_DATA(IP6_17_15, PWM0_C),
        PINMUX_IPSR_DATA(IP6_17_15, CAN_DEBUGOUT8),
-       PINMUX_IPSR_MODSEL_DATA(IP6_17_15, CAN_CLK_B, SEL_CANCLK_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_17_15, IECLK, SEL_IE_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_17_15, SCIF_CLK_B, SEL_SCIF_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_17_15, TCLK0_B, SEL_TMU0_1),
+       PINMUX_IPSR_MSEL(IP6_17_15, CAN_CLK_B, SEL_CANCLK_1),
+       PINMUX_IPSR_MSEL(IP6_17_15, IECLK, SEL_IE_0),
+       PINMUX_IPSR_MSEL(IP6_17_15, SCIF_CLK_B, SEL_SCIF_1),
+       PINMUX_IPSR_MSEL(IP6_17_15, TCLK0_B, SEL_TMU0_1),
        PINMUX_IPSR_DATA(IP6_19_18, SSI_SDATA4),
        PINMUX_IPSR_DATA(IP6_19_18, CAN_DEBUGOUT9),
-       PINMUX_IPSR_MODSEL_DATA(IP6_19_18, SSI_SDATA9_C, SEL_SSI9_2),
+       PINMUX_IPSR_MSEL(IP6_19_18, SSI_SDATA9_C, SEL_SSI9_2),
        PINMUX_IPSR_DATA(IP6_22_20, SSI_SCK5),
        PINMUX_IPSR_DATA(IP6_22_20, ADICLK),
        PINMUX_IPSR_DATA(IP6_22_20, CAN_DEBUGOUT10),
-       PINMUX_IPSR_MODSEL_DATA(IP6_22_20, SCK3, SEL_SCIF3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_22_20, TCLK0_D, SEL_TMU0_3),
+       PINMUX_IPSR_MSEL(IP6_22_20, SCK3, SEL_SCIF3_0),
+       PINMUX_IPSR_MSEL(IP6_22_20, TCLK0_D, SEL_TMU0_3),
        PINMUX_IPSR_DATA(IP6_24_23, SSI_WS5),
-       PINMUX_IPSR_MODSEL_DATA(IP6_24_23, ADICS_SAMP, SEL_ADI_0),
+       PINMUX_IPSR_MSEL(IP6_24_23, ADICS_SAMP, SEL_ADI_0),
        PINMUX_IPSR_DATA(IP6_24_23, CAN_DEBUGOUT11),
        PINMUX_IPSR_DATA(IP6_24_23, TX3_IRDA_TX),
        PINMUX_IPSR_DATA(IP6_26_25, SSI_SDATA5),
-       PINMUX_IPSR_MODSEL_DATA(IP6_26_25, ADIDATA, SEL_ADI_0),
+       PINMUX_IPSR_MSEL(IP6_26_25, ADIDATA, SEL_ADI_0),
        PINMUX_IPSR_DATA(IP6_26_25, CAN_DEBUGOUT12),
-       PINMUX_IPSR_MODSEL_DATA(IP6_26_25, RX3_IRDA_RX, SEL_SCIF3_0),
+       PINMUX_IPSR_MSEL(IP6_26_25, RX3_IRDA_RX, SEL_SCIF3_0),
        PINMUX_IPSR_DATA(IP6_30_29, SSI_SCK6),
        PINMUX_IPSR_DATA(IP6_30_29, ADICHS0),
        PINMUX_IPSR_DATA(IP6_30_29, CAN0_TX),
-       PINMUX_IPSR_MODSEL_DATA(IP6_30_29, IERX_B, SEL_IE_1),
+       PINMUX_IPSR_MSEL(IP6_30_29, IERX_B, SEL_IE_1),
 
        PINMUX_IPSR_DATA(IP7_1_0, SSI_WS6),
        PINMUX_IPSR_DATA(IP7_1_0, ADICHS1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_1_0, CAN0_RX, SEL_CAN0_0),
+       PINMUX_IPSR_MSEL(IP7_1_0, CAN0_RX, SEL_CAN0_0),
        PINMUX_IPSR_DATA(IP7_1_0, IETX_B),
        PINMUX_IPSR_DATA(IP7_3_2, SSI_SDATA6),
        PINMUX_IPSR_DATA(IP7_3_2, ADICHS2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_3_2, CAN_CLK, SEL_CANCLK_0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_3_2, IECLK_B, SEL_IE_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_6_4, SSI_SCK78, SEL_SSI7_0),
+       PINMUX_IPSR_MSEL(IP7_3_2, CAN_CLK, SEL_CANCLK_0),
+       PINMUX_IPSR_MSEL(IP7_3_2, IECLK_B, SEL_IE_1),
+       PINMUX_IPSR_MSEL(IP7_6_4, SSI_SCK78, SEL_SSI7_0),
        PINMUX_IPSR_DATA(IP7_6_4, CAN_DEBUGOUT13),
-       PINMUX_IPSR_MODSEL_DATA(IP7_6_4, IRQ0_B, SEL_INT0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_6_4, SSI_SCK9_B, SEL_SSI9_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_6_4, HSPI_CLK1_C, SEL_HSPI1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_9_7, SSI_WS78, SEL_SSI7_0),
+       PINMUX_IPSR_MSEL(IP7_6_4, IRQ0_B, SEL_INT0_1),
+       PINMUX_IPSR_MSEL(IP7_6_4, SSI_SCK9_B, SEL_SSI9_1),
+       PINMUX_IPSR_MSEL(IP7_6_4, HSPI_CLK1_C, SEL_HSPI1_2),
+       PINMUX_IPSR_MSEL(IP7_9_7, SSI_WS78, SEL_SSI7_0),
        PINMUX_IPSR_DATA(IP7_9_7, CAN_DEBUGOUT14),
-       PINMUX_IPSR_MODSEL_DATA(IP7_9_7, IRQ1_B, SEL_INT1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_9_7, SSI_WS9_B, SEL_SSI9_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_9_7, HSPI_CS1_C, SEL_HSPI1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_12_10, SSI_SDATA7, SEL_SSI7_0),
+       PINMUX_IPSR_MSEL(IP7_9_7, IRQ1_B, SEL_INT1_1),
+       PINMUX_IPSR_MSEL(IP7_9_7, SSI_WS9_B, SEL_SSI9_1),
+       PINMUX_IPSR_MSEL(IP7_9_7, HSPI_CS1_C, SEL_HSPI1_2),
+       PINMUX_IPSR_MSEL(IP7_12_10, SSI_SDATA7, SEL_SSI7_0),
        PINMUX_IPSR_DATA(IP7_12_10, CAN_DEBUGOUT15),
-       PINMUX_IPSR_MODSEL_DATA(IP7_12_10, IRQ2_B, SEL_INT2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_12_10, TCLK1_C, SEL_TMU1_2),
+       PINMUX_IPSR_MSEL(IP7_12_10, IRQ2_B, SEL_INT2_1),
+       PINMUX_IPSR_MSEL(IP7_12_10, TCLK1_C, SEL_TMU1_2),
        PINMUX_IPSR_DATA(IP7_12_10, HSPI_TX1_C),
-       PINMUX_IPSR_MODSEL_DATA(IP7_14_13, SSI_SDATA8, SEL_SSI8_0),
+       PINMUX_IPSR_MSEL(IP7_14_13, SSI_SDATA8, SEL_SSI8_0),
        PINMUX_IPSR_DATA(IP7_14_13, VSP),
-       PINMUX_IPSR_MODSEL_DATA(IP7_14_13, IRQ3_B, SEL_INT3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_14_13, HSPI_RX1_C, SEL_HSPI1_2),
+       PINMUX_IPSR_MSEL(IP7_14_13, IRQ3_B, SEL_INT3_1),
+       PINMUX_IPSR_MSEL(IP7_14_13, HSPI_RX1_C, SEL_HSPI1_2),
        PINMUX_IPSR_DATA(IP7_16_15, SD0_CLK),
        PINMUX_IPSR_DATA(IP7_16_15, ATACS01),
-       PINMUX_IPSR_MODSEL_DATA(IP7_16_15, SCK1_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP7_16_15, SCK1_B, SEL_SCIF1_1),
        PINMUX_IPSR_DATA(IP7_18_17, SD0_CMD),
        PINMUX_IPSR_DATA(IP7_18_17, ATACS11),
        PINMUX_IPSR_DATA(IP7_18_17, TX1_B),
        PINMUX_IPSR_DATA(IP7_18_17, CC5_TDO),
        PINMUX_IPSR_DATA(IP7_20_19, SD0_DAT0),
        PINMUX_IPSR_DATA(IP7_20_19, ATADIR1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_20_19, RX1_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP7_20_19, RX1_B, SEL_SCIF1_1),
        PINMUX_IPSR_DATA(IP7_20_19, CC5_TRST),
        PINMUX_IPSR_DATA(IP7_22_21, SD0_DAT1),
        PINMUX_IPSR_DATA(IP7_22_21, ATAG1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_22_21, SCK2_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP7_22_21, SCK2_B, SEL_SCIF2_1),
        PINMUX_IPSR_DATA(IP7_22_21, CC5_TMS),
        PINMUX_IPSR_DATA(IP7_24_23, SD0_DAT2),
        PINMUX_IPSR_DATA(IP7_24_23, ATARD1),
@@ -1122,17 +1121,17 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP7_24_23, CC5_TCK),
        PINMUX_IPSR_DATA(IP7_26_25, SD0_DAT3),
        PINMUX_IPSR_DATA(IP7_26_25, ATAWR1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_26_25, RX2_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP7_26_25, RX2_B, SEL_SCIF2_1),
        PINMUX_IPSR_DATA(IP7_26_25, CC5_TDI),
        PINMUX_IPSR_DATA(IP7_28_27, SD0_CD),
-       PINMUX_IPSR_MODSEL_DATA(IP7_28_27, DREQ2, SEL_EXBUS2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_28_27, RTS1_B_TANS_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP7_28_27, DREQ2, SEL_EXBUS2_0),
+       PINMUX_IPSR_MSEL(IP7_28_27, RTS1_B_TANS_B, SEL_SCIF1_1),
        PINMUX_IPSR_DATA(IP7_30_29, SD0_WP),
        PINMUX_IPSR_DATA(IP7_30_29, DACK2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_30_29, CTS1_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP7_30_29, CTS1_B, SEL_SCIF1_1),
 
        PINMUX_IPSR_DATA(IP8_3_0, HSPI_CLK0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_3_0, CTS0, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP8_3_0, CTS0, SEL_SCIF0_0),
        PINMUX_IPSR_DATA(IP8_3_0, USB_OVC0),
        PINMUX_IPSR_DATA(IP8_3_0, AD_CLK),
        PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE4),
@@ -1141,7 +1140,7 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE28),
        PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE36),
        PINMUX_IPSR_DATA(IP8_7_4, HSPI_CS0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_7_4, RTS0_TANS, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP8_7_4, RTS0_TANS, SEL_SCIF0_0),
        PINMUX_IPSR_DATA(IP8_7_4, USB_OVC1),
        PINMUX_IPSR_DATA(IP8_7_4, AD_DI),
        PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE5),
@@ -1159,7 +1158,7 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE30),
        PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE38),
        PINMUX_IPSR_DATA(IP8_15_12, HSPI_RX0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_15_12, RX0, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP8_15_12, RX0, SEL_SCIF0_0),
        PINMUX_IPSR_DATA(IP8_15_12, CAN_STEP0),
        PINMUX_IPSR_DATA(IP8_15_12, AD_NCS),
        PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE7),
@@ -1181,25 +1180,25 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP8_22_21, HTX1_B),
        PINMUX_IPSR_DATA(IP8_22_21, MT1_SYNC),
        PINMUX_IPSR_DATA(IP8_24_23, VI0_FIELD),
-       PINMUX_IPSR_MODSEL_DATA(IP8_24_23, RX1_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP8_24_23, HRX1_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP8_24_23, RX1_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP8_24_23, HRX1_B, SEL_HSCIF1_1),
        PINMUX_IPSR_DATA(IP8_27_25, VI0_HSYNC),
-       PINMUX_IPSR_MODSEL_DATA(IP8_27_25, VI0_DATA0_B_VI0_B0_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_27_25, CTS1_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP8_27_25, VI0_DATA0_B_VI0_B0_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP8_27_25, CTS1_C, SEL_SCIF1_2),
        PINMUX_IPSR_DATA(IP8_27_25, TX4_D),
        PINMUX_IPSR_DATA(IP8_27_25, MMC1_CMD),
-       PINMUX_IPSR_MODSEL_DATA(IP8_27_25, HSCK1_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP8_27_25, HSCK1_B, SEL_HSCIF1_1),
        PINMUX_IPSR_DATA(IP8_30_28, VI0_VSYNC),
-       PINMUX_IPSR_MODSEL_DATA(IP8_30_28, VI0_DATA1_B_VI0_B1_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_30_28, RTS1_C_TANS_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP8_30_28, RX4_D, SEL_SCIF4_3),
-       PINMUX_IPSR_MODSEL_DATA(IP8_30_28, PWMFSW0_C, SEL_PWMFSW_2),
+       PINMUX_IPSR_MSEL(IP8_30_28, VI0_DATA1_B_VI0_B1_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP8_30_28, RTS1_C_TANS_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP8_30_28, RX4_D, SEL_SCIF4_3),
+       PINMUX_IPSR_MSEL(IP8_30_28, PWMFSW0_C, SEL_PWMFSW_2),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_1_0, VI0_DATA0_VI0_B0, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_1_0, HRTS1_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP9_1_0, VI0_DATA0_VI0_B0, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP9_1_0, HRTS1_B, SEL_HSCIF1_1),
        PINMUX_IPSR_DATA(IP9_1_0, MT1_VCXO),
-       PINMUX_IPSR_MODSEL_DATA(IP9_3_2, VI0_DATA1_VI0_B1, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_3_2, HCTS1_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP9_3_2, VI0_DATA1_VI0_B1, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP9_3_2, HCTS1_B, SEL_HSCIF1_1),
        PINMUX_IPSR_DATA(IP9_3_2, MT1_PWM),
        PINMUX_IPSR_DATA(IP9_4, VI0_DATA2_VI0_B2),
        PINMUX_IPSR_DATA(IP9_4, MMC1_D0),
@@ -1216,12 +1215,12 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP9_11_10, MMC1_D5),
        PINMUX_IPSR_DATA(IP9_11_10, ARM_TRACEDATA_1),
        PINMUX_IPSR_DATA(IP9_13_12, VI0_G0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_13_12, SSI_SCK78_C, SEL_SSI7_2),
-       PINMUX_IPSR_MODSEL_DATA(IP9_13_12, IRQ0, SEL_INT0_0),
+       PINMUX_IPSR_MSEL(IP9_13_12, SSI_SCK78_C, SEL_SSI7_2),
+       PINMUX_IPSR_MSEL(IP9_13_12, IRQ0, SEL_INT0_0),
        PINMUX_IPSR_DATA(IP9_13_12, ARM_TRACEDATA_2),
        PINMUX_IPSR_DATA(IP9_15_14, VI0_G1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_14, SSI_WS78_C, SEL_SSI7_2),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_14, IRQ1, SEL_INT1_0),
+       PINMUX_IPSR_MSEL(IP9_15_14, SSI_WS78_C, SEL_SSI7_2),
+       PINMUX_IPSR_MSEL(IP9_15_14, IRQ1, SEL_INT1_0),
        PINMUX_IPSR_DATA(IP9_15_14, ARM_TRACEDATA_3),
        PINMUX_IPSR_DATA(IP9_18_16, VI0_G2),
        PINMUX_IPSR_DATA(IP9_18_16, ETH_TXD1),
@@ -1235,29 +1234,29 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP9_21_19, TS_SDAT0),
        PINMUX_IPSR_DATA(IP9_23_22, VI0_G4),
        PINMUX_IPSR_DATA(IP9_23_22, ETH_TX_EN),
-       PINMUX_IPSR_MODSEL_DATA(IP9_23_22, SD2_DAT0_B, SEL_SD2_1),
+       PINMUX_IPSR_MSEL(IP9_23_22, SD2_DAT0_B, SEL_SD2_1),
        PINMUX_IPSR_DATA(IP9_23_22, ARM_TRACEDATA_6),
        PINMUX_IPSR_DATA(IP9_25_24, VI0_G5),
        PINMUX_IPSR_DATA(IP9_25_24, ETH_RX_ER),
-       PINMUX_IPSR_MODSEL_DATA(IP9_25_24, SD2_DAT1_B, SEL_SD2_1),
+       PINMUX_IPSR_MSEL(IP9_25_24, SD2_DAT1_B, SEL_SD2_1),
        PINMUX_IPSR_DATA(IP9_25_24, ARM_TRACEDATA_7),
        PINMUX_IPSR_DATA(IP9_27_26, VI0_G6),
        PINMUX_IPSR_DATA(IP9_27_26, ETH_RXD0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_27_26, SD2_DAT2_B, SEL_SD2_1),
+       PINMUX_IPSR_MSEL(IP9_27_26, SD2_DAT2_B, SEL_SD2_1),
        PINMUX_IPSR_DATA(IP9_27_26, ARM_TRACEDATA_8),
        PINMUX_IPSR_DATA(IP9_29_28, VI0_G7),
        PINMUX_IPSR_DATA(IP9_29_28, ETH_RXD1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_29_28, SD2_DAT3_B, SEL_SD2_1),
+       PINMUX_IPSR_MSEL(IP9_29_28, SD2_DAT3_B, SEL_SD2_1),
        PINMUX_IPSR_DATA(IP9_29_28, ARM_TRACEDATA_9),
 
        PINMUX_IPSR_DATA(IP10_2_0, VI0_R0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SSI_SDATA7_C, SEL_SSI7_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SCK1_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, DREQ1_B, SEL_EXBUS1_0),
+       PINMUX_IPSR_MSEL(IP10_2_0, SSI_SDATA7_C, SEL_SSI7_2),
+       PINMUX_IPSR_MSEL(IP10_2_0, SCK1_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP10_2_0, DREQ1_B, SEL_EXBUS1_0),
        PINMUX_IPSR_DATA(IP10_2_0, ARM_TRACEDATA_10),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, DREQ0_C, SEL_EXBUS0_2),
+       PINMUX_IPSR_MSEL(IP10_2_0, DREQ0_C, SEL_EXBUS0_2),
        PINMUX_IPSR_DATA(IP10_5_3, VI0_R1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SSI_SDATA8_C, SEL_SSI8_2),
+       PINMUX_IPSR_MSEL(IP10_5_3, SSI_SDATA8_C, SEL_SSI8_2),
        PINMUX_IPSR_DATA(IP10_5_3, DACK1_B),
        PINMUX_IPSR_DATA(IP10_5_3, ARM_TRACEDATA_11),
        PINMUX_IPSR_DATA(IP10_5_3, DACK0_C),
@@ -1265,74 +1264,74 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP10_8_6, VI0_R2),
        PINMUX_IPSR_DATA(IP10_8_6, ETH_LINK),
        PINMUX_IPSR_DATA(IP10_8_6, SD2_CLK_B),
-       PINMUX_IPSR_MODSEL_DATA(IP10_8_6, IRQ2, SEL_INT2_0),
+       PINMUX_IPSR_MSEL(IP10_8_6, IRQ2, SEL_INT2_0),
        PINMUX_IPSR_DATA(IP10_8_6, ARM_TRACEDATA_12),
        PINMUX_IPSR_DATA(IP10_11_9, VI0_R3),
        PINMUX_IPSR_DATA(IP10_11_9, ETH_MAGIC),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SD2_CMD_B, SEL_SD2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, IRQ3, SEL_INT3_0),
+       PINMUX_IPSR_MSEL(IP10_11_9, SD2_CMD_B, SEL_SD2_1),
+       PINMUX_IPSR_MSEL(IP10_11_9, IRQ3, SEL_INT3_0),
        PINMUX_IPSR_DATA(IP10_11_9, ARM_TRACEDATA_13),
        PINMUX_IPSR_DATA(IP10_14_12, VI0_R4),
        PINMUX_IPSR_DATA(IP10_14_12, ETH_REFCLK),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SD2_CD_B, SEL_SD2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, HSPI_CLK1_B, SEL_HSPI1_1),
+       PINMUX_IPSR_MSEL(IP10_14_12, SD2_CD_B, SEL_SD2_1),
+       PINMUX_IPSR_MSEL(IP10_14_12, HSPI_CLK1_B, SEL_HSPI1_1),
        PINMUX_IPSR_DATA(IP10_14_12, ARM_TRACEDATA_14),
        PINMUX_IPSR_DATA(IP10_14_12, MT1_CLK),
        PINMUX_IPSR_DATA(IP10_14_12, TS_SCK0),
        PINMUX_IPSR_DATA(IP10_17_15, VI0_R5),
        PINMUX_IPSR_DATA(IP10_17_15, ETH_TXD0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_17_15, SD2_WP_B, SEL_SD2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_17_15, HSPI_CS1_B, SEL_HSPI1_1),
+       PINMUX_IPSR_MSEL(IP10_17_15, SD2_WP_B, SEL_SD2_1),
+       PINMUX_IPSR_MSEL(IP10_17_15, HSPI_CS1_B, SEL_HSPI1_1),
        PINMUX_IPSR_DATA(IP10_17_15, ARM_TRACEDATA_15),
        PINMUX_IPSR_DATA(IP10_17_15, MT1_D),
        PINMUX_IPSR_DATA(IP10_17_15, TS_SDEN0),
        PINMUX_IPSR_DATA(IP10_20_18, VI0_R6),
        PINMUX_IPSR_DATA(IP10_20_18, ETH_MDC),
-       PINMUX_IPSR_MODSEL_DATA(IP10_20_18, DREQ2_C, SEL_EXBUS2_2),
+       PINMUX_IPSR_MSEL(IP10_20_18, DREQ2_C, SEL_EXBUS2_2),
        PINMUX_IPSR_DATA(IP10_20_18, HSPI_TX1_B),
        PINMUX_IPSR_DATA(IP10_20_18, TRACECLK),
        PINMUX_IPSR_DATA(IP10_20_18, MT1_BEN),
-       PINMUX_IPSR_MODSEL_DATA(IP10_20_18, PWMFSW0_D, SEL_PWMFSW_3),
+       PINMUX_IPSR_MSEL(IP10_20_18, PWMFSW0_D, SEL_PWMFSW_3),
        PINMUX_IPSR_DATA(IP10_23_21, VI0_R7),
        PINMUX_IPSR_DATA(IP10_23_21, ETH_MDIO),
        PINMUX_IPSR_DATA(IP10_23_21, DACK2_C),
-       PINMUX_IPSR_MODSEL_DATA(IP10_23_21, HSPI_RX1_B, SEL_HSPI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_23_21, SCIF_CLK_D, SEL_SCIF_3),
+       PINMUX_IPSR_MSEL(IP10_23_21, HSPI_RX1_B, SEL_HSPI1_1),
+       PINMUX_IPSR_MSEL(IP10_23_21, SCIF_CLK_D, SEL_SCIF_3),
        PINMUX_IPSR_DATA(IP10_23_21, TRACECTL),
        PINMUX_IPSR_DATA(IP10_23_21, MT1_PEN),
        PINMUX_IPSR_DATA(IP10_25_24, VI1_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP10_25_24, SIM_D, SEL_SIM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_25_24, SDA3, SEL_I2C3_0),
+       PINMUX_IPSR_MSEL(IP10_25_24, SIM_D, SEL_SIM_0),
+       PINMUX_IPSR_MSEL(IP10_25_24, SDA3, SEL_I2C3_0),
        PINMUX_IPSR_DATA(IP10_28_26, VI1_HSYNC),
        PINMUX_IPSR_DATA(IP10_28_26, VI3_CLK),
        PINMUX_IPSR_DATA(IP10_28_26, SSI_SCK4),
-       PINMUX_IPSR_MODSEL_DATA(IP10_28_26, GPS_SIGN_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_28_26, PWMFSW0_E, SEL_PWMFSW_4),
+       PINMUX_IPSR_MSEL(IP10_28_26, GPS_SIGN_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP10_28_26, PWMFSW0_E, SEL_PWMFSW_4),
        PINMUX_IPSR_DATA(IP10_31_29, VI1_VSYNC),
        PINMUX_IPSR_DATA(IP10_31_29, AUDIO_CLKOUT_C),
        PINMUX_IPSR_DATA(IP10_31_29, SSI_WS4),
        PINMUX_IPSR_DATA(IP10_31_29, SIM_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP10_31_29, GPS_MAG_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP10_31_29, GPS_MAG_C, SEL_GPS_2),
        PINMUX_IPSR_DATA(IP10_31_29, SPV_TRST),
-       PINMUX_IPSR_MODSEL_DATA(IP10_31_29, SCL3, SEL_I2C3_0),
+       PINMUX_IPSR_MSEL(IP10_31_29, SCL3, SEL_I2C3_0),
 
        PINMUX_IPSR_DATA(IP11_2_0, VI1_DATA0_VI1_B0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_2_0, SD2_DAT0, SEL_SD2_0),
+       PINMUX_IPSR_MSEL(IP11_2_0, SD2_DAT0, SEL_SD2_0),
        PINMUX_IPSR_DATA(IP11_2_0, SIM_RST),
        PINMUX_IPSR_DATA(IP11_2_0, SPV_TCK),
        PINMUX_IPSR_DATA(IP11_2_0, ADICLK_B),
        PINMUX_IPSR_DATA(IP11_5_3, VI1_DATA1_VI1_B1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_5_3, SD2_DAT1, SEL_SD2_0),
+       PINMUX_IPSR_MSEL(IP11_5_3, SD2_DAT1, SEL_SD2_0),
        PINMUX_IPSR_DATA(IP11_5_3, MT0_CLK),
        PINMUX_IPSR_DATA(IP11_5_3, SPV_TMS),
-       PINMUX_IPSR_MODSEL_DATA(IP11_5_3, ADICS_B_SAMP_B, SEL_ADI_1),
+       PINMUX_IPSR_MSEL(IP11_5_3, ADICS_B_SAMP_B, SEL_ADI_1),
        PINMUX_IPSR_DATA(IP11_8_6, VI1_DATA2_VI1_B2),
-       PINMUX_IPSR_MODSEL_DATA(IP11_8_6, SD2_DAT2, SEL_SD2_0),
+       PINMUX_IPSR_MSEL(IP11_8_6, SD2_DAT2, SEL_SD2_0),
        PINMUX_IPSR_DATA(IP11_8_6, MT0_D),
        PINMUX_IPSR_DATA(IP11_8_6, SPVTDI),
-       PINMUX_IPSR_MODSEL_DATA(IP11_8_6, ADIDATA_B, SEL_ADI_1),
+       PINMUX_IPSR_MSEL(IP11_8_6, ADIDATA_B, SEL_ADI_1),
        PINMUX_IPSR_DATA(IP11_11_9, VI1_DATA3_VI1_B3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_11_9, SD2_DAT3, SEL_SD2_0),
+       PINMUX_IPSR_MSEL(IP11_11_9, SD2_DAT3, SEL_SD2_0),
        PINMUX_IPSR_DATA(IP11_11_9, MT0_BEN),
        PINMUX_IPSR_DATA(IP11_11_9, SPV_TDO),
        PINMUX_IPSR_DATA(IP11_11_9, ADICHS0_B),
@@ -1340,74 +1339,74 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP11_14_12, SD2_CLK),
        PINMUX_IPSR_DATA(IP11_14_12, MT0_PEN),
        PINMUX_IPSR_DATA(IP11_14_12, SPA_TRST),
-       PINMUX_IPSR_MODSEL_DATA(IP11_14_12, HSPI_CLK1_D, SEL_HSPI1_3),
+       PINMUX_IPSR_MSEL(IP11_14_12, HSPI_CLK1_D, SEL_HSPI1_3),
        PINMUX_IPSR_DATA(IP11_14_12, ADICHS1_B),
        PINMUX_IPSR_DATA(IP11_17_15, VI1_DATA5_VI1_B5),
-       PINMUX_IPSR_MODSEL_DATA(IP11_17_15, SD2_CMD, SEL_SD2_0),
+       PINMUX_IPSR_MSEL(IP11_17_15, SD2_CMD, SEL_SD2_0),
        PINMUX_IPSR_DATA(IP11_17_15, MT0_SYNC),
        PINMUX_IPSR_DATA(IP11_17_15, SPA_TCK),
-       PINMUX_IPSR_MODSEL_DATA(IP11_17_15, HSPI_CS1_D, SEL_HSPI1_3),
+       PINMUX_IPSR_MSEL(IP11_17_15, HSPI_CS1_D, SEL_HSPI1_3),
        PINMUX_IPSR_DATA(IP11_17_15, ADICHS2_B),
        PINMUX_IPSR_DATA(IP11_20_18, VI1_DATA6_VI1_B6),
-       PINMUX_IPSR_MODSEL_DATA(IP11_20_18, SD2_CD, SEL_SD2_0),
+       PINMUX_IPSR_MSEL(IP11_20_18, SD2_CD, SEL_SD2_0),
        PINMUX_IPSR_DATA(IP11_20_18, MT0_VCXO),
        PINMUX_IPSR_DATA(IP11_20_18, SPA_TMS),
        PINMUX_IPSR_DATA(IP11_20_18, HSPI_TX1_D),
        PINMUX_IPSR_DATA(IP11_23_21, VI1_DATA7_VI1_B7),
-       PINMUX_IPSR_MODSEL_DATA(IP11_23_21, SD2_WP, SEL_SD2_0),
+       PINMUX_IPSR_MSEL(IP11_23_21, SD2_WP, SEL_SD2_0),
        PINMUX_IPSR_DATA(IP11_23_21, MT0_PWM),
        PINMUX_IPSR_DATA(IP11_23_21, SPA_TDI),
-       PINMUX_IPSR_MODSEL_DATA(IP11_23_21, HSPI_RX1_D, SEL_HSPI1_3),
+       PINMUX_IPSR_MSEL(IP11_23_21, HSPI_RX1_D, SEL_HSPI1_3),
        PINMUX_IPSR_DATA(IP11_26_24, VI1_G0),
        PINMUX_IPSR_DATA(IP11_26_24, VI3_DATA0),
        PINMUX_IPSR_DATA(IP11_26_24, TS_SCK1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_26_24, DREQ2_B, SEL_EXBUS2_1),
+       PINMUX_IPSR_MSEL(IP11_26_24, DREQ2_B, SEL_EXBUS2_1),
        PINMUX_IPSR_DATA(IP11_26_24, TX2),
        PINMUX_IPSR_DATA(IP11_26_24, SPA_TDO),
-       PINMUX_IPSR_MODSEL_DATA(IP11_26_24, HCTS0_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP11_26_24, HCTS0_B, SEL_HSCIF0_1),
        PINMUX_IPSR_DATA(IP11_29_27, VI1_G1),
        PINMUX_IPSR_DATA(IP11_29_27, VI3_DATA1),
        PINMUX_IPSR_DATA(IP11_29_27, SSI_SCK1),
        PINMUX_IPSR_DATA(IP11_29_27, TS_SDEN1),
        PINMUX_IPSR_DATA(IP11_29_27, DACK2_B),
-       PINMUX_IPSR_MODSEL_DATA(IP11_29_27, RX2, SEL_SCIF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_29_27, HRTS0_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP11_29_27, RX2, SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP11_29_27, HRTS0_B, SEL_HSCIF0_1),
 
        PINMUX_IPSR_DATA(IP12_2_0, VI1_G2),
        PINMUX_IPSR_DATA(IP12_2_0, VI3_DATA2),
        PINMUX_IPSR_DATA(IP12_2_0, SSI_WS1),
        PINMUX_IPSR_DATA(IP12_2_0, TS_SPSYNC1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_2_0, SCK2, SEL_SCIF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_2_0, HSCK0_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP12_2_0, SCK2, SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP12_2_0, HSCK0_B, SEL_HSCIF0_1),
        PINMUX_IPSR_DATA(IP12_5_3, VI1_G3),
        PINMUX_IPSR_DATA(IP12_5_3, VI3_DATA3),
        PINMUX_IPSR_DATA(IP12_5_3, SSI_SCK2),
        PINMUX_IPSR_DATA(IP12_5_3, TS_SDAT1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_5_3, SCL1_C, SEL_I2C1_2),
+       PINMUX_IPSR_MSEL(IP12_5_3, SCL1_C, SEL_I2C1_2),
        PINMUX_IPSR_DATA(IP12_5_3, HTX0_B),
        PINMUX_IPSR_DATA(IP12_8_6, VI1_G4),
        PINMUX_IPSR_DATA(IP12_8_6, VI3_DATA4),
        PINMUX_IPSR_DATA(IP12_8_6, SSI_WS2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_8_6, SDA1_C, SEL_I2C1_2),
+       PINMUX_IPSR_MSEL(IP12_8_6, SDA1_C, SEL_I2C1_2),
        PINMUX_IPSR_DATA(IP12_8_6, SIM_RST_B),
-       PINMUX_IPSR_MODSEL_DATA(IP12_8_6, HRX0_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP12_8_6, HRX0_B, SEL_HSCIF0_1),
        PINMUX_IPSR_DATA(IP12_11_9, VI1_G5),
        PINMUX_IPSR_DATA(IP12_11_9, VI3_DATA5),
-       PINMUX_IPSR_MODSEL_DATA(IP12_11_9, GPS_CLK, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP12_11_9, GPS_CLK, SEL_GPS_0),
        PINMUX_IPSR_DATA(IP12_11_9, FSE),
        PINMUX_IPSR_DATA(IP12_11_9, TX4_B),
-       PINMUX_IPSR_MODSEL_DATA(IP12_11_9, SIM_D_B, SEL_SIM_1),
+       PINMUX_IPSR_MSEL(IP12_11_9, SIM_D_B, SEL_SIM_1),
        PINMUX_IPSR_DATA(IP12_14_12, VI1_G6),
        PINMUX_IPSR_DATA(IP12_14_12, VI3_DATA6),
-       PINMUX_IPSR_MODSEL_DATA(IP12_14_12, GPS_SIGN, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP12_14_12, GPS_SIGN, SEL_GPS_0),
        PINMUX_IPSR_DATA(IP12_14_12, FRB),
-       PINMUX_IPSR_MODSEL_DATA(IP12_14_12, RX4_B, SEL_SCIF4_1),
+       PINMUX_IPSR_MSEL(IP12_14_12, RX4_B, SEL_SCIF4_1),
        PINMUX_IPSR_DATA(IP12_14_12, SIM_CLK_B),
        PINMUX_IPSR_DATA(IP12_17_15, VI1_G7),
        PINMUX_IPSR_DATA(IP12_17_15, VI3_DATA7),
-       PINMUX_IPSR_MODSEL_DATA(IP12_17_15, GPS_MAG, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP12_17_15, GPS_MAG, SEL_GPS_0),
        PINMUX_IPSR_DATA(IP12_17_15, FCE),
-       PINMUX_IPSR_MODSEL_DATA(IP12_17_15, SCK4_B, SEL_SCIF4_1),
+       PINMUX_IPSR_MSEL(IP12_17_15, SCK4_B, SEL_SCIF4_1),
 };
 
 static const struct sh_pfc_pin pinmux_pins[] = {
@@ -3868,6 +3867,6 @@ const struct sh_pfc_soc_info r8a7779_pinmux_info = {
 
        .cfg_regs = pinmux_config_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index fc344a7c2b5382189fff1ca014a45d00ad394c7d..d9924b0d53b789c36cf47fd7c5c133b12c32c729 100644
@@ -22,7 +22,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/platform_data/gpio-rcar.h>
 
 #include "core.h"
 #include "sh_pfc.h"
@@ -818,103 +817,103 @@ static const u16 pinmux_data[] = {
        PINMUX_DATA(DU_DOTCLKIN2_MARK, FN_DU_DOTCLKIN2),
 
        PINMUX_IPSR_DATA(IP0_2_0, D0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_2_0, MSIOF3_SCK_B, SEL_SOF3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_2_0, VI3_DATA0, SEL_VI3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_2_0, VI0_G4, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_2_0, VI0_G4_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP0_2_0, MSIOF3_SCK_B, SEL_SOF3_1),
+       PINMUX_IPSR_MSEL(IP0_2_0, VI3_DATA0, SEL_VI3_0),
+       PINMUX_IPSR_MSEL(IP0_2_0, VI0_G4, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP0_2_0, VI0_G4_B, SEL_VI0_1),
        PINMUX_IPSR_DATA(IP0_5_3, D1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_5_3, MSIOF3_SYNC_B, SEL_SOF3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_5_3, VI3_DATA1, SEL_VI3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_5_3, VI0_G5, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_5_3, VI0_G5_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP0_5_3, MSIOF3_SYNC_B, SEL_SOF3_1),
+       PINMUX_IPSR_MSEL(IP0_5_3, VI3_DATA1, SEL_VI3_0),
+       PINMUX_IPSR_MSEL(IP0_5_3, VI0_G5, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP0_5_3, VI0_G5_B, SEL_VI0_1),
        PINMUX_IPSR_DATA(IP0_8_6, D2),
-       PINMUX_IPSR_MODSEL_DATA(IP0_8_6, MSIOF3_RXD_B, SEL_SOF3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_8_6, VI3_DATA2, SEL_VI3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_8_6, VI0_G6, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_8_6, VI0_G6_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP0_8_6, MSIOF3_RXD_B, SEL_SOF3_1),
+       PINMUX_IPSR_MSEL(IP0_8_6, VI3_DATA2, SEL_VI3_0),
+       PINMUX_IPSR_MSEL(IP0_8_6, VI0_G6, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP0_8_6, VI0_G6_B, SEL_VI0_1),
        PINMUX_IPSR_DATA(IP0_11_9, D3),
-       PINMUX_IPSR_MODSEL_DATA(IP0_11_9, MSIOF3_TXD_B, SEL_SOF3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_11_9, VI3_DATA3, SEL_VI3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_11_9, VI0_G7, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_11_9, VI0_G7_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP0_11_9, MSIOF3_TXD_B, SEL_SOF3_1),
+       PINMUX_IPSR_MSEL(IP0_11_9, VI3_DATA3, SEL_VI3_0),
+       PINMUX_IPSR_MSEL(IP0_11_9, VI0_G7, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP0_11_9, VI0_G7_B, SEL_VI0_1),
        PINMUX_IPSR_DATA(IP0_15_12, D4),
-       PINMUX_IPSR_MODSEL_DATA(IP0_15_12, SCIFB1_RXD_F, SEL_SCIFB1_5),
-       PINMUX_IPSR_MODSEL_DATA(IP0_15_12, SCIFB0_RXD_C, SEL_SCIFB_2),
-       PINMUX_IPSR_MODSEL_DATA(IP0_15_12, VI3_DATA4, SEL_VI3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_15_12, VI0_R0, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_15_12, VI0_R0_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_15_12, RX0_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP0_15_12, SCIFB1_RXD_F, SEL_SCIFB1_5),
+       PINMUX_IPSR_MSEL(IP0_15_12, SCIFB0_RXD_C, SEL_SCIFB_2),
+       PINMUX_IPSR_MSEL(IP0_15_12, VI3_DATA4, SEL_VI3_0),
+       PINMUX_IPSR_MSEL(IP0_15_12, VI0_R0, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP0_15_12, VI0_R0_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP0_15_12, RX0_B, SEL_SCIF0_1),
        PINMUX_IPSR_DATA(IP0_19_16, D5),
-       PINMUX_IPSR_MODSEL_DATA(IP0_19_16, SCIFB1_TXD_F, SEL_SCIFB1_5),
-       PINMUX_IPSR_MODSEL_DATA(IP0_19_16, SCIFB0_TXD_C, SEL_SCIFB_2),
-       PINMUX_IPSR_MODSEL_DATA(IP0_19_16, VI3_DATA5, SEL_VI3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_19_16, VI0_R1, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_19_16, VI0_R1_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_19_16, TX0_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP0_19_16, SCIFB1_TXD_F, SEL_SCIFB1_5),
+       PINMUX_IPSR_MSEL(IP0_19_16, SCIFB0_TXD_C, SEL_SCIFB_2),
+       PINMUX_IPSR_MSEL(IP0_19_16, VI3_DATA5, SEL_VI3_0),
+       PINMUX_IPSR_MSEL(IP0_19_16, VI0_R1, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP0_19_16, VI0_R1_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP0_19_16, TX0_B, SEL_SCIF0_1),
        PINMUX_IPSR_DATA(IP0_22_20, D6),
-       PINMUX_IPSR_MODSEL_DATA(IP0_22_20, IIC2_SCL_C, SEL_IIC2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP0_22_20, VI3_DATA6, SEL_VI3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_22_20, VI0_R2, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_22_20, VI0_R2_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_22_20, I2C2_SCL_C, SEL_I2C2_2),
+       PINMUX_IPSR_MSEL(IP0_22_20, IIC2_SCL_C, SEL_IIC2_2),
+       PINMUX_IPSR_MSEL(IP0_22_20, VI3_DATA6, SEL_VI3_0),
+       PINMUX_IPSR_MSEL(IP0_22_20, VI0_R2, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP0_22_20, VI0_R2_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP0_22_20, I2C2_SCL_C, SEL_I2C2_2),
        PINMUX_IPSR_DATA(IP0_26_23, D7),
-       PINMUX_IPSR_MODSEL_DATA(IP0_26_23, AD_DI_B, SEL_ADI_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_26_23, IIC2_SDA_C, SEL_IIC2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP0_26_23, VI3_DATA7, SEL_VI3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_26_23, VI0_R3, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_26_23, VI0_R3_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_26_23, I2C2_SDA_C, SEL_I2C2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP0_26_23, TCLK1, SEL_TMU1_0),
+       PINMUX_IPSR_MSEL(IP0_26_23, AD_DI_B, SEL_ADI_1),
+       PINMUX_IPSR_MSEL(IP0_26_23, IIC2_SDA_C, SEL_IIC2_2),
+       PINMUX_IPSR_MSEL(IP0_26_23, VI3_DATA7, SEL_VI3_0),
+       PINMUX_IPSR_MSEL(IP0_26_23, VI0_R3, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP0_26_23, VI0_R3_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP0_26_23, I2C2_SDA_C, SEL_I2C2_2),
+       PINMUX_IPSR_MSEL(IP0_26_23, TCLK1, SEL_TMU1_0),
        PINMUX_IPSR_DATA(IP0_30_27, D8),
-       PINMUX_IPSR_MODSEL_DATA(IP0_30_27, SCIFA1_SCK_C, SEL_SCIFA1_2),
+       PINMUX_IPSR_MSEL(IP0_30_27, SCIFA1_SCK_C, SEL_SCIFA1_2),
        PINMUX_IPSR_DATA(IP0_30_27, AVB_TXD0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_30_27, VI0_G0, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_30_27, VI0_G0_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_30_27, VI2_DATA0_VI2_B0, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP0_30_27, VI0_G0, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP0_30_27, VI0_G0_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP0_30_27, VI2_DATA0_VI2_B0, SEL_VI2_0),
 
        PINMUX_IPSR_DATA(IP1_3_0, D9),
-       PINMUX_IPSR_MODSEL_DATA(IP1_3_0, SCIFA1_RXD_C, SEL_SCIFA1_2),
+       PINMUX_IPSR_MSEL(IP1_3_0, SCIFA1_RXD_C, SEL_SCIFA1_2),
        PINMUX_IPSR_DATA(IP1_3_0, AVB_TXD1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_3_0, VI0_G1, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_3_0, VI0_G1_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_3_0, VI2_DATA1_VI2_B1, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP1_3_0, VI0_G1, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP1_3_0, VI0_G1_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP1_3_0, VI2_DATA1_VI2_B1, SEL_VI2_0),
        PINMUX_IPSR_DATA(IP1_7_4, D10),
-       PINMUX_IPSR_MODSEL_DATA(IP1_7_4, SCIFA1_TXD_C, SEL_SCIFA1_2),
+       PINMUX_IPSR_MSEL(IP1_7_4, SCIFA1_TXD_C, SEL_SCIFA1_2),
        PINMUX_IPSR_DATA(IP1_7_4, AVB_TXD2),
-       PINMUX_IPSR_MODSEL_DATA(IP1_7_4, VI0_G2, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_7_4, VI0_G2_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_7_4, VI2_DATA2_VI2_B2, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP1_7_4, VI0_G2, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP1_7_4, VI0_G2_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP1_7_4, VI2_DATA2_VI2_B2, SEL_VI2_0),
        PINMUX_IPSR_DATA(IP1_11_8, D11),
-       PINMUX_IPSR_MODSEL_DATA(IP1_11_8, SCIFA1_CTS_N_C, SEL_SCIFA1_2),
+       PINMUX_IPSR_MSEL(IP1_11_8, SCIFA1_CTS_N_C, SEL_SCIFA1_2),
        PINMUX_IPSR_DATA(IP1_11_8, AVB_TXD3),
-       PINMUX_IPSR_MODSEL_DATA(IP1_11_8, VI0_G3, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_11_8, VI0_G3_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_11_8, VI2_DATA3_VI2_B3, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP1_11_8, VI0_G3, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP1_11_8, VI0_G3_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP1_11_8, VI2_DATA3_VI2_B3, SEL_VI2_0),
        PINMUX_IPSR_DATA(IP1_14_12, D12),
-       PINMUX_IPSR_MODSEL_DATA(IP1_14_12, SCIFA1_RTS_N_C, SEL_SCIFA1_2),
+       PINMUX_IPSR_MSEL(IP1_14_12, SCIFA1_RTS_N_C, SEL_SCIFA1_2),
        PINMUX_IPSR_DATA(IP1_14_12, AVB_TXD4),
-       PINMUX_IPSR_MODSEL_DATA(IP1_14_12, VI0_HSYNC_N, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_14_12, VI0_HSYNC_N_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_14_12, VI2_DATA4_VI2_B4, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP1_14_12, VI0_HSYNC_N, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP1_14_12, VI0_HSYNC_N_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP1_14_12, VI2_DATA4_VI2_B4, SEL_VI2_0),
        PINMUX_IPSR_DATA(IP1_17_15, D13),
        PINMUX_IPSR_DATA(IP1_17_15, AVB_TXD5),
-       PINMUX_IPSR_MODSEL_DATA(IP1_17_15, VI0_VSYNC_N, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_17_15, VI0_VSYNC_N_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_17_15, VI2_DATA5_VI2_B5, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP1_17_15, VI0_VSYNC_N, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP1_17_15, VI0_VSYNC_N_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP1_17_15, VI2_DATA5_VI2_B5, SEL_VI2_0),
        PINMUX_IPSR_DATA(IP1_21_18, D14),
-       PINMUX_IPSR_MODSEL_DATA(IP1_21_18, SCIFB1_RXD_C, SEL_SCIFB1_2),
+       PINMUX_IPSR_MSEL(IP1_21_18, SCIFB1_RXD_C, SEL_SCIFB1_2),
        PINMUX_IPSR_DATA(IP1_21_18, AVB_TXD6),
-       PINMUX_IPSR_MODSEL_DATA(IP1_21_18, RX1_B, SEL_SCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_21_18, VI0_CLKENB, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_21_18, VI0_CLKENB_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_21_18, VI2_DATA6_VI2_B6, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP1_21_18, RX1_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP1_21_18, VI0_CLKENB, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP1_21_18, VI0_CLKENB_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP1_21_18, VI2_DATA6_VI2_B6, SEL_VI2_0),
        PINMUX_IPSR_DATA(IP1_25_22, D15),
-       PINMUX_IPSR_MODSEL_DATA(IP1_25_22, SCIFB1_TXD_C, SEL_SCIFB1_2),
+       PINMUX_IPSR_MSEL(IP1_25_22, SCIFB1_TXD_C, SEL_SCIFB1_2),
        PINMUX_IPSR_DATA(IP1_25_22, AVB_TXD7),
-       PINMUX_IPSR_MODSEL_DATA(IP1_25_22, TX1_B, SEL_SCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_25_22, VI0_FIELD, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_25_22, VI0_FIELD_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_25_22, VI2_DATA7_VI2_B7, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP1_25_22, TX1_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP1_25_22, VI0_FIELD, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP1_25_22, VI0_FIELD_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP1_25_22, VI2_DATA7_VI2_B7, SEL_VI2_0),
        PINMUX_IPSR_DATA(IP1_27_26, A0),
        PINMUX_IPSR_DATA(IP1_27_26, PWM3),
        PINMUX_IPSR_DATA(IP1_29_28, A1),
@@ -922,512 +921,512 @@ static const u16 pinmux_data[] = {
 
        PINMUX_IPSR_DATA(IP2_2_0, A2),
        PINMUX_IPSR_DATA(IP2_2_0, PWM5),
-       PINMUX_IPSR_MODSEL_DATA(IP2_2_0, MSIOF1_SS1_B, SEL_SOF1_1),
+       PINMUX_IPSR_MSEL(IP2_2_0, MSIOF1_SS1_B, SEL_SOF1_1),
        PINMUX_IPSR_DATA(IP2_5_3, A3),
        PINMUX_IPSR_DATA(IP2_5_3, PWM6),
-       PINMUX_IPSR_MODSEL_DATA(IP2_5_3, MSIOF1_SS2_B, SEL_SOF1_1),
+       PINMUX_IPSR_MSEL(IP2_5_3, MSIOF1_SS2_B, SEL_SOF1_1),
        PINMUX_IPSR_DATA(IP2_8_6, A4),
-       PINMUX_IPSR_MODSEL_DATA(IP2_8_6, MSIOF1_TXD_B, SEL_SOF1_1),
+       PINMUX_IPSR_MSEL(IP2_8_6, MSIOF1_TXD_B, SEL_SOF1_1),
        PINMUX_IPSR_DATA(IP2_8_6, TPU0TO0),
        PINMUX_IPSR_DATA(IP2_11_9, A5),
-       PINMUX_IPSR_MODSEL_DATA(IP2_11_9, SCIFA1_TXD_B, SEL_SCIFA1_1),
+       PINMUX_IPSR_MSEL(IP2_11_9, SCIFA1_TXD_B, SEL_SCIFA1_1),
        PINMUX_IPSR_DATA(IP2_11_9, TPU0TO1),
        PINMUX_IPSR_DATA(IP2_14_12, A6),
-       PINMUX_IPSR_MODSEL_DATA(IP2_14_12, SCIFA1_RTS_N_B, SEL_SCIFA1_1),
+       PINMUX_IPSR_MSEL(IP2_14_12, SCIFA1_RTS_N_B, SEL_SCIFA1_1),
        PINMUX_IPSR_DATA(IP2_14_12, TPU0TO2),
        PINMUX_IPSR_DATA(IP2_17_15, A7),
-       PINMUX_IPSR_MODSEL_DATA(IP2_17_15, SCIFA1_SCK_B, SEL_SCIFA1_1),
+       PINMUX_IPSR_MSEL(IP2_17_15, SCIFA1_SCK_B, SEL_SCIFA1_1),
        PINMUX_IPSR_DATA(IP2_17_15, AUDIO_CLKOUT_B),
        PINMUX_IPSR_DATA(IP2_17_15, TPU0TO3),
        PINMUX_IPSR_DATA(IP2_21_18, A8),
-       PINMUX_IPSR_MODSEL_DATA(IP2_21_18, SCIFA1_RXD_B, SEL_SCIFA1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_21_18, SSI_SCK5_B, SEL_SSI5_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_21_18, VI0_R4, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_21_18, VI0_R4_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_21_18, SCIFB2_RXD_C, SEL_SCIFB2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP2_21_18, RX2_B, SEL_SCIF2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_21_18, VI2_DATA0_VI2_B0_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP2_21_18, SCIFA1_RXD_B, SEL_SCIFA1_1),
+       PINMUX_IPSR_MSEL(IP2_21_18, SSI_SCK5_B, SEL_SSI5_1),
+       PINMUX_IPSR_MSEL(IP2_21_18, VI0_R4, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP2_21_18, VI0_R4_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP2_21_18, SCIFB2_RXD_C, SEL_SCIFB2_2),
+       PINMUX_IPSR_MSEL(IP2_21_18, RX2_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP2_21_18, VI2_DATA0_VI2_B0_B, SEL_VI2_1),
        PINMUX_IPSR_DATA(IP2_25_22, A9),
-       PINMUX_IPSR_MODSEL_DATA(IP2_25_22, SCIFA1_CTS_N_B, SEL_SCIFA1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_25_22, SSI_WS5_B, SEL_SSI5_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_25_22, VI0_R5, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_25_22, VI0_R5_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_25_22, SCIFB2_TXD_C, SEL_SCIFB2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP2_25_22, TX2_B, SEL_SCIF2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_25_22, VI2_DATA1_VI2_B1_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP2_25_22, SCIFA1_CTS_N_B, SEL_SCIFA1_1),
+       PINMUX_IPSR_MSEL(IP2_25_22, SSI_WS5_B, SEL_SSI5_1),
+       PINMUX_IPSR_MSEL(IP2_25_22, VI0_R5, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP2_25_22, VI0_R5_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP2_25_22, SCIFB2_TXD_C, SEL_SCIFB2_2),
+       PINMUX_IPSR_MSEL(IP2_25_22, TX2_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP2_25_22, VI2_DATA1_VI2_B1_B, SEL_VI2_1),
        PINMUX_IPSR_DATA(IP2_28_26, A10),
-       PINMUX_IPSR_MODSEL_DATA(IP2_28_26, SSI_SDATA5_B, SEL_SSI5_1),
+       PINMUX_IPSR_MSEL(IP2_28_26, SSI_SDATA5_B, SEL_SSI5_1),
        PINMUX_IPSR_DATA(IP2_28_26, MSIOF2_SYNC),
-       PINMUX_IPSR_MODSEL_DATA(IP2_28_26, VI0_R6, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_28_26, VI0_R6_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_28_26, VI2_DATA2_VI2_B2_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP2_28_26, VI0_R6, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP2_28_26, VI0_R6_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP2_28_26, VI2_DATA2_VI2_B2_B, SEL_VI2_1),
 
        PINMUX_IPSR_DATA(IP3_3_0, A11),
-       PINMUX_IPSR_MODSEL_DATA(IP3_3_0, SCIFB2_CTS_N_B, SEL_SCIFB2_1),
+       PINMUX_IPSR_MSEL(IP3_3_0, SCIFB2_CTS_N_B, SEL_SCIFB2_1),
        PINMUX_IPSR_DATA(IP3_3_0, MSIOF2_SCK),
-       PINMUX_IPSR_MODSEL_DATA(IP3_3_0, VI1_R0, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_3_0, VI1_R0_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP3_3_0, VI1_R0, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP3_3_0, VI1_R0_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP3_3_0, VI2_G0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_3_0, VI2_DATA3_VI2_B3_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP3_3_0, VI2_DATA3_VI2_B3_B, SEL_VI2_1),
        PINMUX_IPSR_DATA(IP3_7_4, A12),
-       PINMUX_IPSR_MODSEL_DATA(IP3_7_4, SCIFB2_RXD_B, SEL_SCIFB2_1),
+       PINMUX_IPSR_MSEL(IP3_7_4, SCIFB2_RXD_B, SEL_SCIFB2_1),
        PINMUX_IPSR_DATA(IP3_7_4, MSIOF2_TXD),
-       PINMUX_IPSR_MODSEL_DATA(IP3_7_4, VI1_R1, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_7_4, VI1_R1_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP3_7_4, VI1_R1, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP3_7_4, VI1_R1_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP3_7_4, VI2_G1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_7_4, VI2_DATA4_VI2_B4_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP3_7_4, VI2_DATA4_VI2_B4_B, SEL_VI2_1),
        PINMUX_IPSR_DATA(IP3_11_8, A13),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_8, SCIFB2_RTS_N_B, SEL_SCIFB2_1),
+       PINMUX_IPSR_MSEL(IP3_11_8, SCIFB2_RTS_N_B, SEL_SCIFB2_1),
        PINMUX_IPSR_DATA(IP3_11_8, EX_WAIT2),
        PINMUX_IPSR_DATA(IP3_11_8, MSIOF2_RXD),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_8, VI1_R2, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_8, VI1_R2_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP3_11_8, VI1_R2, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP3_11_8, VI1_R2_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP3_11_8, VI2_G2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_8, VI2_DATA5_VI2_B5_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP3_11_8, VI2_DATA5_VI2_B5_B, SEL_VI2_1),
        PINMUX_IPSR_DATA(IP3_14_12, A14),
-       PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SCIFB2_TXD_B, SEL_SCIFB2_1),
+       PINMUX_IPSR_MSEL(IP3_14_12, SCIFB2_TXD_B, SEL_SCIFB2_1),
        PINMUX_IPSR_DATA(IP3_14_12, ATACS11_N),
        PINMUX_IPSR_DATA(IP3_14_12, MSIOF2_SS1),
        PINMUX_IPSR_DATA(IP3_17_15, A15),
-       PINMUX_IPSR_MODSEL_DATA(IP3_17_15, SCIFB2_SCK_B, SEL_SCIFB2_1),
+       PINMUX_IPSR_MSEL(IP3_17_15, SCIFB2_SCK_B, SEL_SCIFB2_1),
        PINMUX_IPSR_DATA(IP3_17_15, ATARD1_N),
        PINMUX_IPSR_DATA(IP3_17_15, MSIOF2_SS2),
        PINMUX_IPSR_DATA(IP3_19_18, A16),
        PINMUX_IPSR_DATA(IP3_19_18, ATAWR1_N),
        PINMUX_IPSR_DATA(IP3_22_20, A17),
-       PINMUX_IPSR_MODSEL_DATA(IP3_22_20, AD_DO_B, SEL_ADI_1),
+       PINMUX_IPSR_MSEL(IP3_22_20, AD_DO_B, SEL_ADI_1),
        PINMUX_IPSR_DATA(IP3_22_20, ATADIR1_N),
        PINMUX_IPSR_DATA(IP3_25_23, A18),
-       PINMUX_IPSR_MODSEL_DATA(IP3_25_23, AD_CLK_B, SEL_ADI_1),
+       PINMUX_IPSR_MSEL(IP3_25_23, AD_CLK_B, SEL_ADI_1),
        PINMUX_IPSR_DATA(IP3_25_23, ATAG1_N),
        PINMUX_IPSR_DATA(IP3_28_26, A19),
-       PINMUX_IPSR_MODSEL_DATA(IP3_28_26, AD_NCS_N_B, SEL_ADI_1),
+       PINMUX_IPSR_MSEL(IP3_28_26, AD_NCS_N_B, SEL_ADI_1),
        PINMUX_IPSR_DATA(IP3_28_26, ATACS01_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_28_26, EX_WAIT0_B, SEL_LBS_1),
+       PINMUX_IPSR_MSEL(IP3_28_26, EX_WAIT0_B, SEL_LBS_1),
        PINMUX_IPSR_DATA(IP3_31_29, A20),
        PINMUX_IPSR_DATA(IP3_31_29, SPCLK),
-       PINMUX_IPSR_MODSEL_DATA(IP3_31_29, VI1_R3, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_31_29, VI1_R3_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP3_31_29, VI1_R3, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP3_31_29, VI1_R3_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP3_31_29, VI2_G4),
 
        PINMUX_IPSR_DATA(IP4_2_0, A21),
        PINMUX_IPSR_DATA(IP4_2_0, MOSI_IO0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_2_0, VI1_R4, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_2_0, VI1_R4_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP4_2_0, VI1_R4, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP4_2_0, VI1_R4_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP4_2_0, VI2_G5),
        PINMUX_IPSR_DATA(IP4_5_3, A22),
        PINMUX_IPSR_DATA(IP4_5_3, MISO_IO1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_5_3, VI1_R5, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_5_3, VI1_R5_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP4_5_3, VI1_R5, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP4_5_3, VI1_R5_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP4_5_3, VI2_G6),
        PINMUX_IPSR_DATA(IP4_8_6, A23),
        PINMUX_IPSR_DATA(IP4_8_6, IO2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_8_6, VI1_G7, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_8_6, VI1_G7_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP4_8_6, VI1_G7, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP4_8_6, VI1_G7_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP4_8_6, VI2_G7),
        PINMUX_IPSR_DATA(IP4_11_9, A24),
        PINMUX_IPSR_DATA(IP4_11_9, IO3),
-       PINMUX_IPSR_MODSEL_DATA(IP4_11_9, VI1_R7, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_11_9, VI1_R7_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_11_9, VI2_CLKENB, SEL_VI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_11_9, VI2_CLKENB_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP4_11_9, VI1_R7, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP4_11_9, VI1_R7_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP4_11_9, VI2_CLKENB, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP4_11_9, VI2_CLKENB_B, SEL_VI2_1),
        PINMUX_IPSR_DATA(IP4_14_12, A25),
        PINMUX_IPSR_DATA(IP4_14_12, SSL),
-       PINMUX_IPSR_MODSEL_DATA(IP4_14_12, VI1_G6, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_14_12, VI1_G6_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_14_12, VI2_FIELD, SEL_VI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_14_12, VI2_FIELD_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP4_14_12, VI1_G6, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP4_14_12, VI1_G6_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP4_14_12, VI2_FIELD, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP4_14_12, VI2_FIELD_B, SEL_VI2_1),
        PINMUX_IPSR_DATA(IP4_17_15, CS0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP4_17_15, VI1_R6, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_17_15, VI1_R6_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP4_17_15, VI1_R6, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP4_17_15, VI1_R6_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP4_17_15, VI2_G3),
-       PINMUX_IPSR_MODSEL_DATA(IP4_17_15, MSIOF0_SS2_B, SEL_SOF0_1),
+       PINMUX_IPSR_MSEL(IP4_17_15, MSIOF0_SS2_B, SEL_SOF0_1),
        PINMUX_IPSR_DATA(IP4_20_18, CS1_N_A26),
        PINMUX_IPSR_DATA(IP4_20_18, SPEEDIN),
-       PINMUX_IPSR_MODSEL_DATA(IP4_20_18, VI0_R7, SEL_VI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_20_18, VI0_R7_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_20_18, VI2_CLK, SEL_VI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_20_18, VI2_CLK_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP4_20_18, VI0_R7, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP4_20_18, VI0_R7_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP4_20_18, VI2_CLK, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP4_20_18, VI2_CLK_B, SEL_VI2_1),
        PINMUX_IPSR_DATA(IP4_23_21, EX_CS0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP4_23_21, HRX1_B, SEL_HSCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_23_21, VI1_G5, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_23_21, VI1_G5_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP4_23_21, HRX1_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP4_23_21, VI1_G5, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP4_23_21, VI1_G5_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP4_23_21, VI2_R0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_23_21, HTX0_B, SEL_HSCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_23_21, MSIOF0_SS1_B, SEL_SOF0_1),
+       PINMUX_IPSR_MSEL(IP4_23_21, HTX0_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP4_23_21, MSIOF0_SS1_B, SEL_SOF0_1),
        PINMUX_IPSR_DATA(IP4_26_24, EX_CS1_N),
        PINMUX_IPSR_DATA(IP4_26_24, GPS_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP4_26_24, HCTS1_N_B, SEL_HSCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_26_24, VI1_FIELD, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_26_24, VI1_FIELD_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP4_26_24, HCTS1_N_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP4_26_24, VI1_FIELD, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP4_26_24, VI1_FIELD_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP4_26_24, VI2_R1),
        PINMUX_IPSR_DATA(IP4_29_27, EX_CS2_N),
        PINMUX_IPSR_DATA(IP4_29_27, GPS_SIGN),
-       PINMUX_IPSR_MODSEL_DATA(IP4_29_27, HRTS1_N_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP4_29_27, HRTS1_N_B, SEL_HSCIF1_1),
        PINMUX_IPSR_DATA(IP4_29_27, VI3_CLKENB),
-       PINMUX_IPSR_MODSEL_DATA(IP4_29_27, VI1_G0, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_29_27, VI1_G0_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP4_29_27, VI1_G0, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP4_29_27, VI1_G0_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP4_29_27, VI2_R2),
 
        PINMUX_IPSR_DATA(IP5_2_0, EX_CS3_N),
        PINMUX_IPSR_DATA(IP5_2_0, GPS_MAG),
        PINMUX_IPSR_DATA(IP5_2_0, VI3_FIELD),
-       PINMUX_IPSR_MODSEL_DATA(IP5_2_0, VI1_G1, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_2_0, VI1_G1_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP5_2_0, VI1_G1, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP5_2_0, VI1_G1_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP5_2_0, VI2_R3),
        PINMUX_IPSR_DATA(IP5_5_3, EX_CS4_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_5_3, MSIOF1_SCK_B, SEL_SOF1_1),
+       PINMUX_IPSR_MSEL(IP5_5_3, MSIOF1_SCK_B, SEL_SOF1_1),
        PINMUX_IPSR_DATA(IP5_5_3, VI3_HSYNC_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_5_3, VI2_HSYNC_N, SEL_VI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_5_3, IIC1_SCL, SEL_IIC1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_5_3, VI2_HSYNC_N_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP5_5_3, VI2_HSYNC_N, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP5_5_3, IIC1_SCL, SEL_IIC1_0),
+       PINMUX_IPSR_MSEL(IP5_5_3, VI2_HSYNC_N_B, SEL_VI2_1),
        PINMUX_IPSR_DATA(IP5_5_3, INTC_EN0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_5_3, I2C1_SCL, SEL_I2C1_0),
+       PINMUX_IPSR_MSEL(IP5_5_3, I2C1_SCL, SEL_I2C1_0),
        PINMUX_IPSR_DATA(IP5_9_6, EX_CS5_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_9_6, CAN0_RX, SEL_CAN0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_9_6, MSIOF1_RXD_B, SEL_SOF1_1),
+       PINMUX_IPSR_MSEL(IP5_9_6, CAN0_RX, SEL_CAN0_0),
+       PINMUX_IPSR_MSEL(IP5_9_6, MSIOF1_RXD_B, SEL_SOF1_1),
        PINMUX_IPSR_DATA(IP5_9_6, VI3_VSYNC_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_9_6, VI1_G2, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_9_6, VI1_G2_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP5_9_6, VI1_G2, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP5_9_6, VI1_G2_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP5_9_6, VI2_R4),
-       PINMUX_IPSR_MODSEL_DATA(IP5_9_6, IIC1_SDA, SEL_IIC1_0),
+       PINMUX_IPSR_MSEL(IP5_9_6, IIC1_SDA, SEL_IIC1_0),
        PINMUX_IPSR_DATA(IP5_9_6, INTC_EN1_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_9_6, I2C1_SDA, SEL_I2C1_0),
+       PINMUX_IPSR_MSEL(IP5_9_6, I2C1_SDA, SEL_I2C1_0),
        PINMUX_IPSR_DATA(IP5_12_10, BS_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_12_10, IETX, SEL_IEB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_12_10, HTX1_B, SEL_HSCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_12_10, CAN1_TX, SEL_CAN1_0),
+       PINMUX_IPSR_MSEL(IP5_12_10, IETX, SEL_IEB_0),
+       PINMUX_IPSR_MSEL(IP5_12_10, HTX1_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP5_12_10, CAN1_TX, SEL_CAN1_0),
        PINMUX_IPSR_DATA(IP5_12_10, DRACK0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_12_10, IETX_C, SEL_IEB_2),
+       PINMUX_IPSR_MSEL(IP5_12_10, IETX_C, SEL_IEB_2),
        PINMUX_IPSR_DATA(IP5_14_13, RD_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_14_13, CAN0_TX, SEL_CAN0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_14_13, SCIFA0_SCK_B, SEL_SCFA_1),
+       PINMUX_IPSR_MSEL(IP5_14_13, CAN0_TX, SEL_CAN0_0),
+       PINMUX_IPSR_MSEL(IP5_14_13, SCIFA0_SCK_B, SEL_SCFA_1),
        PINMUX_IPSR_DATA(IP5_17_15, RD_WR_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_17_15, VI1_G3, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_17_15, VI1_G3_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP5_17_15, VI1_G3, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP5_17_15, VI1_G3_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP5_17_15, VI2_R5),
-       PINMUX_IPSR_MODSEL_DATA(IP5_17_15, SCIFA0_RXD_B, SEL_SCFA_1),
+       PINMUX_IPSR_MSEL(IP5_17_15, SCIFA0_RXD_B, SEL_SCFA_1),
        PINMUX_IPSR_DATA(IP5_17_15, INTC_IRQ4_N),
        PINMUX_IPSR_DATA(IP5_20_18, WE0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_20_18, IECLK, SEL_IEB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_20_18, CAN_CLK, SEL_CANCLK_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_20_18, VI2_VSYNC_N, SEL_VI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_20_18, SCIFA0_TXD_B, SEL_SCFA_1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_20_18, VI2_VSYNC_N_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP5_20_18, IECLK, SEL_IEB_0),
+       PINMUX_IPSR_MSEL(IP5_20_18, CAN_CLK, SEL_CANCLK_0),
+       PINMUX_IPSR_MSEL(IP5_20_18, VI2_VSYNC_N, SEL_VI2_0),
+       PINMUX_IPSR_MSEL(IP5_20_18, SCIFA0_TXD_B, SEL_SCFA_1),
+       PINMUX_IPSR_MSEL(IP5_20_18, VI2_VSYNC_N_B, SEL_VI2_1),
        PINMUX_IPSR_DATA(IP5_23_21, WE1_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_21, IERX, SEL_IEB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_21, CAN1_RX, SEL_CAN1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_21, VI1_G4, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_21, VI1_G4_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP5_23_21, IERX, SEL_IEB_0),
+       PINMUX_IPSR_MSEL(IP5_23_21, CAN1_RX, SEL_CAN1_0),
+       PINMUX_IPSR_MSEL(IP5_23_21, VI1_G4, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP5_23_21, VI1_G4_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP5_23_21, VI2_R6),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_21, SCIFA0_CTS_N_B, SEL_SCFA_1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_21, IERX_C, SEL_IEB_2),
-       PINMUX_IPSR_MODSEL_DATA(IP5_26_24, EX_WAIT0, SEL_LBS_0),
+       PINMUX_IPSR_MSEL(IP5_23_21, SCIFA0_CTS_N_B, SEL_SCFA_1),
+       PINMUX_IPSR_MSEL(IP5_23_21, IERX_C, SEL_IEB_2),
+       PINMUX_IPSR_MSEL(IP5_26_24, EX_WAIT0, SEL_LBS_0),
        PINMUX_IPSR_DATA(IP5_26_24, IRQ3),
        PINMUX_IPSR_DATA(IP5_26_24, INTC_IRQ3_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_26_24, VI3_CLK, SEL_VI3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_26_24, SCIFA0_RTS_N_B, SEL_SCFA_1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_26_24, HRX0_B, SEL_HSCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_26_24, MSIOF0_SCK_B, SEL_SOF0_1),
+       PINMUX_IPSR_MSEL(IP5_26_24, VI3_CLK, SEL_VI3_0),
+       PINMUX_IPSR_MSEL(IP5_26_24, SCIFA0_RTS_N_B, SEL_SCFA_1),
+       PINMUX_IPSR_MSEL(IP5_26_24, HRX0_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP5_26_24, MSIOF0_SCK_B, SEL_SOF0_1),
        PINMUX_IPSR_DATA(IP5_29_27, DREQ0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP5_29_27, VI1_HSYNC_N, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_29_27, VI1_HSYNC_N_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP5_29_27, VI1_HSYNC_N, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP5_29_27, VI1_HSYNC_N_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP5_29_27, VI2_R7),
-       PINMUX_IPSR_MODSEL_DATA(IP5_29_27, SSI_SCK78_C, SEL_SSI7_2),
-       PINMUX_IPSR_MODSEL_DATA(IP5_29_27, SSI_WS78_B, SEL_SSI7_1),
+       PINMUX_IPSR_MSEL(IP5_29_27, SSI_SCK78_C, SEL_SSI7_2),
+       PINMUX_IPSR_MSEL(IP5_29_27, SSI_WS78_B, SEL_SSI7_1),
 
        PINMUX_IPSR_DATA(IP6_2_0, DACK0),
        PINMUX_IPSR_DATA(IP6_2_0, IRQ0),
        PINMUX_IPSR_DATA(IP6_2_0, INTC_IRQ0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, SSI_SCK6_B, SEL_SSI6_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, VI1_VSYNC_N, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, VI1_VSYNC_N_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, SSI_WS78_C, SEL_SSI7_2),
+       PINMUX_IPSR_MSEL(IP6_2_0, SSI_SCK6_B, SEL_SSI6_1),
+       PINMUX_IPSR_MSEL(IP6_2_0, VI1_VSYNC_N, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP6_2_0, VI1_VSYNC_N_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP6_2_0, SSI_WS78_C, SEL_SSI7_2),
        PINMUX_IPSR_DATA(IP6_5_3, DREQ1_N),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, VI1_CLKENB, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, VI1_CLKENB_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SSI_SDATA7_C, SEL_SSI7_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SSI_SCK78_B, SEL_SSI7_1),
+       PINMUX_IPSR_MSEL(IP6_5_3, VI1_CLKENB, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP6_5_3, VI1_CLKENB_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP6_5_3, SSI_SDATA7_C, SEL_SSI7_2),
+       PINMUX_IPSR_MSEL(IP6_5_3, SSI_SCK78_B, SEL_SSI7_1),
        PINMUX_IPSR_DATA(IP6_8_6, DACK1),
        PINMUX_IPSR_DATA(IP6_8_6, IRQ1),
        PINMUX_IPSR_DATA(IP6_8_6, INTC_IRQ1_N),
-       PINMUX_IPSR_MODSEL_DATA(IP6_8_6, SSI_WS6_B, SEL_SSI6_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_8_6, SSI_SDATA8_C, SEL_SSI8_2),
+       PINMUX_IPSR_MSEL(IP6_8_6, SSI_WS6_B, SEL_SSI6_1),
+       PINMUX_IPSR_MSEL(IP6_8_6, SSI_SDATA8_C, SEL_SSI8_2),
        PINMUX_IPSR_DATA(IP6_10_9, DREQ2_N),
-       PINMUX_IPSR_MODSEL_DATA(IP6_10_9, HSCK1_B, SEL_HSCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_10_9, HCTS0_N_B, SEL_HSCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_10_9, MSIOF0_TXD_B, SEL_SOF0_1),
+       PINMUX_IPSR_MSEL(IP6_10_9, HSCK1_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP6_10_9, HCTS0_N_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP6_10_9, MSIOF0_TXD_B, SEL_SOF0_1),
        PINMUX_IPSR_DATA(IP6_13_11, DACK2),
        PINMUX_IPSR_DATA(IP6_13_11, IRQ2),
        PINMUX_IPSR_DATA(IP6_13_11, INTC_IRQ2_N),
-       PINMUX_IPSR_MODSEL_DATA(IP6_13_11, SSI_SDATA6_B, SEL_SSI6_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_13_11, HRTS0_N_B, SEL_HSCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_13_11, MSIOF0_RXD_B, SEL_SOF0_1),
+       PINMUX_IPSR_MSEL(IP6_13_11, SSI_SDATA6_B, SEL_SSI6_1),
+       PINMUX_IPSR_MSEL(IP6_13_11, HRTS0_N_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP6_13_11, MSIOF0_RXD_B, SEL_SOF0_1),
        PINMUX_IPSR_DATA(IP6_16_14, ETH_CRS_DV),
-       PINMUX_IPSR_MODSEL_DATA(IP6_16_14, STP_ISCLK_0_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_16_14, TS_SDEN0_D, SEL_TSIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP6_16_14, GLO_Q0_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_16_14, IIC2_SCL_E, SEL_IIC2_4),
-       PINMUX_IPSR_MODSEL_DATA(IP6_16_14, I2C2_SCL_E, SEL_I2C2_4),
+       PINMUX_IPSR_MSEL(IP6_16_14, STP_ISCLK_0_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP6_16_14, TS_SDEN0_D, SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP6_16_14, GLO_Q0_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP6_16_14, IIC2_SCL_E, SEL_IIC2_4),
+       PINMUX_IPSR_MSEL(IP6_16_14, I2C2_SCL_E, SEL_I2C2_4),
        PINMUX_IPSR_DATA(IP6_19_17, ETH_RX_ER),
-       PINMUX_IPSR_MODSEL_DATA(IP6_19_17, STP_ISD_0_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_19_17, TS_SPSYNC0_D, SEL_TSIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP6_19_17, GLO_Q1_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_19_17, IIC2_SDA_E, SEL_IIC2_4),
-       PINMUX_IPSR_MODSEL_DATA(IP6_19_17, I2C2_SDA_E, SEL_I2C2_4),
+       PINMUX_IPSR_MSEL(IP6_19_17, STP_ISD_0_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP6_19_17, TS_SPSYNC0_D, SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP6_19_17, GLO_Q1_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP6_19_17, IIC2_SDA_E, SEL_IIC2_4),
+       PINMUX_IPSR_MSEL(IP6_19_17, I2C2_SDA_E, SEL_I2C2_4),
        PINMUX_IPSR_DATA(IP6_22_20, ETH_RXD0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_22_20, STP_ISEN_0_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_22_20, TS_SDAT0_D, SEL_TSIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP6_22_20, GLO_I0_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_22_20, SCIFB1_SCK_G, SEL_SCIFB1_6),
-       PINMUX_IPSR_MODSEL_DATA(IP6_22_20, SCK1_E, SEL_SCIF1_4),
+       PINMUX_IPSR_MSEL(IP6_22_20, STP_ISEN_0_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP6_22_20, TS_SDAT0_D, SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP6_22_20, GLO_I0_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP6_22_20, SCIFB1_SCK_G, SEL_SCIFB1_6),
+       PINMUX_IPSR_MSEL(IP6_22_20, SCK1_E, SEL_SCIF1_4),
        PINMUX_IPSR_DATA(IP6_25_23, ETH_RXD1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_25_23, HRX0_E, SEL_HSCIF0_4),
-       PINMUX_IPSR_MODSEL_DATA(IP6_25_23, STP_ISSYNC_0_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_25_23, TS_SCK0_D, SEL_TSIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP6_25_23, GLO_I1_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_25_23, SCIFB1_RXD_G, SEL_SCIFB1_6),
-       PINMUX_IPSR_MODSEL_DATA(IP6_25_23, RX1_E, SEL_SCIF1_4),
+       PINMUX_IPSR_MSEL(IP6_25_23, HRX0_E, SEL_HSCIF0_4),
+       PINMUX_IPSR_MSEL(IP6_25_23, STP_ISSYNC_0_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP6_25_23, TS_SCK0_D, SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP6_25_23, GLO_I1_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP6_25_23, SCIFB1_RXD_G, SEL_SCIFB1_6),
+       PINMUX_IPSR_MSEL(IP6_25_23, RX1_E, SEL_SCIF1_4),
        PINMUX_IPSR_DATA(IP6_28_26, ETH_LINK),
-       PINMUX_IPSR_MODSEL_DATA(IP6_28_26, HTX0_E, SEL_HSCIF0_4),
-       PINMUX_IPSR_MODSEL_DATA(IP6_28_26, STP_IVCXO27_0_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_28_26, SCIFB1_TXD_G, SEL_SCIFB1_6),
-       PINMUX_IPSR_MODSEL_DATA(IP6_28_26, TX1_E, SEL_SCIF1_4),
+       PINMUX_IPSR_MSEL(IP6_28_26, HTX0_E, SEL_HSCIF0_4),
+       PINMUX_IPSR_MSEL(IP6_28_26, STP_IVCXO27_0_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP6_28_26, SCIFB1_TXD_G, SEL_SCIFB1_6),
+       PINMUX_IPSR_MSEL(IP6_28_26, TX1_E, SEL_SCIF1_4),
        PINMUX_IPSR_DATA(IP6_31_29, ETH_REF_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP6_31_29, HCTS0_N_E, SEL_HSCIF0_4),
-       PINMUX_IPSR_MODSEL_DATA(IP6_31_29, STP_IVCXO27_1_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_31_29, HRX0_F, SEL_HSCIF0_5),
+       PINMUX_IPSR_MSEL(IP6_31_29, HCTS0_N_E, SEL_HSCIF0_4),
+       PINMUX_IPSR_MSEL(IP6_31_29, STP_IVCXO27_1_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP6_31_29, HRX0_F, SEL_HSCIF0_5),
 
        PINMUX_IPSR_DATA(IP7_2_0, ETH_MDIO),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, HRTS0_N_E, SEL_HSCIF0_4),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, SIM0_D_C, SEL_SIM_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, HCTS0_N_F, SEL_HSCIF0_5),
+       PINMUX_IPSR_MSEL(IP7_2_0, HRTS0_N_E, SEL_HSCIF0_4),
+       PINMUX_IPSR_MSEL(IP7_2_0, SIM0_D_C, SEL_SIM_2),
+       PINMUX_IPSR_MSEL(IP7_2_0, HCTS0_N_F, SEL_HSCIF0_5),
        PINMUX_IPSR_DATA(IP7_5_3, ETH_TXD1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, HTX0_F, SEL_HSCIF0_5),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, BPFCLK_G, SEL_FM_6),
+       PINMUX_IPSR_MSEL(IP7_5_3, HTX0_F, SEL_HSCIF0_5),
+       PINMUX_IPSR_MSEL(IP7_5_3, BPFCLK_G, SEL_FM_6),
        PINMUX_IPSR_DATA(IP7_7_6, ETH_TX_EN),
-       PINMUX_IPSR_MODSEL_DATA(IP7_7_6, SIM0_CLK_C, SEL_SIM_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_7_6, HRTS0_N_F, SEL_HSCIF0_5),
+       PINMUX_IPSR_MSEL(IP7_7_6, SIM0_CLK_C, SEL_SIM_2),
+       PINMUX_IPSR_MSEL(IP7_7_6, HRTS0_N_F, SEL_HSCIF0_5),
        PINMUX_IPSR_DATA(IP7_9_8, ETH_MAGIC),
-       PINMUX_IPSR_MODSEL_DATA(IP7_9_8, SIM0_RST_C, SEL_SIM_2),
+       PINMUX_IPSR_MSEL(IP7_9_8, SIM0_RST_C, SEL_SIM_2),
        PINMUX_IPSR_DATA(IP7_12_10, ETH_TXD0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_12_10, STP_ISCLK_1_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_12_10, TS_SDEN1_C, SEL_TSIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_12_10, GLO_SCLK_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP7_12_10, STP_ISCLK_1_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP7_12_10, TS_SDEN1_C, SEL_TSIF1_2),
+       PINMUX_IPSR_MSEL(IP7_12_10, GLO_SCLK_C, SEL_GPS_2),
        PINMUX_IPSR_DATA(IP7_15_13, ETH_MDC),
-       PINMUX_IPSR_MODSEL_DATA(IP7_15_13, STP_ISD_1_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_15_13, TS_SPSYNC1_C, SEL_TSIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_15_13, GLO_SDATA_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP7_15_13, STP_ISD_1_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP7_15_13, TS_SPSYNC1_C, SEL_TSIF1_2),
+       PINMUX_IPSR_MSEL(IP7_15_13, GLO_SDATA_C, SEL_GPS_2),
        PINMUX_IPSR_DATA(IP7_18_16, PWM0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_18_16, SCIFA2_SCK_C, SEL_SCIFA2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_18_16, STP_ISEN_1_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_18_16, TS_SDAT1_C, SEL_TSIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_18_16, GLO_SS_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP7_18_16, SCIFA2_SCK_C, SEL_SCIFA2_2),
+       PINMUX_IPSR_MSEL(IP7_18_16, STP_ISEN_1_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP7_18_16, TS_SDAT1_C, SEL_TSIF1_2),
+       PINMUX_IPSR_MSEL(IP7_18_16, GLO_SS_C, SEL_GPS_2),
        PINMUX_IPSR_DATA(IP7_21_19, PWM1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_21_19, SCIFA2_TXD_C, SEL_SCIFA2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_21_19, STP_ISSYNC_1_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_21_19, TS_SCK1_C, SEL_TSIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_21_19, GLO_RFON_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP7_21_19, SCIFA2_TXD_C, SEL_SCIFA2_2),
+       PINMUX_IPSR_MSEL(IP7_21_19, STP_ISSYNC_1_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP7_21_19, TS_SCK1_C, SEL_TSIF1_2),
+       PINMUX_IPSR_MSEL(IP7_21_19, GLO_RFON_C, SEL_GPS_2),
        PINMUX_IPSR_DATA(IP7_21_19, PCMOE_N),
        PINMUX_IPSR_DATA(IP7_24_22, PWM2),
        PINMUX_IPSR_DATA(IP7_24_22, PWMFSW0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_24_22, SCIFA2_RXD_C, SEL_SCIFA2_2),
+       PINMUX_IPSR_MSEL(IP7_24_22, SCIFA2_RXD_C, SEL_SCIFA2_2),
        PINMUX_IPSR_DATA(IP7_24_22, PCMWE_N),
-       PINMUX_IPSR_MODSEL_DATA(IP7_24_22, IECLK_C, SEL_IEB_2),
+       PINMUX_IPSR_MSEL(IP7_24_22, IECLK_C, SEL_IEB_2),
        PINMUX_IPSR_DATA(IP7_26_25, DU_DOTCLKIN1),
        PINMUX_IPSR_DATA(IP7_26_25, AUDIO_CLKC),
        PINMUX_IPSR_DATA(IP7_26_25, AUDIO_CLKOUT_C),
-       PINMUX_IPSR_MODSEL_DATA(IP7_28_27, VI0_CLK, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP7_28_27, VI0_CLK, SEL_VI0_0),
        PINMUX_IPSR_DATA(IP7_28_27, ATACS00_N),
        PINMUX_IPSR_DATA(IP7_28_27, AVB_RXD1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_30_29, VI0_DATA0_VI0_B0, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP7_30_29, VI0_DATA0_VI0_B0, SEL_VI0_0),
        PINMUX_IPSR_DATA(IP7_30_29, ATACS10_N),
        PINMUX_IPSR_DATA(IP7_30_29, AVB_RXD2),
 
-       PINMUX_IPSR_MODSEL_DATA(IP8_1_0, VI0_DATA1_VI0_B1, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP8_1_0, VI0_DATA1_VI0_B1, SEL_VI0_0),
        PINMUX_IPSR_DATA(IP8_1_0, ATARD0_N),
        PINMUX_IPSR_DATA(IP8_1_0, AVB_RXD3),
-       PINMUX_IPSR_MODSEL_DATA(IP8_3_2, VI0_DATA2_VI0_B2, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP8_3_2, VI0_DATA2_VI0_B2, SEL_VI0_0),
        PINMUX_IPSR_DATA(IP8_3_2, ATAWR0_N),
        PINMUX_IPSR_DATA(IP8_3_2, AVB_RXD4),
-       PINMUX_IPSR_MODSEL_DATA(IP8_5_4, VI0_DATA3_VI0_B3, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP8_5_4, VI0_DATA3_VI0_B3, SEL_VI0_0),
        PINMUX_IPSR_DATA(IP8_5_4, ATADIR0_N),
        PINMUX_IPSR_DATA(IP8_5_4, AVB_RXD5),
-       PINMUX_IPSR_MODSEL_DATA(IP8_7_6, VI0_DATA4_VI0_B4, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP8_7_6, VI0_DATA4_VI0_B4, SEL_VI0_0),
        PINMUX_IPSR_DATA(IP8_7_6, ATAG0_N),
        PINMUX_IPSR_DATA(IP8_7_6, AVB_RXD6),
-       PINMUX_IPSR_MODSEL_DATA(IP8_9_8, VI0_DATA5_VI0_B5, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP8_9_8, VI0_DATA5_VI0_B5, SEL_VI0_0),
        PINMUX_IPSR_DATA(IP8_9_8, EX_WAIT1),
        PINMUX_IPSR_DATA(IP8_9_8, AVB_RXD7),
-       PINMUX_IPSR_MODSEL_DATA(IP8_11_10, VI0_DATA6_VI0_B6, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP8_11_10, VI0_DATA6_VI0_B6, SEL_VI0_0),
        PINMUX_IPSR_DATA(IP8_11_10, AVB_RX_ER),
-       PINMUX_IPSR_MODSEL_DATA(IP8_13_12, VI0_DATA7_VI0_B7, SEL_VI0_0),
+       PINMUX_IPSR_MSEL(IP8_13_12, VI0_DATA7_VI0_B7, SEL_VI0_0),
        PINMUX_IPSR_DATA(IP8_13_12, AVB_RX_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP8_15_14, VI1_CLK, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP8_15_14, VI1_CLK, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP8_15_14, AVB_RX_DV),
-       PINMUX_IPSR_MODSEL_DATA(IP8_17_16, VI1_DATA0_VI1_B0, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_17_16, SCIFA1_SCK_D, SEL_SCIFA1_3),
+       PINMUX_IPSR_MSEL(IP8_17_16, VI1_DATA0_VI1_B0, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP8_17_16, SCIFA1_SCK_D, SEL_SCIFA1_3),
        PINMUX_IPSR_DATA(IP8_17_16, AVB_CRS),
-       PINMUX_IPSR_MODSEL_DATA(IP8_19_18, VI1_DATA1_VI1_B1, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_19_18, SCIFA1_RXD_D, SEL_SCIFA1_3),
+       PINMUX_IPSR_MSEL(IP8_19_18, VI1_DATA1_VI1_B1, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP8_19_18, SCIFA1_RXD_D, SEL_SCIFA1_3),
        PINMUX_IPSR_DATA(IP8_19_18, AVB_MDC),
-       PINMUX_IPSR_MODSEL_DATA(IP8_21_20, VI1_DATA2_VI1_B2, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_21_20, SCIFA1_TXD_D, SEL_SCIFA1_3),
+       PINMUX_IPSR_MSEL(IP8_21_20, VI1_DATA2_VI1_B2, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP8_21_20, SCIFA1_TXD_D, SEL_SCIFA1_3),
        PINMUX_IPSR_DATA(IP8_21_20, AVB_MDIO),
-       PINMUX_IPSR_MODSEL_DATA(IP8_23_22, VI1_DATA3_VI1_B3, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_23_22, SCIFA1_CTS_N_D, SEL_SCIFA1_3),
+       PINMUX_IPSR_MSEL(IP8_23_22, VI1_DATA3_VI1_B3, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP8_23_22, SCIFA1_CTS_N_D, SEL_SCIFA1_3),
        PINMUX_IPSR_DATA(IP8_23_22, AVB_GTX_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP8_25_24, VI1_DATA4_VI1_B4, SEL_VI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_25_24, SCIFA1_RTS_N_D, SEL_SCIFA1_3),
+       PINMUX_IPSR_MSEL(IP8_25_24, VI1_DATA4_VI1_B4, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP8_25_24, SCIFA1_RTS_N_D, SEL_SCIFA1_3),
        PINMUX_IPSR_DATA(IP8_25_24, AVB_MAGIC),
-       PINMUX_IPSR_MODSEL_DATA(IP8_26, VI1_DATA5_VI1_B5, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP8_26, VI1_DATA5_VI1_B5, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP8_26, AVB_PHY_INT),
-       PINMUX_IPSR_MODSEL_DATA(IP8_27, VI1_DATA6_VI1_B6, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP8_27, VI1_DATA6_VI1_B6, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP8_27, AVB_GTXREFCLK),
        PINMUX_IPSR_DATA(IP8_28, SD0_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP8_28, VI1_DATA0_VI1_B0_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP8_28, VI1_DATA0_VI1_B0_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP8_30_29, SD0_CMD),
-       PINMUX_IPSR_MODSEL_DATA(IP8_30_29, SCIFB1_SCK_B, SEL_SCIFB1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_30_29, VI1_DATA1_VI1_B1_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP8_30_29, SCIFB1_SCK_B, SEL_SCIFB1_1),
+       PINMUX_IPSR_MSEL(IP8_30_29, VI1_DATA1_VI1_B1_B, SEL_VI1_1),
 
        PINMUX_IPSR_DATA(IP9_1_0, SD0_DAT0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_1_0, SCIFB1_RXD_B, SEL_SCIFB1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_1_0, VI1_DATA2_VI1_B2_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP9_1_0, SCIFB1_RXD_B, SEL_SCIFB1_1),
+       PINMUX_IPSR_MSEL(IP9_1_0, VI1_DATA2_VI1_B2_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP9_3_2, SD0_DAT1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_3_2, SCIFB1_TXD_B, SEL_SCIFB1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_3_2, VI1_DATA3_VI1_B3_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP9_3_2, SCIFB1_TXD_B, SEL_SCIFB1_1),
+       PINMUX_IPSR_MSEL(IP9_3_2, VI1_DATA3_VI1_B3_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP9_5_4, SD0_DAT2),
-       PINMUX_IPSR_MODSEL_DATA(IP9_5_4, SCIFB1_CTS_N_B, SEL_SCIFB1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_5_4, VI1_DATA4_VI1_B4_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP9_5_4, SCIFB1_CTS_N_B, SEL_SCIFB1_1),
+       PINMUX_IPSR_MSEL(IP9_5_4, VI1_DATA4_VI1_B4_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP9_7_6, SD0_DAT3),
-       PINMUX_IPSR_MODSEL_DATA(IP9_7_6, SCIFB1_RTS_N_B, SEL_SCIFB1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_7_6, VI1_DATA5_VI1_B5_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP9_7_6, SCIFB1_RTS_N_B, SEL_SCIFB1_1),
+       PINMUX_IPSR_MSEL(IP9_7_6, VI1_DATA5_VI1_B5_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP9_11_8, SD0_CD),
        PINMUX_IPSR_DATA(IP9_11_8, MMC0_D6),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_8, TS_SDEN0_B, SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP9_11_8, TS_SDEN0_B, SEL_TSIF0_1),
        PINMUX_IPSR_DATA(IP9_11_8, USB0_EXTP),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_8, GLO_SCLK, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_8, VI1_DATA6_VI1_B6_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_8, IIC1_SCL_B, SEL_IIC1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_8, I2C1_SCL_B, SEL_I2C1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_8, VI2_DATA6_VI2_B6_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP9_11_8, GLO_SCLK, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP9_11_8, VI1_DATA6_VI1_B6_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP9_11_8, IIC1_SCL_B, SEL_IIC1_1),
+       PINMUX_IPSR_MSEL(IP9_11_8, I2C1_SCL_B, SEL_I2C1_1),
+       PINMUX_IPSR_MSEL(IP9_11_8, VI2_DATA6_VI2_B6_B, SEL_VI2_1),
        PINMUX_IPSR_DATA(IP9_15_12, SD0_WP),
        PINMUX_IPSR_DATA(IP9_15_12, MMC0_D7),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_12, TS_SPSYNC0_B, SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP9_15_12, TS_SPSYNC0_B, SEL_TSIF0_1),
        PINMUX_IPSR_DATA(IP9_15_12, USB0_IDIN),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_12, GLO_SDATA, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_12, VI1_DATA7_VI1_B7_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_12, IIC1_SDA_B, SEL_IIC1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_12, I2C1_SDA_B, SEL_I2C1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_12, VI2_DATA7_VI2_B7_B, SEL_VI2_1),
+       PINMUX_IPSR_MSEL(IP9_15_12, GLO_SDATA, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP9_15_12, VI1_DATA7_VI1_B7_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP9_15_12, IIC1_SDA_B, SEL_IIC1_1),
+       PINMUX_IPSR_MSEL(IP9_15_12, I2C1_SDA_B, SEL_I2C1_1),
+       PINMUX_IPSR_MSEL(IP9_15_12, VI2_DATA7_VI2_B7_B, SEL_VI2_1),
        PINMUX_IPSR_DATA(IP9_17_16, SD1_CLK),
        PINMUX_IPSR_DATA(IP9_17_16, AVB_TX_EN),
        PINMUX_IPSR_DATA(IP9_19_18, SD1_CMD),
        PINMUX_IPSR_DATA(IP9_19_18, AVB_TX_ER),
-       PINMUX_IPSR_MODSEL_DATA(IP9_19_18, SCIFB0_SCK_B, SEL_SCIFB_1),
+       PINMUX_IPSR_MSEL(IP9_19_18, SCIFB0_SCK_B, SEL_SCIFB_1),
        PINMUX_IPSR_DATA(IP9_21_20, SD1_DAT0),
        PINMUX_IPSR_DATA(IP9_21_20, AVB_TX_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP9_21_20, SCIFB0_RXD_B, SEL_SCIFB_1),
+       PINMUX_IPSR_MSEL(IP9_21_20, SCIFB0_RXD_B, SEL_SCIFB_1),
        PINMUX_IPSR_DATA(IP9_23_22, SD1_DAT1),
        PINMUX_IPSR_DATA(IP9_23_22, AVB_LINK),
-       PINMUX_IPSR_MODSEL_DATA(IP9_23_22, SCIFB0_TXD_B, SEL_SCIFB_1),
+       PINMUX_IPSR_MSEL(IP9_23_22, SCIFB0_TXD_B, SEL_SCIFB_1),
        PINMUX_IPSR_DATA(IP9_25_24, SD1_DAT2),
        PINMUX_IPSR_DATA(IP9_25_24, AVB_COL),
-       PINMUX_IPSR_MODSEL_DATA(IP9_25_24, SCIFB0_CTS_N_B, SEL_SCIFB_1),
+       PINMUX_IPSR_MSEL(IP9_25_24, SCIFB0_CTS_N_B, SEL_SCIFB_1),
        PINMUX_IPSR_DATA(IP9_27_26, SD1_DAT3),
        PINMUX_IPSR_DATA(IP9_27_26, AVB_RXD0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_27_26, SCIFB0_RTS_N_B, SEL_SCIFB_1),
+       PINMUX_IPSR_MSEL(IP9_27_26, SCIFB0_RTS_N_B, SEL_SCIFB_1),
        PINMUX_IPSR_DATA(IP9_31_28, SD1_CD),
        PINMUX_IPSR_DATA(IP9_31_28, MMC1_D6),
-       PINMUX_IPSR_MODSEL_DATA(IP9_31_28, TS_SDEN1, SEL_TSIF1_0),
+       PINMUX_IPSR_MSEL(IP9_31_28, TS_SDEN1, SEL_TSIF1_0),
        PINMUX_IPSR_DATA(IP9_31_28, USB1_EXTP),
-       PINMUX_IPSR_MODSEL_DATA(IP9_31_28, GLO_SS, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_31_28, VI0_CLK_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_31_28, IIC2_SCL_D, SEL_IIC2_3),
-       PINMUX_IPSR_MODSEL_DATA(IP9_31_28, I2C2_SCL_D, SEL_I2C2_3),
-       PINMUX_IPSR_MODSEL_DATA(IP9_31_28, SIM0_CLK_B, SEL_SIM_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_31_28, VI3_CLK_B, SEL_VI3_1),
+       PINMUX_IPSR_MSEL(IP9_31_28, GLO_SS, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP9_31_28, VI0_CLK_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP9_31_28, IIC2_SCL_D, SEL_IIC2_3),
+       PINMUX_IPSR_MSEL(IP9_31_28, I2C2_SCL_D, SEL_I2C2_3),
+       PINMUX_IPSR_MSEL(IP9_31_28, SIM0_CLK_B, SEL_SIM_1),
+       PINMUX_IPSR_MSEL(IP9_31_28, VI3_CLK_B, SEL_VI3_1),
 
        PINMUX_IPSR_DATA(IP10_3_0, SD1_WP),
        PINMUX_IPSR_DATA(IP10_3_0, MMC1_D7),
-       PINMUX_IPSR_MODSEL_DATA(IP10_3_0, TS_SPSYNC1, SEL_TSIF1_0),
+       PINMUX_IPSR_MSEL(IP10_3_0, TS_SPSYNC1, SEL_TSIF1_0),
        PINMUX_IPSR_DATA(IP10_3_0, USB1_IDIN),
-       PINMUX_IPSR_MODSEL_DATA(IP10_3_0, GLO_RFON, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_3_0, VI1_CLK_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_3_0, IIC2_SDA_D, SEL_IIC2_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_3_0, I2C2_SDA_D, SEL_I2C2_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_3_0, SIM0_D_B, SEL_SIM_1),
+       PINMUX_IPSR_MSEL(IP10_3_0, GLO_RFON, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP10_3_0, VI1_CLK_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP10_3_0, IIC2_SDA_D, SEL_IIC2_3),
+       PINMUX_IPSR_MSEL(IP10_3_0, I2C2_SDA_D, SEL_I2C2_3),
+       PINMUX_IPSR_MSEL(IP10_3_0, SIM0_D_B, SEL_SIM_1),
        PINMUX_IPSR_DATA(IP10_6_4, SD2_CLK),
        PINMUX_IPSR_DATA(IP10_6_4, MMC0_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP10_6_4, SIM0_CLK, SEL_SIM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_6_4, VI0_DATA0_VI0_B0_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_6_4, TS_SDEN0_C, SEL_TSIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_6_4, GLO_SCLK_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_6_4, VI3_DATA0_B, SEL_VI3_1),
+       PINMUX_IPSR_MSEL(IP10_6_4, SIM0_CLK, SEL_SIM_0),
+       PINMUX_IPSR_MSEL(IP10_6_4, VI0_DATA0_VI0_B0_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP10_6_4, TS_SDEN0_C, SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP10_6_4, GLO_SCLK_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP10_6_4, VI3_DATA0_B, SEL_VI3_1),
        PINMUX_IPSR_DATA(IP10_10_7, SD2_CMD),
        PINMUX_IPSR_DATA(IP10_10_7, MMC0_CMD),
-       PINMUX_IPSR_MODSEL_DATA(IP10_10_7, SIM0_D, SEL_SIM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_10_7, VI0_DATA1_VI0_B1_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_10_7, SCIFB1_SCK_E, SEL_SCIFB1_4),
-       PINMUX_IPSR_MODSEL_DATA(IP10_10_7, SCK1_D, SEL_SCIF1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_10_7, TS_SPSYNC0_C, SEL_TSIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_10_7, GLO_SDATA_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_10_7, VI3_DATA1_B, SEL_VI3_1),
+       PINMUX_IPSR_MSEL(IP10_10_7, SIM0_D, SEL_SIM_0),
+       PINMUX_IPSR_MSEL(IP10_10_7, VI0_DATA1_VI0_B1_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP10_10_7, SCIFB1_SCK_E, SEL_SCIFB1_4),
+       PINMUX_IPSR_MSEL(IP10_10_7, SCK1_D, SEL_SCIF1_3),
+       PINMUX_IPSR_MSEL(IP10_10_7, TS_SPSYNC0_C, SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP10_10_7, GLO_SDATA_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP10_10_7, VI3_DATA1_B, SEL_VI3_1),
        PINMUX_IPSR_DATA(IP10_14_11, SD2_DAT0),
        PINMUX_IPSR_DATA(IP10_14_11, MMC0_D0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_11, FMCLK_B, SEL_FM_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_11, VI0_DATA2_VI0_B2_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_11, SCIFB1_RXD_E, SEL_SCIFB1_4),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_11, RX1_D, SEL_SCIF1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_11, TS_SDAT0_C, SEL_TSIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_11, GLO_SS_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_11, VI3_DATA2_B, SEL_VI3_1),
+       PINMUX_IPSR_MSEL(IP10_14_11, FMCLK_B, SEL_FM_1),
+       PINMUX_IPSR_MSEL(IP10_14_11, VI0_DATA2_VI0_B2_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP10_14_11, SCIFB1_RXD_E, SEL_SCIFB1_4),
+       PINMUX_IPSR_MSEL(IP10_14_11, RX1_D, SEL_SCIF1_3),
+       PINMUX_IPSR_MSEL(IP10_14_11, TS_SDAT0_C, SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP10_14_11, GLO_SS_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP10_14_11, VI3_DATA2_B, SEL_VI3_1),
        PINMUX_IPSR_DATA(IP10_18_15, SD2_DAT1),
        PINMUX_IPSR_DATA(IP10_18_15, MMC0_D1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_18_15, FMIN_B, SEL_FM_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_18_15, VI0_DATA3_VI0_B3_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_18_15, SCIFB1_TXD_E, SEL_SCIFB1_4),
-       PINMUX_IPSR_MODSEL_DATA(IP10_18_15, TX1_D, SEL_SCIF1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_18_15, TS_SCK0_C, SEL_TSIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_18_15, GLO_RFON_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_18_15, VI3_DATA3_B, SEL_VI3_1),
+       PINMUX_IPSR_MSEL(IP10_18_15, FMIN_B, SEL_FM_1),
+       PINMUX_IPSR_MSEL(IP10_18_15, VI0_DATA3_VI0_B3_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP10_18_15, SCIFB1_TXD_E, SEL_SCIFB1_4),
+       PINMUX_IPSR_MSEL(IP10_18_15, TX1_D, SEL_SCIF1_3),
+       PINMUX_IPSR_MSEL(IP10_18_15, TS_SCK0_C, SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP10_18_15, GLO_RFON_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP10_18_15, VI3_DATA3_B, SEL_VI3_1),
        PINMUX_IPSR_DATA(IP10_22_19, SD2_DAT2),
        PINMUX_IPSR_DATA(IP10_22_19, MMC0_D2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_22_19, BPFCLK_B, SEL_FM_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_22_19, VI0_DATA4_VI0_B4_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_22_19, HRX0_D, SEL_HSCIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_22_19, TS_SDEN1_B, SEL_TSIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_22_19, GLO_Q0_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_22_19, VI3_DATA4_B, SEL_VI3_1),
+       PINMUX_IPSR_MSEL(IP10_22_19, BPFCLK_B, SEL_FM_1),
+       PINMUX_IPSR_MSEL(IP10_22_19, VI0_DATA4_VI0_B4_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP10_22_19, HRX0_D, SEL_HSCIF0_3),
+       PINMUX_IPSR_MSEL(IP10_22_19, TS_SDEN1_B, SEL_TSIF1_1),
+       PINMUX_IPSR_MSEL(IP10_22_19, GLO_Q0_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP10_22_19, VI3_DATA4_B, SEL_VI3_1),
        PINMUX_IPSR_DATA(IP10_25_23, SD2_DAT3),
        PINMUX_IPSR_DATA(IP10_25_23, MMC0_D3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_25_23, SIM0_RST, SEL_SIM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_25_23, VI0_DATA5_VI0_B5_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_25_23, HTX0_D, SEL_HSCIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_25_23, TS_SPSYNC1_B, SEL_TSIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_25_23, GLO_Q1_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_25_23, VI3_DATA5_B, SEL_VI3_1),
+       PINMUX_IPSR_MSEL(IP10_25_23, SIM0_RST, SEL_SIM_0),
+       PINMUX_IPSR_MSEL(IP10_25_23, VI0_DATA5_VI0_B5_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP10_25_23, HTX0_D, SEL_HSCIF0_3),
+       PINMUX_IPSR_MSEL(IP10_25_23, TS_SPSYNC1_B, SEL_TSIF1_1),
+       PINMUX_IPSR_MSEL(IP10_25_23, GLO_Q1_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP10_25_23, VI3_DATA5_B, SEL_VI3_1),
        PINMUX_IPSR_DATA(IP10_29_26, SD2_CD),
        PINMUX_IPSR_DATA(IP10_29_26, MMC0_D4),
-       PINMUX_IPSR_MODSEL_DATA(IP10_29_26, TS_SDAT0_B, SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP10_29_26, TS_SDAT0_B, SEL_TSIF0_1),
        PINMUX_IPSR_DATA(IP10_29_26, USB2_EXTP),
-       PINMUX_IPSR_MODSEL_DATA(IP10_29_26, GLO_I0, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_29_26, VI0_DATA6_VI0_B6_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_29_26, HCTS0_N_D, SEL_HSCIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_29_26, TS_SDAT1_B, SEL_TSIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_29_26, GLO_I0_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_29_26, VI3_DATA6_B, SEL_VI3_1),
+       PINMUX_IPSR_MSEL(IP10_29_26, GLO_I0, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP10_29_26, VI0_DATA6_VI0_B6_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP10_29_26, HCTS0_N_D, SEL_HSCIF0_3),
+       PINMUX_IPSR_MSEL(IP10_29_26, TS_SDAT1_B, SEL_TSIF1_1),
+       PINMUX_IPSR_MSEL(IP10_29_26, GLO_I0_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP10_29_26, VI3_DATA6_B, SEL_VI3_1),
 
        PINMUX_IPSR_DATA(IP11_3_0, SD2_WP),
        PINMUX_IPSR_DATA(IP11_3_0, MMC0_D5),
-       PINMUX_IPSR_MODSEL_DATA(IP11_3_0, TS_SCK0_B, SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP11_3_0, TS_SCK0_B, SEL_TSIF0_1),
        PINMUX_IPSR_DATA(IP11_3_0, USB2_IDIN),
-       PINMUX_IPSR_MODSEL_DATA(IP11_3_0, GLO_I1, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_3_0, VI0_DATA7_VI0_B7_B, SEL_VI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_3_0, HRTS0_N_D, SEL_HSCIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_3_0, TS_SCK1_B, SEL_TSIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_3_0, GLO_I1_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_3_0, VI3_DATA7_B, SEL_VI3_1),
+       PINMUX_IPSR_MSEL(IP11_3_0, GLO_I1, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP11_3_0, VI0_DATA7_VI0_B7_B, SEL_VI0_1),
+       PINMUX_IPSR_MSEL(IP11_3_0, HRTS0_N_D, SEL_HSCIF0_3),
+       PINMUX_IPSR_MSEL(IP11_3_0, TS_SCK1_B, SEL_TSIF1_1),
+       PINMUX_IPSR_MSEL(IP11_3_0, GLO_I1_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP11_3_0, VI3_DATA7_B, SEL_VI3_1),
        PINMUX_IPSR_DATA(IP11_4, SD3_CLK),
        PINMUX_IPSR_DATA(IP11_4, MMC1_CLK),
        PINMUX_IPSR_DATA(IP11_6_5, SD3_CMD),
@@ -1447,298 +1446,298 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP11_14_13, SCKZ),
        PINMUX_IPSR_DATA(IP11_17_15, SD3_CD),
        PINMUX_IPSR_DATA(IP11_17_15, MMC1_D4),
-       PINMUX_IPSR_MODSEL_DATA(IP11_17_15, TS_SDAT1, SEL_TSIF1_0),
+       PINMUX_IPSR_MSEL(IP11_17_15, TS_SDAT1, SEL_TSIF1_0),
        PINMUX_IPSR_DATA(IP11_17_15, VSP),
-       PINMUX_IPSR_MODSEL_DATA(IP11_17_15, GLO_Q0, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_17_15, SIM0_RST_B, SEL_SIM_1),
+       PINMUX_IPSR_MSEL(IP11_17_15, GLO_Q0, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP11_17_15, SIM0_RST_B, SEL_SIM_1),
        PINMUX_IPSR_DATA(IP11_21_18, SD3_WP),
        PINMUX_IPSR_DATA(IP11_21_18, MMC1_D5),
-       PINMUX_IPSR_MODSEL_DATA(IP11_21_18, TS_SCK1, SEL_TSIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_21_18, GLO_Q1, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_21_18, FMIN_C, SEL_FM_2),
-       PINMUX_IPSR_MODSEL_DATA(IP11_21_18, FMIN_E, SEL_FM_4),
-       PINMUX_IPSR_MODSEL_DATA(IP11_21_18, FMIN_F, SEL_FM_5),
+       PINMUX_IPSR_MSEL(IP11_21_18, TS_SCK1, SEL_TSIF1_0),
+       PINMUX_IPSR_MSEL(IP11_21_18, GLO_Q1, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP11_21_18, FMIN_C, SEL_FM_2),
+       PINMUX_IPSR_MSEL(IP11_21_18, FMIN_E, SEL_FM_4),
+       PINMUX_IPSR_MSEL(IP11_21_18, FMIN_F, SEL_FM_5),
        PINMUX_IPSR_DATA(IP11_23_22, MLB_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP11_23_22, IIC2_SCL_B, SEL_IIC2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_23_22, I2C2_SCL_B, SEL_I2C2_1),
+       PINMUX_IPSR_MSEL(IP11_23_22, IIC2_SCL_B, SEL_IIC2_1),
+       PINMUX_IPSR_MSEL(IP11_23_22, I2C2_SCL_B, SEL_I2C2_1),
        PINMUX_IPSR_DATA(IP11_26_24, MLB_SIG),
-       PINMUX_IPSR_MODSEL_DATA(IP11_26_24, SCIFB1_RXD_D, SEL_SCIFB1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_26_24, RX1_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP11_26_24, IIC2_SDA_B, SEL_IIC2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_26_24, I2C2_SDA_B, SEL_I2C2_1),
+       PINMUX_IPSR_MSEL(IP11_26_24, SCIFB1_RXD_D, SEL_SCIFB1_3),
+       PINMUX_IPSR_MSEL(IP11_26_24, RX1_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP11_26_24, IIC2_SDA_B, SEL_IIC2_1),
+       PINMUX_IPSR_MSEL(IP11_26_24, I2C2_SDA_B, SEL_I2C2_1),
        PINMUX_IPSR_DATA(IP11_29_27, MLB_DAT),
-       PINMUX_IPSR_MODSEL_DATA(IP11_29_27, SCIFB1_TXD_D, SEL_SCIFB1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_29_27, TX1_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP11_29_27, BPFCLK_C, SEL_FM_2),
+       PINMUX_IPSR_MSEL(IP11_29_27, SCIFB1_TXD_D, SEL_SCIFB1_3),
+       PINMUX_IPSR_MSEL(IP11_29_27, TX1_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP11_29_27, BPFCLK_C, SEL_FM_2),
        PINMUX_IPSR_DATA(IP11_31_30, SSI_SCK0129),
-       PINMUX_IPSR_MODSEL_DATA(IP11_31_30, CAN_CLK_B, SEL_CANCLK_1),
+       PINMUX_IPSR_MSEL(IP11_31_30, CAN_CLK_B, SEL_CANCLK_1),
        PINMUX_IPSR_DATA(IP11_31_30, MOUT0),
 
        PINMUX_IPSR_DATA(IP12_1_0, SSI_WS0129),
-       PINMUX_IPSR_MODSEL_DATA(IP12_1_0, CAN0_TX_B, SEL_CAN0_1),
+       PINMUX_IPSR_MSEL(IP12_1_0, CAN0_TX_B, SEL_CAN0_1),
        PINMUX_IPSR_DATA(IP12_1_0, MOUT1),
        PINMUX_IPSR_DATA(IP12_3_2, SSI_SDATA0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_3_2, CAN0_RX_B, SEL_CAN0_1),
+       PINMUX_IPSR_MSEL(IP12_3_2, CAN0_RX_B, SEL_CAN0_1),
        PINMUX_IPSR_DATA(IP12_3_2, MOUT2),
        PINMUX_IPSR_DATA(IP12_5_4, SSI_SDATA1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_5_4, CAN1_TX_B, SEL_CAN1_1),
+       PINMUX_IPSR_MSEL(IP12_5_4, CAN1_TX_B, SEL_CAN1_1),
        PINMUX_IPSR_DATA(IP12_5_4, MOUT5),
        PINMUX_IPSR_DATA(IP12_7_6, SSI_SDATA2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_7_6, CAN1_RX_B, SEL_CAN1_1),
+       PINMUX_IPSR_MSEL(IP12_7_6, CAN1_RX_B, SEL_CAN1_1),
        PINMUX_IPSR_DATA(IP12_7_6, SSI_SCK1),
        PINMUX_IPSR_DATA(IP12_7_6, MOUT6),
        PINMUX_IPSR_DATA(IP12_10_8, SSI_SCK34),
        PINMUX_IPSR_DATA(IP12_10_8, STP_OPWM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_10_8, SCIFB0_SCK, SEL_SCIFB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_10_8, MSIOF1_SCK, SEL_SOF1_0),
+       PINMUX_IPSR_MSEL(IP12_10_8, SCIFB0_SCK, SEL_SCIFB_0),
+       PINMUX_IPSR_MSEL(IP12_10_8, MSIOF1_SCK, SEL_SOF1_0),
        PINMUX_IPSR_DATA(IP12_10_8, CAN_DEBUG_HW_TRIGGER),
        PINMUX_IPSR_DATA(IP12_13_11, SSI_WS34),
-       PINMUX_IPSR_MODSEL_DATA(IP12_13_11, STP_IVCXO27_0, SEL_SSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_13_11, SCIFB0_RXD, SEL_SCIFB_0),
+       PINMUX_IPSR_MSEL(IP12_13_11, STP_IVCXO27_0, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP12_13_11, SCIFB0_RXD, SEL_SCIFB_0),
        PINMUX_IPSR_DATA(IP12_13_11, MSIOF1_SYNC),
        PINMUX_IPSR_DATA(IP12_13_11, CAN_STEP0),
        PINMUX_IPSR_DATA(IP12_16_14, SSI_SDATA3),
-       PINMUX_IPSR_MODSEL_DATA(IP12_16_14, STP_ISCLK_0, SEL_SSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_16_14, SCIFB0_TXD, SEL_SCIFB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_16_14, MSIOF1_SS1, SEL_SOF1_0),
+       PINMUX_IPSR_MSEL(IP12_16_14, STP_ISCLK_0, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP12_16_14, SCIFB0_TXD, SEL_SCIFB_0),
+       PINMUX_IPSR_MSEL(IP12_16_14, MSIOF1_SS1, SEL_SOF1_0),
        PINMUX_IPSR_DATA(IP12_16_14, CAN_TXCLK),
        PINMUX_IPSR_DATA(IP12_19_17, SSI_SCK4),
-       PINMUX_IPSR_MODSEL_DATA(IP12_19_17, STP_ISD_0, SEL_SSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_19_17, SCIFB0_CTS_N, SEL_SCIFB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_19_17, MSIOF1_SS2, SEL_SOF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_19_17, SSI_SCK5_C, SEL_SSI5_2),
+       PINMUX_IPSR_MSEL(IP12_19_17, STP_ISD_0, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP12_19_17, SCIFB0_CTS_N, SEL_SCIFB_0),
+       PINMUX_IPSR_MSEL(IP12_19_17, MSIOF1_SS2, SEL_SOF1_0),
+       PINMUX_IPSR_MSEL(IP12_19_17, SSI_SCK5_C, SEL_SSI5_2),
        PINMUX_IPSR_DATA(IP12_19_17, CAN_DEBUGOUT0),
        PINMUX_IPSR_DATA(IP12_22_20, SSI_WS4),
-       PINMUX_IPSR_MODSEL_DATA(IP12_22_20, STP_ISEN_0, SEL_SSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_22_20, SCIFB0_RTS_N, SEL_SCIFB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_22_20, MSIOF1_TXD, SEL_SOF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_22_20, SSI_WS5_C, SEL_SSI5_2),
+       PINMUX_IPSR_MSEL(IP12_22_20, STP_ISEN_0, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP12_22_20, SCIFB0_RTS_N, SEL_SCIFB_0),
+       PINMUX_IPSR_MSEL(IP12_22_20, MSIOF1_TXD, SEL_SOF1_0),
+       PINMUX_IPSR_MSEL(IP12_22_20, SSI_WS5_C, SEL_SSI5_2),
        PINMUX_IPSR_DATA(IP12_22_20, CAN_DEBUGOUT1),
        PINMUX_IPSR_DATA(IP12_24_23, SSI_SDATA4),
-       PINMUX_IPSR_MODSEL_DATA(IP12_24_23, STP_ISSYNC_0, SEL_SSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_24_23, MSIOF1_RXD, SEL_SOF1_0),
+       PINMUX_IPSR_MSEL(IP12_24_23, STP_ISSYNC_0, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP12_24_23, MSIOF1_RXD, SEL_SOF1_0),
        PINMUX_IPSR_DATA(IP12_24_23, CAN_DEBUGOUT2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_27_25, SSI_SCK5, SEL_SSI5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_27_25, SCIFB1_SCK, SEL_SCIFB1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_27_25, IERX_B, SEL_IEB_1),
+       PINMUX_IPSR_MSEL(IP12_27_25, SSI_SCK5, SEL_SSI5_0),
+       PINMUX_IPSR_MSEL(IP12_27_25, SCIFB1_SCK, SEL_SCIFB1_0),
+       PINMUX_IPSR_MSEL(IP12_27_25, IERX_B, SEL_IEB_1),
        PINMUX_IPSR_DATA(IP12_27_25, DU2_EXHSYNC_DU2_HSYNC),
        PINMUX_IPSR_DATA(IP12_27_25, QSTH_QHS),
        PINMUX_IPSR_DATA(IP12_27_25, CAN_DEBUGOUT3),
-       PINMUX_IPSR_MODSEL_DATA(IP12_30_28, SSI_WS5, SEL_SSI5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_30_28, SCIFB1_RXD, SEL_SCIFB1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_30_28, IECLK_B, SEL_IEB_1),
+       PINMUX_IPSR_MSEL(IP12_30_28, SSI_WS5, SEL_SSI5_0),
+       PINMUX_IPSR_MSEL(IP12_30_28, SCIFB1_RXD, SEL_SCIFB1_0),
+       PINMUX_IPSR_MSEL(IP12_30_28, IECLK_B, SEL_IEB_1),
        PINMUX_IPSR_DATA(IP12_30_28, DU2_EXVSYNC_DU2_VSYNC),
        PINMUX_IPSR_DATA(IP12_30_28, QSTB_QHE),
        PINMUX_IPSR_DATA(IP12_30_28, CAN_DEBUGOUT4),
 
-       PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SSI_SDATA5, SEL_SSI5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SCIFB1_TXD, SEL_SCIFB1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_2_0, IETX_B, SEL_IEB_1),
+       PINMUX_IPSR_MSEL(IP13_2_0, SSI_SDATA5, SEL_SSI5_0),
+       PINMUX_IPSR_MSEL(IP13_2_0, SCIFB1_TXD, SEL_SCIFB1_0),
+       PINMUX_IPSR_MSEL(IP13_2_0, IETX_B, SEL_IEB_1),
        PINMUX_IPSR_DATA(IP13_2_0, DU2_DR2),
        PINMUX_IPSR_DATA(IP13_2_0, LCDOUT2),
        PINMUX_IPSR_DATA(IP13_2_0, CAN_DEBUGOUT5),
-       PINMUX_IPSR_MODSEL_DATA(IP13_6_3, SSI_SCK6, SEL_SSI6_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_6_3, SCIFB1_CTS_N, SEL_SCIFB1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_6_3, BPFCLK_D, SEL_FM_3),
+       PINMUX_IPSR_MSEL(IP13_6_3, SSI_SCK6, SEL_SSI6_0),
+       PINMUX_IPSR_MSEL(IP13_6_3, SCIFB1_CTS_N, SEL_SCIFB1_0),
+       PINMUX_IPSR_MSEL(IP13_6_3, BPFCLK_D, SEL_FM_3),
        PINMUX_IPSR_DATA(IP13_6_3, DU2_DR3),
        PINMUX_IPSR_DATA(IP13_6_3, LCDOUT3),
        PINMUX_IPSR_DATA(IP13_6_3, CAN_DEBUGOUT6),
-       PINMUX_IPSR_MODSEL_DATA(IP13_6_3, BPFCLK_F, SEL_FM_5),
-       PINMUX_IPSR_MODSEL_DATA(IP13_9_7, SSI_WS6, SEL_SSI6_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_9_7, SCIFB1_RTS_N, SEL_SCIFB1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_9_7, CAN0_TX_D, SEL_CAN0_3),
+       PINMUX_IPSR_MSEL(IP13_6_3, BPFCLK_F, SEL_FM_5),
+       PINMUX_IPSR_MSEL(IP13_9_7, SSI_WS6, SEL_SSI6_0),
+       PINMUX_IPSR_MSEL(IP13_9_7, SCIFB1_RTS_N, SEL_SCIFB1_0),
+       PINMUX_IPSR_MSEL(IP13_9_7, CAN0_TX_D, SEL_CAN0_3),
        PINMUX_IPSR_DATA(IP13_9_7, DU2_DR4),
        PINMUX_IPSR_DATA(IP13_9_7, LCDOUT4),
        PINMUX_IPSR_DATA(IP13_9_7, CAN_DEBUGOUT7),
-       PINMUX_IPSR_MODSEL_DATA(IP13_12_10, SSI_SDATA6, SEL_SSI6_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_12_10, FMIN_D, SEL_FM_3),
+       PINMUX_IPSR_MSEL(IP13_12_10, SSI_SDATA6, SEL_SSI6_0),
+       PINMUX_IPSR_MSEL(IP13_12_10, FMIN_D, SEL_FM_3),
        PINMUX_IPSR_DATA(IP13_12_10, DU2_DR5),
        PINMUX_IPSR_DATA(IP13_12_10, LCDOUT5),
        PINMUX_IPSR_DATA(IP13_12_10, CAN_DEBUGOUT8),
-       PINMUX_IPSR_MODSEL_DATA(IP13_15_13, SSI_SCK78, SEL_SSI7_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_15_13, STP_IVCXO27_1, SEL_SSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_15_13, SCK1, SEL_SCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_15_13, SCIFA1_SCK, SEL_SCIFA1_0),
+       PINMUX_IPSR_MSEL(IP13_15_13, SSI_SCK78, SEL_SSI7_0),
+       PINMUX_IPSR_MSEL(IP13_15_13, STP_IVCXO27_1, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP13_15_13, SCK1, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP13_15_13, SCIFA1_SCK, SEL_SCIFA1_0),
        PINMUX_IPSR_DATA(IP13_15_13, DU2_DR6),
        PINMUX_IPSR_DATA(IP13_15_13, LCDOUT6),
        PINMUX_IPSR_DATA(IP13_15_13, CAN_DEBUGOUT9),
-       PINMUX_IPSR_MODSEL_DATA(IP13_18_16, SSI_WS78, SEL_SSI7_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_18_16, STP_ISCLK_1, SEL_SSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_18_16, SCIFB2_SCK, SEL_SCIFB2_0),
+       PINMUX_IPSR_MSEL(IP13_18_16, SSI_WS78, SEL_SSI7_0),
+       PINMUX_IPSR_MSEL(IP13_18_16, STP_ISCLK_1, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP13_18_16, SCIFB2_SCK, SEL_SCIFB2_0),
        PINMUX_IPSR_DATA(IP13_18_16, SCIFA2_CTS_N),
        PINMUX_IPSR_DATA(IP13_18_16, DU2_DR7),
        PINMUX_IPSR_DATA(IP13_18_16, LCDOUT7),
        PINMUX_IPSR_DATA(IP13_18_16, CAN_DEBUGOUT10),
-       PINMUX_IPSR_MODSEL_DATA(IP13_22_19, SSI_SDATA7, SEL_SSI7_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_22_19, STP_ISD_1, SEL_SSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_22_19, SCIFB2_RXD, SEL_SCIFB2_0),
+       PINMUX_IPSR_MSEL(IP13_22_19, SSI_SDATA7, SEL_SSI7_0),
+       PINMUX_IPSR_MSEL(IP13_22_19, STP_ISD_1, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP13_22_19, SCIFB2_RXD, SEL_SCIFB2_0),
        PINMUX_IPSR_DATA(IP13_22_19, SCIFA2_RTS_N),
        PINMUX_IPSR_DATA(IP13_22_19, TCLK2),
        PINMUX_IPSR_DATA(IP13_22_19, QSTVA_QVS),
        PINMUX_IPSR_DATA(IP13_22_19, CAN_DEBUGOUT11),
-       PINMUX_IPSR_MODSEL_DATA(IP13_22_19, BPFCLK_E, SEL_FM_4),
-       PINMUX_IPSR_MODSEL_DATA(IP13_22_19, SSI_SDATA7_B, SEL_SSI7_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_22_19, FMIN_G, SEL_FM_6),
-       PINMUX_IPSR_MODSEL_DATA(IP13_25_23, SSI_SDATA8, SEL_SSI8_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_25_23, STP_ISEN_1, SEL_SSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_25_23, SCIFB2_TXD, SEL_SCIFB2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_25_23, CAN0_TX_C, SEL_CAN0_2),
+       PINMUX_IPSR_MSEL(IP13_22_19, BPFCLK_E, SEL_FM_4),
+       PINMUX_IPSR_MSEL(IP13_22_19, SSI_SDATA7_B, SEL_SSI7_1),
+       PINMUX_IPSR_MSEL(IP13_22_19, FMIN_G, SEL_FM_6),
+       PINMUX_IPSR_MSEL(IP13_25_23, SSI_SDATA8, SEL_SSI8_0),
+       PINMUX_IPSR_MSEL(IP13_25_23, STP_ISEN_1, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP13_25_23, SCIFB2_TXD, SEL_SCIFB2_0),
+       PINMUX_IPSR_MSEL(IP13_25_23, CAN0_TX_C, SEL_CAN0_2),
        PINMUX_IPSR_DATA(IP13_25_23, CAN_DEBUGOUT12),
-       PINMUX_IPSR_MODSEL_DATA(IP13_25_23, SSI_SDATA8_B, SEL_SSI8_1),
+       PINMUX_IPSR_MSEL(IP13_25_23, SSI_SDATA8_B, SEL_SSI8_1),
        PINMUX_IPSR_DATA(IP13_28_26, SSI_SDATA9),
-       PINMUX_IPSR_MODSEL_DATA(IP13_28_26, STP_ISSYNC_1, SEL_SSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_28_26, SCIFB2_CTS_N, SEL_SCIFB2_0),
+       PINMUX_IPSR_MSEL(IP13_28_26, STP_ISSYNC_1, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP13_28_26, SCIFB2_CTS_N, SEL_SCIFB2_0),
        PINMUX_IPSR_DATA(IP13_28_26, SSI_WS1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_28_26, SSI_SDATA5_C, SEL_SSI5_2),
+       PINMUX_IPSR_MSEL(IP13_28_26, SSI_SDATA5_C, SEL_SSI5_2),
        PINMUX_IPSR_DATA(IP13_28_26, CAN_DEBUGOUT13),
        PINMUX_IPSR_DATA(IP13_30_29, AUDIO_CLKA),
-       PINMUX_IPSR_MODSEL_DATA(IP13_30_29, SCIFB2_RTS_N, SEL_SCIFB2_0),
+       PINMUX_IPSR_MSEL(IP13_30_29, SCIFB2_RTS_N, SEL_SCIFB2_0),
        PINMUX_IPSR_DATA(IP13_30_29, CAN_DEBUGOUT14),
 
        PINMUX_IPSR_DATA(IP14_2_0, AUDIO_CLKB),
-       PINMUX_IPSR_MODSEL_DATA(IP14_2_0, SCIF_CLK, SEL_SCIFCLK_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_2_0, CAN0_RX_D, SEL_CAN0_3),
+       PINMUX_IPSR_MSEL(IP14_2_0, SCIF_CLK, SEL_SCIFCLK_0),
+       PINMUX_IPSR_MSEL(IP14_2_0, CAN0_RX_D, SEL_CAN0_3),
        PINMUX_IPSR_DATA(IP14_2_0, DVC_MUTE),
-       PINMUX_IPSR_MODSEL_DATA(IP14_2_0, CAN0_RX_C, SEL_CAN0_2),
+       PINMUX_IPSR_MSEL(IP14_2_0, CAN0_RX_C, SEL_CAN0_2),
        PINMUX_IPSR_DATA(IP14_2_0, CAN_DEBUGOUT15),
        PINMUX_IPSR_DATA(IP14_2_0, REMOCON),
-       PINMUX_IPSR_MODSEL_DATA(IP14_5_3, SCIFA0_SCK, SEL_SCFA_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_5_3, HSCK1, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP14_5_3, SCIFA0_SCK, SEL_SCFA_0),
+       PINMUX_IPSR_MSEL(IP14_5_3, HSCK1, SEL_HSCIF1_0),
        PINMUX_IPSR_DATA(IP14_5_3, SCK0),
        PINMUX_IPSR_DATA(IP14_5_3, MSIOF3_SS2),
        PINMUX_IPSR_DATA(IP14_5_3, DU2_DG2),
        PINMUX_IPSR_DATA(IP14_5_3, LCDOUT10),
-       PINMUX_IPSR_MODSEL_DATA(IP14_5_3, IIC1_SDA_C, SEL_IIC1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP14_5_3, I2C1_SDA_C, SEL_I2C1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP14_8_6, SCIFA0_RXD, SEL_SCFA_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_8_6, HRX1, SEL_HSCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_8_6, RX0, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP14_5_3, IIC1_SDA_C, SEL_IIC1_2),
+       PINMUX_IPSR_MSEL(IP14_5_3, I2C1_SDA_C, SEL_I2C1_2),
+       PINMUX_IPSR_MSEL(IP14_8_6, SCIFA0_RXD, SEL_SCFA_0),
+       PINMUX_IPSR_MSEL(IP14_8_6, HRX1, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP14_8_6, RX0, SEL_SCIF0_0),
        PINMUX_IPSR_DATA(IP14_8_6, DU2_DR0),
        PINMUX_IPSR_DATA(IP14_8_6, LCDOUT0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_11_9, SCIFA0_TXD, SEL_SCFA_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_11_9, HTX1, SEL_HSCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_11_9, TX0, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP14_11_9, SCIFA0_TXD, SEL_SCFA_0),
+       PINMUX_IPSR_MSEL(IP14_11_9, HTX1, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP14_11_9, TX0, SEL_SCIF0_0),
        PINMUX_IPSR_DATA(IP14_11_9, DU2_DR1),
        PINMUX_IPSR_DATA(IP14_11_9, LCDOUT1),
-       PINMUX_IPSR_MODSEL_DATA(IP14_15_12, SCIFA0_CTS_N, SEL_SCFA_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_15_12, HCTS1_N, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP14_15_12, SCIFA0_CTS_N, SEL_SCFA_0),
+       PINMUX_IPSR_MSEL(IP14_15_12, HCTS1_N, SEL_HSCIF1_0),
        PINMUX_IPSR_DATA(IP14_15_12, CTS0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP14_15_12, MSIOF3_SYNC, SEL_SOF3_0),
+       PINMUX_IPSR_MSEL(IP14_15_12, MSIOF3_SYNC, SEL_SOF3_0),
        PINMUX_IPSR_DATA(IP14_15_12, DU2_DG3),
        PINMUX_IPSR_DATA(IP14_15_12, LCDOUT11),
        PINMUX_IPSR_DATA(IP14_15_12, PWM0_B),
-       PINMUX_IPSR_MODSEL_DATA(IP14_15_12, IIC1_SCL_C, SEL_IIC1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP14_15_12, I2C1_SCL_C, SEL_I2C1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP14_18_16, SCIFA0_RTS_N, SEL_SCFA_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_18_16, HRTS1_N, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP14_15_12, IIC1_SCL_C, SEL_IIC1_2),
+       PINMUX_IPSR_MSEL(IP14_15_12, I2C1_SCL_C, SEL_I2C1_2),
+       PINMUX_IPSR_MSEL(IP14_18_16, SCIFA0_RTS_N, SEL_SCFA_0),
+       PINMUX_IPSR_MSEL(IP14_18_16, HRTS1_N, SEL_HSCIF1_0),
        PINMUX_IPSR_DATA(IP14_18_16, RTS0_N),
        PINMUX_IPSR_DATA(IP14_18_16, MSIOF3_SS1),
        PINMUX_IPSR_DATA(IP14_18_16, DU2_DG0),
        PINMUX_IPSR_DATA(IP14_18_16, LCDOUT8),
        PINMUX_IPSR_DATA(IP14_18_16, PWM1_B),
-       PINMUX_IPSR_MODSEL_DATA(IP14_21_19, SCIFA1_RXD, SEL_SCIFA1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_21_19, AD_DI, SEL_ADI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_21_19, RX1, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP14_21_19, SCIFA1_RXD, SEL_SCIFA1_0),
+       PINMUX_IPSR_MSEL(IP14_21_19, AD_DI, SEL_ADI_0),
+       PINMUX_IPSR_MSEL(IP14_21_19, RX1, SEL_SCIF1_0),
        PINMUX_IPSR_DATA(IP14_21_19, DU2_EXODDF_DU2_ODDF_DISP_CDE),
        PINMUX_IPSR_DATA(IP14_21_19, QCPV_QDE),
-       PINMUX_IPSR_MODSEL_DATA(IP14_24_22, SCIFA1_TXD, SEL_SCIFA1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_24_22, AD_DO, SEL_ADI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_24_22, TX1, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP14_24_22, SCIFA1_TXD, SEL_SCIFA1_0),
+       PINMUX_IPSR_MSEL(IP14_24_22, AD_DO, SEL_ADI_0),
+       PINMUX_IPSR_MSEL(IP14_24_22, TX1, SEL_SCIF1_0),
        PINMUX_IPSR_DATA(IP14_24_22, DU2_DG1),
        PINMUX_IPSR_DATA(IP14_24_22, LCDOUT9),
-       PINMUX_IPSR_MODSEL_DATA(IP14_27_25, SCIFA1_CTS_N, SEL_SCIFA1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_27_25, AD_CLK, SEL_ADI_0),
+       PINMUX_IPSR_MSEL(IP14_27_25, SCIFA1_CTS_N, SEL_SCIFA1_0),
+       PINMUX_IPSR_MSEL(IP14_27_25, AD_CLK, SEL_ADI_0),
        PINMUX_IPSR_DATA(IP14_27_25, CTS1_N),
-       PINMUX_IPSR_MODSEL_DATA(IP14_27_25, MSIOF3_RXD, SEL_SOF3_0),
+       PINMUX_IPSR_MSEL(IP14_27_25, MSIOF3_RXD, SEL_SOF3_0),
        PINMUX_IPSR_DATA(IP14_27_25, DU0_DOTCLKOUT),
        PINMUX_IPSR_DATA(IP14_27_25, QCLK),
-       PINMUX_IPSR_MODSEL_DATA(IP14_30_28, SCIFA1_RTS_N, SEL_SCIFA1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_30_28, AD_NCS_N, SEL_ADI_0),
+       PINMUX_IPSR_MSEL(IP14_30_28, SCIFA1_RTS_N, SEL_SCIFA1_0),
+       PINMUX_IPSR_MSEL(IP14_30_28, AD_NCS_N, SEL_ADI_0),
        PINMUX_IPSR_DATA(IP14_30_28, RTS1_N),
-       PINMUX_IPSR_MODSEL_DATA(IP14_30_28, MSIOF3_TXD, SEL_SOF3_0),
+       PINMUX_IPSR_MSEL(IP14_30_28, MSIOF3_TXD, SEL_SOF3_0),
        PINMUX_IPSR_DATA(IP14_30_28, DU1_DOTCLKOUT),
        PINMUX_IPSR_DATA(IP14_30_28, QSTVB_QVE),
-       PINMUX_IPSR_MODSEL_DATA(IP14_30_28, HRTS0_N_C, SEL_HSCIF0_2),
+       PINMUX_IPSR_MSEL(IP14_30_28, HRTS0_N_C, SEL_HSCIF0_2),
 
-       PINMUX_IPSR_MODSEL_DATA(IP15_2_0, SCIFA2_SCK, SEL_SCIFA2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_2_0, FMCLK, SEL_FM_0),
+       PINMUX_IPSR_MSEL(IP15_2_0, SCIFA2_SCK, SEL_SCIFA2_0),
+       PINMUX_IPSR_MSEL(IP15_2_0, FMCLK, SEL_FM_0),
        PINMUX_IPSR_DATA(IP15_2_0, SCK2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_2_0, MSIOF3_SCK, SEL_SOF3_0),
+       PINMUX_IPSR_MSEL(IP15_2_0, MSIOF3_SCK, SEL_SOF3_0),
        PINMUX_IPSR_DATA(IP15_2_0, DU2_DG7),
        PINMUX_IPSR_DATA(IP15_2_0, LCDOUT15),
-       PINMUX_IPSR_MODSEL_DATA(IP15_2_0, SCIF_CLK_B, SEL_SCIFCLK_1),
-       PINMUX_IPSR_MODSEL_DATA(IP15_5_3, SCIFA2_RXD, SEL_SCIFA2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_5_3, FMIN, SEL_FM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_5_3, TX2, SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP15_2_0, SCIF_CLK_B, SEL_SCIFCLK_1),
+       PINMUX_IPSR_MSEL(IP15_5_3, SCIFA2_RXD, SEL_SCIFA2_0),
+       PINMUX_IPSR_MSEL(IP15_5_3, FMIN, SEL_FM_0),
+       PINMUX_IPSR_MSEL(IP15_5_3, TX2, SEL_SCIF2_0),
        PINMUX_IPSR_DATA(IP15_5_3, DU2_DB0),
        PINMUX_IPSR_DATA(IP15_5_3, LCDOUT16),
-       PINMUX_IPSR_MODSEL_DATA(IP15_5_3, IIC2_SCL, SEL_IIC2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_5_3, I2C2_SCL, SEL_I2C2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_8_6, SCIFA2_TXD, SEL_SCIFA2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_8_6, BPFCLK, SEL_FM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_8_6, RX2, SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP15_5_3, IIC2_SCL, SEL_IIC2_0),
+       PINMUX_IPSR_MSEL(IP15_5_3, I2C2_SCL, SEL_I2C2_0),
+       PINMUX_IPSR_MSEL(IP15_8_6, SCIFA2_TXD, SEL_SCIFA2_0),
+       PINMUX_IPSR_MSEL(IP15_8_6, BPFCLK, SEL_FM_0),
+       PINMUX_IPSR_MSEL(IP15_8_6, RX2, SEL_SCIF2_0),
        PINMUX_IPSR_DATA(IP15_8_6, DU2_DB1),
        PINMUX_IPSR_DATA(IP15_8_6, LCDOUT17),
-       PINMUX_IPSR_MODSEL_DATA(IP15_8_6, IIC2_SDA, SEL_IIC2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_8_6, I2C2_SDA, SEL_I2C2_0),
+       PINMUX_IPSR_MSEL(IP15_8_6, IIC2_SDA, SEL_IIC2_0),
+       PINMUX_IPSR_MSEL(IP15_8_6, I2C2_SDA, SEL_I2C2_0),
        PINMUX_IPSR_DATA(IP15_11_9, HSCK0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_11_9, TS_SDEN0, SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP15_11_9, TS_SDEN0, SEL_TSIF0_0),
        PINMUX_IPSR_DATA(IP15_11_9, DU2_DG4),
        PINMUX_IPSR_DATA(IP15_11_9, LCDOUT12),
-       PINMUX_IPSR_MODSEL_DATA(IP15_11_9, HCTS0_N_C, SEL_HSCIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_13_12, HRX0, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP15_11_9, HCTS0_N_C, SEL_HSCIF0_2),
+       PINMUX_IPSR_MSEL(IP15_13_12, HRX0, SEL_HSCIF0_0),
        PINMUX_IPSR_DATA(IP15_13_12, DU2_DB2),
        PINMUX_IPSR_DATA(IP15_13_12, LCDOUT18),
-       PINMUX_IPSR_MODSEL_DATA(IP15_15_14, HTX0, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP15_15_14, HTX0, SEL_HSCIF0_0),
        PINMUX_IPSR_DATA(IP15_15_14, DU2_DB3),
        PINMUX_IPSR_DATA(IP15_15_14, LCDOUT19),
-       PINMUX_IPSR_MODSEL_DATA(IP15_17_16, HCTS0_N, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP15_17_16, HCTS0_N, SEL_HSCIF0_0),
        PINMUX_IPSR_DATA(IP15_17_16, SSI_SCK9),
        PINMUX_IPSR_DATA(IP15_17_16, DU2_DB4),
        PINMUX_IPSR_DATA(IP15_17_16, LCDOUT20),
-       PINMUX_IPSR_MODSEL_DATA(IP15_19_18, HRTS0_N, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP15_19_18, HRTS0_N, SEL_HSCIF0_0),
        PINMUX_IPSR_DATA(IP15_19_18, SSI_WS9),
        PINMUX_IPSR_DATA(IP15_19_18, DU2_DB5),
        PINMUX_IPSR_DATA(IP15_19_18, LCDOUT21),
-       PINMUX_IPSR_MODSEL_DATA(IP15_22_20, MSIOF0_SCK, SEL_SOF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_22_20, TS_SDAT0, SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP15_22_20, MSIOF0_SCK, SEL_SOF0_0),
+       PINMUX_IPSR_MSEL(IP15_22_20, TS_SDAT0, SEL_TSIF0_0),
        PINMUX_IPSR_DATA(IP15_22_20, ADICLK),
        PINMUX_IPSR_DATA(IP15_22_20, DU2_DB6),
        PINMUX_IPSR_DATA(IP15_22_20, LCDOUT22),
        PINMUX_IPSR_DATA(IP15_25_23, MSIOF0_SYNC),
-       PINMUX_IPSR_MODSEL_DATA(IP15_25_23, TS_SCK0, SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP15_25_23, TS_SCK0, SEL_TSIF0_0),
        PINMUX_IPSR_DATA(IP15_25_23, SSI_SCK2),
        PINMUX_IPSR_DATA(IP15_25_23, ADIDATA),
        PINMUX_IPSR_DATA(IP15_25_23, DU2_DB7),
        PINMUX_IPSR_DATA(IP15_25_23, LCDOUT23),
-       PINMUX_IPSR_MODSEL_DATA(IP15_25_23, HRX0_C, SEL_SCIFA2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP15_27_26, MSIOF0_SS1, SEL_SOF0_0),
+       PINMUX_IPSR_MSEL(IP15_25_23, HRX0_C, SEL_SCIFA2_1),
+       PINMUX_IPSR_MSEL(IP15_27_26, MSIOF0_SS1, SEL_SOF0_0),
        PINMUX_IPSR_DATA(IP15_27_26, ADICHS0),
        PINMUX_IPSR_DATA(IP15_27_26, DU2_DG5),
        PINMUX_IPSR_DATA(IP15_27_26, LCDOUT13),
-       PINMUX_IPSR_MODSEL_DATA(IP15_29_28, MSIOF0_TXD, SEL_SOF0_0),
+       PINMUX_IPSR_MSEL(IP15_29_28, MSIOF0_TXD, SEL_SOF0_0),
        PINMUX_IPSR_DATA(IP15_29_28, ADICHS1),
        PINMUX_IPSR_DATA(IP15_29_28, DU2_DG6),
        PINMUX_IPSR_DATA(IP15_29_28, LCDOUT14),
 
-       PINMUX_IPSR_MODSEL_DATA(IP16_2_0, MSIOF0_SS2, SEL_SOF0_0),
+       PINMUX_IPSR_MSEL(IP16_2_0, MSIOF0_SS2, SEL_SOF0_0),
        PINMUX_IPSR_DATA(IP16_2_0, AUDIO_CLKOUT),
        PINMUX_IPSR_DATA(IP16_2_0, ADICHS2),
        PINMUX_IPSR_DATA(IP16_2_0, DU2_DISP),
        PINMUX_IPSR_DATA(IP16_2_0, QPOLA),
-       PINMUX_IPSR_MODSEL_DATA(IP16_2_0, HTX0_C, SEL_HSCIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP16_2_0, SCIFA2_TXD_B, SEL_SCIFA2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP16_5_3, MSIOF0_RXD, SEL_SOF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP16_5_3, TS_SPSYNC0, SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP16_2_0, HTX0_C, SEL_HSCIF0_2),
+       PINMUX_IPSR_MSEL(IP16_2_0, SCIFA2_TXD_B, SEL_SCIFA2_1),
+       PINMUX_IPSR_MSEL(IP16_5_3, MSIOF0_RXD, SEL_SOF0_0),
+       PINMUX_IPSR_MSEL(IP16_5_3, TS_SPSYNC0, SEL_TSIF0_0),
        PINMUX_IPSR_DATA(IP16_5_3, SSI_WS2),
        PINMUX_IPSR_DATA(IP16_5_3, ADICS_SAMP),
        PINMUX_IPSR_DATA(IP16_5_3, DU2_CDE),
        PINMUX_IPSR_DATA(IP16_5_3, QPOLB),
-       PINMUX_IPSR_MODSEL_DATA(IP16_5_3, SCIFA2_RXD_B, SEL_HSCIF0_2),
+       PINMUX_IPSR_MSEL(IP16_5_3, SCIFA2_RXD_B, SEL_HSCIF0_2),
        PINMUX_IPSR_DATA(IP16_6, USB1_PWEN),
        PINMUX_IPSR_DATA(IP16_6, AUDIO_CLKOUT_D),
        PINMUX_IPSR_DATA(IP16_7, USB1_OVC),
-       PINMUX_IPSR_MODSEL_DATA(IP16_7, TCLK1_B, SEL_TMU1_1),
+       PINMUX_IPSR_MSEL(IP16_7, TCLK1_B, SEL_TMU1_1),
 
        PINMUX_DATA(IIC0_SCL_MARK, FN_SEL_IIC0_0),
        PINMUX_DATA(IIC0_SDA_MARK, FN_SEL_IIC0_0),
@@ -3624,25 +3623,6 @@ static const unsigned int usb2_pins[] = {
 static const unsigned int usb2_mux[] = {
        USB2_PWEN_MARK, USB2_OVC_MARK,
 };
-
-union vin_data {
-       unsigned int data24[24];
-       unsigned int data20[20];
-       unsigned int data16[16];
-       unsigned int data12[12];
-       unsigned int data10[10];
-       unsigned int data8[8];
-       unsigned int data4[4];
-};
-
-#define VIN_DATA_PIN_GROUP(n, s)                               \
-       {                                                       \
-               .name = #n#s,                                   \
-               .pins = n##_pins.data##s,                       \
-               .mux = n##_mux.data##s,                         \
-               .nr_pins = ARRAY_SIZE(n##_pins.data##s),        \
-       }
-
 /* - VIN0 ------------------------------------------------------------------- */
 static const union vin_data vin0_data_pins = {
        .data24 = {
@@ -5719,6 +5699,6 @@ const struct sh_pfc_soc_info r8a7790_pinmux_info = {
 
        .cfg_regs = pinmux_config_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index 25e8117f5a1ac04ad738cf3d26e0038f2759e56a..87a4f44147c1d5bcd9c286e86475b0e8370f9a86 100644 (file)
@@ -9,7 +9,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/platform_data/gpio-rcar.h>
 
 #include "core.h"
 #include "sh_pfc.h"
@@ -824,459 +823,459 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP0_14, D14),
        PINMUX_IPSR_DATA(IP0_15, D15),
        PINMUX_IPSR_DATA(IP0_18_16, A0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_18_16, ATAWR0_N_C, SEL_LBS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP0_18_16, MSIOF0_SCK_B, SEL_SOF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_18_16, SCL0_C, SEL_IIC0_2),
+       PINMUX_IPSR_MSEL(IP0_18_16, ATAWR0_N_C, SEL_LBS_2),
+       PINMUX_IPSR_MSEL(IP0_18_16, MSIOF0_SCK_B, SEL_SOF0_1),
+       PINMUX_IPSR_MSEL(IP0_18_16, SCL0_C, SEL_IIC0_2),
        PINMUX_IPSR_DATA(IP0_18_16, PWM2_B),
        PINMUX_IPSR_DATA(IP0_20_19, A1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_20_19, MSIOF0_SYNC_B, SEL_SOF0_1),
+       PINMUX_IPSR_MSEL(IP0_20_19, MSIOF0_SYNC_B, SEL_SOF0_1),
        PINMUX_IPSR_DATA(IP0_22_21, A2),
-       PINMUX_IPSR_MODSEL_DATA(IP0_22_21, MSIOF0_SS1_B, SEL_SOF0_1),
+       PINMUX_IPSR_MSEL(IP0_22_21, MSIOF0_SS1_B, SEL_SOF0_1),
        PINMUX_IPSR_DATA(IP0_24_23, A3),
-       PINMUX_IPSR_MODSEL_DATA(IP0_24_23, MSIOF0_SS2_B, SEL_SOF0_1),
+       PINMUX_IPSR_MSEL(IP0_24_23, MSIOF0_SS2_B, SEL_SOF0_1),
        PINMUX_IPSR_DATA(IP0_26_25, A4),
-       PINMUX_IPSR_MODSEL_DATA(IP0_26_25, MSIOF0_TXD_B, SEL_SOF0_1),
+       PINMUX_IPSR_MSEL(IP0_26_25, MSIOF0_TXD_B, SEL_SOF0_1),
        PINMUX_IPSR_DATA(IP0_28_27, A5),
-       PINMUX_IPSR_MODSEL_DATA(IP0_28_27, MSIOF0_RXD_B, SEL_SOF0_1),
+       PINMUX_IPSR_MSEL(IP0_28_27, MSIOF0_RXD_B, SEL_SOF0_1),
        PINMUX_IPSR_DATA(IP0_30_29, A6),
-       PINMUX_IPSR_MODSEL_DATA(IP0_30_29, MSIOF1_SCK, SEL_SOF1_0),
+       PINMUX_IPSR_MSEL(IP0_30_29, MSIOF1_SCK, SEL_SOF1_0),
 
        /* IPSR1 */
        PINMUX_IPSR_DATA(IP1_1_0, A7),
-       PINMUX_IPSR_MODSEL_DATA(IP1_1_0, MSIOF1_SYNC, SEL_SOF1_0),
+       PINMUX_IPSR_MSEL(IP1_1_0, MSIOF1_SYNC, SEL_SOF1_0),
        PINMUX_IPSR_DATA(IP1_3_2, A8),
-       PINMUX_IPSR_MODSEL_DATA(IP1_3_2, MSIOF1_SS1, SEL_SOF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_3_2, SCL0, SEL_IIC0_0),
+       PINMUX_IPSR_MSEL(IP1_3_2, MSIOF1_SS1, SEL_SOF1_0),
+       PINMUX_IPSR_MSEL(IP1_3_2, SCL0, SEL_IIC0_0),
        PINMUX_IPSR_DATA(IP1_5_4, A9),
-       PINMUX_IPSR_MODSEL_DATA(IP1_5_4, MSIOF1_SS2, SEL_SOF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_5_4, SDA0, SEL_IIC0_0),
+       PINMUX_IPSR_MSEL(IP1_5_4, MSIOF1_SS2, SEL_SOF1_0),
+       PINMUX_IPSR_MSEL(IP1_5_4, SDA0, SEL_IIC0_0),
        PINMUX_IPSR_DATA(IP1_7_6, A10),
-       PINMUX_IPSR_MODSEL_DATA(IP1_7_6, MSIOF1_TXD, SEL_SOF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_7_6, MSIOF1_TXD_D, SEL_SOF1_3),
+       PINMUX_IPSR_MSEL(IP1_7_6, MSIOF1_TXD, SEL_SOF1_0),
+       PINMUX_IPSR_MSEL(IP1_7_6, MSIOF1_TXD_D, SEL_SOF1_3),
        PINMUX_IPSR_DATA(IP1_10_8, A11),
-       PINMUX_IPSR_MODSEL_DATA(IP1_10_8, MSIOF1_RXD, SEL_SOF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_10_8, SCL3_D, SEL_IIC3_3),
-       PINMUX_IPSR_MODSEL_DATA(IP1_10_8, MSIOF1_RXD_D, SEL_SOF1_3),
+       PINMUX_IPSR_MSEL(IP1_10_8, MSIOF1_RXD, SEL_SOF1_0),
+       PINMUX_IPSR_MSEL(IP1_10_8, SCL3_D, SEL_IIC3_3),
+       PINMUX_IPSR_MSEL(IP1_10_8, MSIOF1_RXD_D, SEL_SOF1_3),
        PINMUX_IPSR_DATA(IP1_13_11, A12),
-       PINMUX_IPSR_MODSEL_DATA(IP1_13_11, FMCLK, SEL_FM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_13_11, SDA3_D, SEL_IIC3_3),
-       PINMUX_IPSR_MODSEL_DATA(IP1_13_11, MSIOF1_SCK_D, SEL_SOF1_3),
+       PINMUX_IPSR_MSEL(IP1_13_11, FMCLK, SEL_FM_0),
+       PINMUX_IPSR_MSEL(IP1_13_11, SDA3_D, SEL_IIC3_3),
+       PINMUX_IPSR_MSEL(IP1_13_11, MSIOF1_SCK_D, SEL_SOF1_3),
        PINMUX_IPSR_DATA(IP1_16_14, A13),
-       PINMUX_IPSR_MODSEL_DATA(IP1_16_14, ATAG0_N_C, SEL_LBS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP1_16_14, BPFCLK, SEL_FM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_16_14, MSIOF1_SS1_D, SEL_SOF1_3),
+       PINMUX_IPSR_MSEL(IP1_16_14, ATAG0_N_C, SEL_LBS_2),
+       PINMUX_IPSR_MSEL(IP1_16_14, BPFCLK, SEL_FM_0),
+       PINMUX_IPSR_MSEL(IP1_16_14, MSIOF1_SS1_D, SEL_SOF1_3),
        PINMUX_IPSR_DATA(IP1_19_17, A14),
-       PINMUX_IPSR_MODSEL_DATA(IP1_19_17, ATADIR0_N_C, SEL_LBS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP1_19_17, FMIN, SEL_FM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_19_17, FMIN_C, SEL_FM_2),
-       PINMUX_IPSR_MODSEL_DATA(IP1_19_17, MSIOF1_SYNC_D, SEL_SOF1_3),
+       PINMUX_IPSR_MSEL(IP1_19_17, ATADIR0_N_C, SEL_LBS_2),
+       PINMUX_IPSR_MSEL(IP1_19_17, FMIN, SEL_FM_0),
+       PINMUX_IPSR_MSEL(IP1_19_17, FMIN_C, SEL_FM_2),
+       PINMUX_IPSR_MSEL(IP1_19_17, MSIOF1_SYNC_D, SEL_SOF1_3),
        PINMUX_IPSR_DATA(IP1_22_20, A15),
-       PINMUX_IPSR_MODSEL_DATA(IP1_22_20, BPFCLK_C, SEL_FM_2),
+       PINMUX_IPSR_MSEL(IP1_22_20, BPFCLK_C, SEL_FM_2),
        PINMUX_IPSR_DATA(IP1_25_23, A16),
-       PINMUX_IPSR_MODSEL_DATA(IP1_25_23, DREQ2_B, SEL_LBS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_25_23, FMCLK_C, SEL_FM_2),
-       PINMUX_IPSR_MODSEL_DATA(IP1_25_23, SCIFA1_SCK_B, SEL_SCIFA1_1),
+       PINMUX_IPSR_MSEL(IP1_25_23, DREQ2_B, SEL_LBS_1),
+       PINMUX_IPSR_MSEL(IP1_25_23, FMCLK_C, SEL_FM_2),
+       PINMUX_IPSR_MSEL(IP1_25_23, SCIFA1_SCK_B, SEL_SCIFA1_1),
        PINMUX_IPSR_DATA(IP1_28_26, A17),
-       PINMUX_IPSR_MODSEL_DATA(IP1_28_26, DACK2_B, SEL_LBS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_28_26, SDA0_C, SEL_IIC0_2),
+       PINMUX_IPSR_MSEL(IP1_28_26, DACK2_B, SEL_LBS_1),
+       PINMUX_IPSR_MSEL(IP1_28_26, SDA0_C, SEL_IIC0_2),
        PINMUX_IPSR_DATA(IP1_31_29, A18),
-       PINMUX_IPSR_MODSEL_DATA(IP1_31_29, DREQ1, SEL_LBS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_31_29, SCIFA1_RXD_C, SEL_SCIFA1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP1_31_29, SCIFB1_RXD_C, SEL_SCIFB1_2),
+       PINMUX_IPSR_MSEL(IP1_31_29, DREQ1, SEL_LBS_0),
+       PINMUX_IPSR_MSEL(IP1_31_29, SCIFA1_RXD_C, SEL_SCIFA1_2),
+       PINMUX_IPSR_MSEL(IP1_31_29, SCIFB1_RXD_C, SEL_SCIFB1_2),
 
        /* IPSR2 */
        PINMUX_IPSR_DATA(IP2_2_0, A19),
        PINMUX_IPSR_DATA(IP2_2_0, DACK1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SCIFA1_TXD_C, SEL_SCIFA1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SCIFB1_TXD_C, SEL_SCIFB1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SCIFB1_SCK_B, SEL_SCIFB1_0),
+       PINMUX_IPSR_MSEL(IP2_2_0, SCIFA1_TXD_C, SEL_SCIFA1_2),
+       PINMUX_IPSR_MSEL(IP2_2_0, SCIFB1_TXD_C, SEL_SCIFB1_2),
+       PINMUX_IPSR_MSEL(IP2_2_0, SCIFB1_SCK_B, SEL_SCIFB1_1),
        PINMUX_IPSR_DATA(IP2_2_0, A20),
-       PINMUX_IPSR_MODSEL_DATA(IP2_4_3, SPCLK, SEL_QSP_0),
+       PINMUX_IPSR_MSEL(IP2_4_3, SPCLK, SEL_QSP_0),
        PINMUX_IPSR_DATA(IP2_6_5, A21),
-       PINMUX_IPSR_MODSEL_DATA(IP2_6_5, ATAWR0_N_B, SEL_LBS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_6_5, MOSI_IO0, SEL_QSP_0),
+       PINMUX_IPSR_MSEL(IP2_6_5, ATAWR0_N_B, SEL_LBS_1),
+       PINMUX_IPSR_MSEL(IP2_6_5, MOSI_IO0, SEL_QSP_0),
        PINMUX_IPSR_DATA(IP2_9_7, A22),
-       PINMUX_IPSR_MODSEL_DATA(IP2_9_7, MISO_IO1, SEL_QSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_9_7, FMCLK_B, SEL_FM_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_9_7, TX0, SEL_SCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_9_7, SCIFA0_TXD, SEL_SCFA_0),
+       PINMUX_IPSR_MSEL(IP2_9_7, MISO_IO1, SEL_QSP_0),
+       PINMUX_IPSR_MSEL(IP2_9_7, FMCLK_B, SEL_FM_1),
+       PINMUX_IPSR_MSEL(IP2_9_7, TX0, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP2_9_7, SCIFA0_TXD, SEL_SCFA_0),
        PINMUX_IPSR_DATA(IP2_12_10, A23),
-       PINMUX_IPSR_MODSEL_DATA(IP2_12_10, IO2, SEL_QSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_12_10, BPFCLK_B, SEL_FM_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_12_10, RX0, SEL_SCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_12_10, SCIFA0_RXD, SEL_SCFA_0),
+       PINMUX_IPSR_MSEL(IP2_12_10, IO2, SEL_QSP_0),
+       PINMUX_IPSR_MSEL(IP2_12_10, BPFCLK_B, SEL_FM_1),
+       PINMUX_IPSR_MSEL(IP2_12_10, RX0, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP2_12_10, SCIFA0_RXD, SEL_SCFA_0),
        PINMUX_IPSR_DATA(IP2_15_13, A24),
-       PINMUX_IPSR_MODSEL_DATA(IP2_15_13, DREQ2, SEL_LBS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_15_13, IO3, SEL_QSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_15_13, TX1, SEL_SCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_15_13, SCIFA1_TXD, SEL_SCIFA1_0),
+       PINMUX_IPSR_MSEL(IP2_15_13, DREQ2, SEL_LBS_0),
+       PINMUX_IPSR_MSEL(IP2_15_13, IO3, SEL_QSP_0),
+       PINMUX_IPSR_MSEL(IP2_15_13, TX1, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP2_15_13, SCIFA1_TXD, SEL_SCIFA1_0),
        PINMUX_IPSR_DATA(IP2_18_16, A25),
-       PINMUX_IPSR_MODSEL_DATA(IP2_18_16, DACK2, SEL_LBS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_18_16, SSL, SEL_QSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_18_16, DREQ1_C, SEL_LBS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP2_18_16, RX1, SEL_SCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_18_16, SCIFA1_RXD, SEL_SCIFA1_0),
+       PINMUX_IPSR_MSEL(IP2_18_16, DACK2, SEL_LBS_0),
+       PINMUX_IPSR_MSEL(IP2_18_16, SSL, SEL_QSP_0),
+       PINMUX_IPSR_MSEL(IP2_18_16, DREQ1_C, SEL_LBS_2),
+       PINMUX_IPSR_MSEL(IP2_18_16, RX1, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP2_18_16, SCIFA1_RXD, SEL_SCIFA1_0),
        PINMUX_IPSR_DATA(IP2_20_19, CS0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP2_20_19, ATAG0_N_B, SEL_LBS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_20_19, SCL1, SEL_IIC1_0),
+       PINMUX_IPSR_MSEL(IP2_20_19, ATAG0_N_B, SEL_LBS_1),
+       PINMUX_IPSR_MSEL(IP2_20_19, SCL1, SEL_IIC1_0),
        PINMUX_IPSR_DATA(IP2_22_21, CS1_N_A26),
-       PINMUX_IPSR_MODSEL_DATA(IP2_22_21, ATADIR0_N_B, SEL_LBS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_22_21, SDA1, SEL_IIC1_0),
+       PINMUX_IPSR_MSEL(IP2_22_21, ATADIR0_N_B, SEL_LBS_1),
+       PINMUX_IPSR_MSEL(IP2_22_21, SDA1, SEL_IIC1_0),
        PINMUX_IPSR_DATA(IP2_24_23, EX_CS1_N),
-       PINMUX_IPSR_MODSEL_DATA(IP2_24_23, MSIOF2_SCK, SEL_SOF2_0),
+       PINMUX_IPSR_MSEL(IP2_24_23, MSIOF2_SCK, SEL_SOF2_0),
        PINMUX_IPSR_DATA(IP2_26_25, EX_CS2_N),
-       PINMUX_IPSR_MODSEL_DATA(IP2_26_25, ATAWR0_N, SEL_LBS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_26_25, MSIOF2_SYNC, SEL_SOF2_0),
+       PINMUX_IPSR_MSEL(IP2_26_25, ATAWR0_N, SEL_LBS_0),
+       PINMUX_IPSR_MSEL(IP2_26_25, MSIOF2_SYNC, SEL_SOF2_0),
        PINMUX_IPSR_DATA(IP2_29_27, EX_CS3_N),
-       PINMUX_IPSR_MODSEL_DATA(IP2_29_27, ATADIR0_N, SEL_LBS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_29_27, MSIOF2_TXD, SEL_SOF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_29_27, ATAG0_N, SEL_LBS_0),
+       PINMUX_IPSR_MSEL(IP2_29_27, ATADIR0_N, SEL_LBS_0),
+       PINMUX_IPSR_MSEL(IP2_29_27, MSIOF2_TXD, SEL_SOF2_0),
+       PINMUX_IPSR_MSEL(IP2_29_27, ATAG0_N, SEL_LBS_0),
        PINMUX_IPSR_DATA(IP2_29_27, EX_WAIT1),
 
        /* IPSR3 */
        PINMUX_IPSR_DATA(IP3_2_0, EX_CS4_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_2_0, ATARD0_N, SEL_LBS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_2_0, MSIOF2_RXD, SEL_SOF2_0),
+       PINMUX_IPSR_MSEL(IP3_2_0, ATARD0_N, SEL_LBS_0),
+       PINMUX_IPSR_MSEL(IP3_2_0, MSIOF2_RXD, SEL_SOF2_0),
        PINMUX_IPSR_DATA(IP3_2_0, EX_WAIT2),
        PINMUX_IPSR_DATA(IP3_5_3, EX_CS5_N),
        PINMUX_IPSR_DATA(IP3_5_3, ATACS00_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_5_3, MSIOF2_SS1, SEL_SOF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_5_3, HRX1_B, SEL_HSCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_5_3, SCIFB1_RXD_B, SEL_SCIFB1_1),
+       PINMUX_IPSR_MSEL(IP3_5_3, MSIOF2_SS1, SEL_SOF2_0),
+       PINMUX_IPSR_MSEL(IP3_5_3, HRX1_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP3_5_3, SCIFB1_RXD_B, SEL_SCIFB1_1),
        PINMUX_IPSR_DATA(IP3_5_3, PWM1),
        PINMUX_IPSR_DATA(IP3_5_3, TPU_TO1),
        PINMUX_IPSR_DATA(IP3_8_6, BS_N),
        PINMUX_IPSR_DATA(IP3_8_6, ATACS10_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_8_6, MSIOF2_SS2, SEL_SOF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_8_6, HTX1_B, SEL_HSCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_8_6, SCIFB1_TXD_B, SEL_SCIFB1_1),
+       PINMUX_IPSR_MSEL(IP3_8_6, MSIOF2_SS2, SEL_SOF2_0),
+       PINMUX_IPSR_MSEL(IP3_8_6, HTX1_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP3_8_6, SCIFB1_TXD_B, SEL_SCIFB1_1),
        PINMUX_IPSR_DATA(IP3_8_6, PWM2),
        PINMUX_IPSR_DATA(IP3_8_6, TPU_TO2),
        PINMUX_IPSR_DATA(IP3_11_9, RD_WR_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_9, HRX2_B, SEL_HSCIF2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_9, FMIN_B, SEL_FM_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_9, SCIFB0_RXD_B, SEL_SCIFB_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_9, DREQ1_D, SEL_LBS_1),
+       PINMUX_IPSR_MSEL(IP3_11_9, HRX2_B, SEL_HSCIF2_1),
+       PINMUX_IPSR_MSEL(IP3_11_9, FMIN_B, SEL_FM_1),
+       PINMUX_IPSR_MSEL(IP3_11_9, SCIFB0_RXD_B, SEL_SCIFB_1),
+       PINMUX_IPSR_MSEL(IP3_11_9, DREQ1_D, SEL_LBS_1),
        PINMUX_IPSR_DATA(IP3_13_12, WE0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_13_12, HCTS2_N_B, SEL_HSCIF2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_13_12, SCIFB0_TXD_B, SEL_SCIFB_1),
+       PINMUX_IPSR_MSEL(IP3_13_12, HCTS2_N_B, SEL_HSCIF2_1),
+       PINMUX_IPSR_MSEL(IP3_13_12, SCIFB0_TXD_B, SEL_SCIFB_1),
        PINMUX_IPSR_DATA(IP3_15_14, WE1_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_15_14, ATARD0_N_B, SEL_LBS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_15_14, HTX2_B, SEL_HSCIF2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_15_14, SCIFB0_RTS_N_B, SEL_SCIFB_1),
+       PINMUX_IPSR_MSEL(IP3_15_14, ATARD0_N_B, SEL_LBS_1),
+       PINMUX_IPSR_MSEL(IP3_15_14, HTX2_B, SEL_HSCIF2_1),
+       PINMUX_IPSR_MSEL(IP3_15_14, SCIFB0_RTS_N_B, SEL_SCIFB_1),
        PINMUX_IPSR_DATA(IP3_17_16, EX_WAIT0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_17_16, HRTS2_N_B, SEL_HSCIF2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_17_16, SCIFB0_CTS_N_B, SEL_SCIFB_1),
+       PINMUX_IPSR_MSEL(IP3_17_16, HRTS2_N_B, SEL_HSCIF2_1),
+       PINMUX_IPSR_MSEL(IP3_17_16, SCIFB0_CTS_N_B, SEL_SCIFB_1),
        PINMUX_IPSR_DATA(IP3_19_18, DREQ0),
        PINMUX_IPSR_DATA(IP3_19_18, PWM3),
        PINMUX_IPSR_DATA(IP3_19_18, TPU_TO3),
        PINMUX_IPSR_DATA(IP3_21_20, DACK0),
        PINMUX_IPSR_DATA(IP3_21_20, DRACK0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_21_20, REMOCON, SEL_RCN_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_24_22, SPEEDIN, SEL_RSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_24_22, HSCK0_C, SEL_HSCIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_24_22, HSCK2_C, SEL_HSCIF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_24_22, SCIFB0_SCK_B, SEL_SCIFB_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_24_22, SCIFB2_SCK_B, SEL_SCIFB2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_24_22, DREQ2_C, SEL_LBS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_30_28, HTX2_C, SEL_HSCIF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_27_25, SSI_SCK0129, SEL_SSI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_27_25, HRX0_C, SEL_HSCIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_27_25, HRX2_C, SEL_HSCIF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_27_25, SCIFB0_RXD_C, SEL_SCIFB_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_27_25, SCIFB2_RXD_C, SEL_SCIFB2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_30_28, SSI_WS0129, SEL_SSI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_30_28, HTX0_C, SEL_HSCIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_30_28, HTX2_C, SEL_HSCIF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_30_28, SCIFB0_TXD_C, SEL_SCIFB_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_30_28, SCIFB2_TXD_C, SEL_SCIFB2_2),
+       PINMUX_IPSR_MSEL(IP3_21_20, REMOCON, SEL_RCN_0),
+       PINMUX_IPSR_MSEL(IP3_24_22, SPEEDIN, SEL_RSP_0),
+       PINMUX_IPSR_MSEL(IP3_24_22, HSCK0_C, SEL_HSCIF0_2),
+       PINMUX_IPSR_MSEL(IP3_24_22, HSCK2_C, SEL_HSCIF2_2),
+       PINMUX_IPSR_MSEL(IP3_24_22, SCIFB0_SCK_B, SEL_SCIFB_1),
+       PINMUX_IPSR_MSEL(IP3_24_22, SCIFB2_SCK_B, SEL_SCIFB2_1),
+       PINMUX_IPSR_MSEL(IP3_24_22, DREQ2_C, SEL_LBS_2),
+       PINMUX_IPSR_MSEL(IP3_30_28, HTX2_C, SEL_HSCIF2_2),
+       PINMUX_IPSR_MSEL(IP3_27_25, SSI_SCK0129, SEL_SSI0_0),
+       PINMUX_IPSR_MSEL(IP3_27_25, HRX0_C, SEL_HSCIF0_2),
+       PINMUX_IPSR_MSEL(IP3_27_25, HRX2_C, SEL_HSCIF2_2),
+       PINMUX_IPSR_MSEL(IP3_27_25, SCIFB0_RXD_C, SEL_SCIFB_2),
+       PINMUX_IPSR_MSEL(IP3_27_25, SCIFB2_RXD_C, SEL_SCIFB2_2),
+       PINMUX_IPSR_MSEL(IP3_30_28, SSI_WS0129, SEL_SSI0_0),
+       PINMUX_IPSR_MSEL(IP3_30_28, HTX0_C, SEL_HSCIF0_2),
+       PINMUX_IPSR_MSEL(IP3_30_28, HTX2_C, SEL_HSCIF2_2),
+       PINMUX_IPSR_MSEL(IP3_30_28, SCIFB0_TXD_C, SEL_SCIFB_2),
+       PINMUX_IPSR_MSEL(IP3_30_28, SCIFB2_TXD_C, SEL_SCIFB2_2),
 
        /* IPSR4 */
-       PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SSI_SDATA0, SEL_SSI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SCL0_B, SEL_IIC0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SCL7_B, SEL_IIC7_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_1_0, MSIOF2_SCK_C, SEL_SOF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SSI_SCK1, SEL_SSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SDA0_B, SEL_IIC0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SDA7_B, SEL_IIC7_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_4_2, MSIOF2_SYNC_C, SEL_SOF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_4_2, GLO_I0_D, SEL_GPS_3),
-       PINMUX_IPSR_MODSEL_DATA(IP4_7_5, SSI_WS1, SEL_SSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_7_5, SCL1_B, SEL_IIC1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_7_5, SCL8_B, SEL_IIC8_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_7_5, MSIOF2_TXD_C, SEL_SOF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_7_5, GLO_I1_D, SEL_GPS_3),
-       PINMUX_IPSR_MODSEL_DATA(IP4_9_8, SSI_SDATA1, SEL_SSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_9_8, SDA1_B, SEL_IIC1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_9_8, SDA8_B, SEL_IIC8_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_9_8, MSIOF2_RXD_C, SEL_SOF2_2),
+       PINMUX_IPSR_MSEL(IP4_1_0, SSI_SDATA0, SEL_SSI0_0),
+       PINMUX_IPSR_MSEL(IP4_1_0, SCL0_B, SEL_IIC0_1),
+       PINMUX_IPSR_MSEL(IP4_1_0, SCL7_B, SEL_IIC7_1),
+       PINMUX_IPSR_MSEL(IP4_1_0, MSIOF2_SCK_C, SEL_SOF2_2),
+       PINMUX_IPSR_MSEL(IP4_4_2, SSI_SCK1, SEL_SSI1_0),
+       PINMUX_IPSR_MSEL(IP4_4_2, SDA0_B, SEL_IIC0_1),
+       PINMUX_IPSR_MSEL(IP4_4_2, SDA7_B, SEL_IIC7_1),
+       PINMUX_IPSR_MSEL(IP4_4_2, MSIOF2_SYNC_C, SEL_SOF2_2),
+       PINMUX_IPSR_MSEL(IP4_4_2, GLO_I0_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP4_7_5, SSI_WS1, SEL_SSI1_0),
+       PINMUX_IPSR_MSEL(IP4_7_5, SCL1_B, SEL_IIC1_1),
+       PINMUX_IPSR_MSEL(IP4_7_5, SCL8_B, SEL_IIC8_1),
+       PINMUX_IPSR_MSEL(IP4_7_5, MSIOF2_TXD_C, SEL_SOF2_2),
+       PINMUX_IPSR_MSEL(IP4_7_5, GLO_I1_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP4_9_8, SSI_SDATA1, SEL_SSI1_0),
+       PINMUX_IPSR_MSEL(IP4_9_8, SDA1_B, SEL_IIC1_1),
+       PINMUX_IPSR_MSEL(IP4_9_8, SDA8_B, SEL_IIC8_1),
+       PINMUX_IPSR_MSEL(IP4_9_8, MSIOF2_RXD_C, SEL_SOF2_2),
        PINMUX_IPSR_DATA(IP4_12_10, SSI_SCK2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_12_10, SCL2, SEL_IIC2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_12_10, GPS_CLK_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_12_10, GLO_Q0_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP4_12_10, SCL2, SEL_IIC2_0),
+       PINMUX_IPSR_MSEL(IP4_12_10, GPS_CLK_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP4_12_10, GLO_Q0_D, SEL_GPS_3),
        PINMUX_IPSR_DATA(IP4_15_13, SSI_WS2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_15_13, SDA2, SEL_IIC2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_15_13, GPS_SIGN_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_15_13, RX2_E, SEL_SCIF2_4),
-       PINMUX_IPSR_MODSEL_DATA(IP4_15_13, GLO_Q1_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP4_15_13, SDA2, SEL_IIC2_0),
+       PINMUX_IPSR_MSEL(IP4_15_13, GPS_SIGN_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP4_15_13, RX2_E, SEL_SCIF2_4),
+       PINMUX_IPSR_MSEL(IP4_15_13, GLO_Q1_D, SEL_GPS_3),
        PINMUX_IPSR_DATA(IP4_18_16, SSI_SDATA2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_18_16, GPS_MAG_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_18_16, TX2_E, SEL_SCIF2_4),
+       PINMUX_IPSR_MSEL(IP4_18_16, GPS_MAG_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP4_18_16, TX2_E, SEL_SCIF2_4),
        PINMUX_IPSR_DATA(IP4_19, SSI_SCK34),
        PINMUX_IPSR_DATA(IP4_20, SSI_WS34),
        PINMUX_IPSR_DATA(IP4_21, SSI_SDATA3),
        PINMUX_IPSR_DATA(IP4_23_22, SSI_SCK4),
-       PINMUX_IPSR_MODSEL_DATA(IP4_23_22, GLO_SS_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP4_23_22, GLO_SS_D, SEL_GPS_3),
        PINMUX_IPSR_DATA(IP4_25_24, SSI_WS4),
-       PINMUX_IPSR_MODSEL_DATA(IP4_25_24, GLO_RFON_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP4_25_24, GLO_RFON_D, SEL_GPS_3),
        PINMUX_IPSR_DATA(IP4_27_26, SSI_SDATA4),
-       PINMUX_IPSR_MODSEL_DATA(IP4_27_26, MSIOF2_SCK_D, SEL_SOF2_3),
+       PINMUX_IPSR_MSEL(IP4_27_26, MSIOF2_SCK_D, SEL_SOF2_3),
        PINMUX_IPSR_DATA(IP4_30_28, SSI_SCK5),
-       PINMUX_IPSR_MODSEL_DATA(IP4_30_28, MSIOF1_SCK_C, SEL_SOF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_30_28, TS_SDATA0, SEL_TSIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_30_28, GLO_I0, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_30_28, MSIOF2_SYNC_D, SEL_SOF2_3),
+       PINMUX_IPSR_MSEL(IP4_30_28, MSIOF1_SCK_C, SEL_SOF1_2),
+       PINMUX_IPSR_MSEL(IP4_30_28, TS_SDATA0, SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP4_30_28, GLO_I0, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP4_30_28, MSIOF2_SYNC_D, SEL_SOF2_3),
        PINMUX_IPSR_DATA(IP4_30_28, VI1_R2_B),
 
        /* IPSR5 */
        PINMUX_IPSR_DATA(IP5_2_0, SSI_WS5),
-       PINMUX_IPSR_MODSEL_DATA(IP5_2_0, MSIOF1_SYNC_C, SEL_SOF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP5_2_0, TS_SCK0, SEL_TSIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_2_0, GLO_I1, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_2_0, MSIOF2_TXD_D, SEL_SOF2_3),
+       PINMUX_IPSR_MSEL(IP5_2_0, MSIOF1_SYNC_C, SEL_SOF1_2),
+       PINMUX_IPSR_MSEL(IP5_2_0, TS_SCK0, SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP5_2_0, GLO_I1, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP5_2_0, MSIOF2_TXD_D, SEL_SOF2_3),
        PINMUX_IPSR_DATA(IP5_2_0, VI1_R3_B),
        PINMUX_IPSR_DATA(IP5_5_3, SSI_SDATA5),
-       PINMUX_IPSR_MODSEL_DATA(IP5_5_3, MSIOF1_TXD_C, SEL_SOF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP5_5_3, TS_SDEN0, SEL_TSIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_5_3, GLO_Q0, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_5_3, MSIOF2_SS1_D, SEL_SOF2_3),
+       PINMUX_IPSR_MSEL(IP5_5_3, MSIOF1_TXD_C, SEL_SOF1_2),
+       PINMUX_IPSR_MSEL(IP5_5_3, TS_SDEN0, SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP5_5_3, GLO_Q0, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP5_5_3, MSIOF2_SS1_D, SEL_SOF2_3),
        PINMUX_IPSR_DATA(IP5_5_3, VI1_R4_B),
        PINMUX_IPSR_DATA(IP5_8_6, SSI_SCK6),
-       PINMUX_IPSR_MODSEL_DATA(IP5_8_6, MSIOF1_RXD_C, SEL_SOF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP5_8_6, TS_SPSYNC0, SEL_TSIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_8_6, GLO_Q1, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_8_6, MSIOF2_RXD_D, SEL_SOF2_3),
+       PINMUX_IPSR_MSEL(IP5_8_6, MSIOF1_RXD_C, SEL_SOF1_2),
+       PINMUX_IPSR_MSEL(IP5_8_6, TS_SPSYNC0, SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP5_8_6, GLO_Q1, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP5_8_6, MSIOF2_RXD_D, SEL_SOF2_3),
        PINMUX_IPSR_DATA(IP5_8_6, VI1_R5_B),
        PINMUX_IPSR_DATA(IP5_11_9, SSI_WS6),
-       PINMUX_IPSR_MODSEL_DATA(IP5_11_9, GLO_SCLK, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_11_9, MSIOF2_SS2_D, SEL_SOF2_3),
+       PINMUX_IPSR_MSEL(IP5_11_9, GLO_SCLK, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP5_11_9, MSIOF2_SS2_D, SEL_SOF2_3),
        PINMUX_IPSR_DATA(IP5_11_9, VI1_R6_B),
        PINMUX_IPSR_DATA(IP5_14_12, SSI_SDATA6),
-       PINMUX_IPSR_MODSEL_DATA(IP5_14_12, STP_IVCXO27_0_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_14_12, GLO_SDATA, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP5_14_12, STP_IVCXO27_0_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP5_14_12, GLO_SDATA, SEL_GPS_0),
        PINMUX_IPSR_DATA(IP5_14_12, VI1_R7_B),
-       PINMUX_IPSR_MODSEL_DATA(IP5_16_15, SSI_SCK78, SEL_SSI7_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_16_15, STP_ISCLK_0_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_16_15, GLO_SS, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_19_17, SSI_WS78, SEL_SSI7_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_19_17, TX0_D, SEL_SCIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_19_17, STP_ISD_0_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_19_17, GLO_RFON, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_21_20, SSI_SDATA7, SEL_SSI7_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_21_20, RX0_D, SEL_SCIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_21_20, STP_ISEN_0_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_22, SSI_SDATA8, SEL_SSI8_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_22, TX1_D, SEL_SCIF1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_23_22, STP_ISSYNC_0_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_25_24, SSI_SCK9, SEL_SSI9_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_25_24, RX1_D, SEL_SCIF1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_25_24, GLO_SCLK_D, SEL_GPS_3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_28_26, SSI_WS9, SEL_SSI9_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_28_26, TX3_D, SEL_SCIF3_3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_28_26, CAN0_TX_D, SEL_CAN0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_28_26, GLO_SDATA_D, SEL_GPS_3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_31_29, SSI_SDATA9, SEL_SSI9_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_31_29, RX3_D, SEL_SCIF3_3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_31_29, CAN0_RX_D, SEL_CAN0_3),
+       PINMUX_IPSR_MSEL(IP5_16_15, SSI_SCK78, SEL_SSI7_0),
+       PINMUX_IPSR_MSEL(IP5_16_15, STP_ISCLK_0_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP5_16_15, GLO_SS, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP5_19_17, SSI_WS78, SEL_SSI7_0),
+       PINMUX_IPSR_MSEL(IP5_19_17, TX0_D, SEL_SCIF0_3),
+       PINMUX_IPSR_MSEL(IP5_19_17, STP_ISD_0_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP5_19_17, GLO_RFON, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP5_21_20, SSI_SDATA7, SEL_SSI7_0),
+       PINMUX_IPSR_MSEL(IP5_21_20, RX0_D, SEL_SCIF0_3),
+       PINMUX_IPSR_MSEL(IP5_21_20, STP_ISEN_0_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP5_23_22, SSI_SDATA8, SEL_SSI8_0),
+       PINMUX_IPSR_MSEL(IP5_23_22, TX1_D, SEL_SCIF1_3),
+       PINMUX_IPSR_MSEL(IP5_23_22, STP_ISSYNC_0_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP5_25_24, SSI_SCK9, SEL_SSI9_0),
+       PINMUX_IPSR_MSEL(IP5_25_24, RX1_D, SEL_SCIF1_3),
+       PINMUX_IPSR_MSEL(IP5_25_24, GLO_SCLK_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP5_28_26, SSI_WS9, SEL_SSI9_0),
+       PINMUX_IPSR_MSEL(IP5_28_26, TX3_D, SEL_SCIF3_3),
+       PINMUX_IPSR_MSEL(IP5_28_26, CAN0_TX_D, SEL_CAN0_3),
+       PINMUX_IPSR_MSEL(IP5_28_26, GLO_SDATA_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP5_31_29, SSI_SDATA9, SEL_SSI9_0),
+       PINMUX_IPSR_MSEL(IP5_31_29, RX3_D, SEL_SCIF3_3),
+       PINMUX_IPSR_MSEL(IP5_31_29, CAN0_RX_D, SEL_CAN0_3),
 
        /* IPSR6 */
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, AUDIO_CLKB, SEL_ADG_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, STP_OPWM_0_B, SEL_SSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, MSIOF1_SCK_B, SEL_SOF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, SCIF_CLK, SEL_SCIF_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, BPFCLK_E, SEL_FM_4),
+       PINMUX_IPSR_MSEL(IP6_2_0, AUDIO_CLKB, SEL_ADG_0),
+       PINMUX_IPSR_MSEL(IP6_2_0, STP_OPWM_0_B, SEL_SSP_1),
+       PINMUX_IPSR_MSEL(IP6_2_0, MSIOF1_SCK_B, SEL_SOF1_1),
+       PINMUX_IPSR_MSEL(IP6_2_0, SCIF_CLK, SEL_SCIF_0),
+       PINMUX_IPSR_MSEL(IP6_2_0, BPFCLK_E, SEL_FM_4),
        PINMUX_IPSR_DATA(IP6_5_3, AUDIO_CLKC),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SCIFB0_SCK_C, SEL_SCIFB_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, MSIOF1_SYNC_B, SEL_SOF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, RX2, SEL_SCIF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SCIFA2_RXD, SEL_SCIFA2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, FMIN_E, SEL_FM_4),
+       PINMUX_IPSR_MSEL(IP6_5_3, SCIFB0_SCK_C, SEL_SCIFB_2),
+       PINMUX_IPSR_MSEL(IP6_5_3, MSIOF1_SYNC_B, SEL_SOF1_1),
+       PINMUX_IPSR_MSEL(IP6_5_3, RX2, SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP6_5_3, SCIFA2_RXD, SEL_SCIFA2_0),
+       PINMUX_IPSR_MSEL(IP6_5_3, FMIN_E, SEL_FM_4),
        PINMUX_IPSR_DATA(IP6_7_6, AUDIO_CLKOUT),
-       PINMUX_IPSR_MODSEL_DATA(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, TX2, SEL_SCIF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0),
+       PINMUX_IPSR_MSEL(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1),
+       PINMUX_IPSR_MSEL(IP6_5_3, TX2, SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0),
        PINMUX_IPSR_DATA(IP6_9_8, IRQ0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3),
+       PINMUX_IPSR_MSEL(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3),
        PINMUX_IPSR_DATA(IP6_9_8, INTC_IRQ0_N),
        PINMUX_IPSR_DATA(IP6_11_10, IRQ1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_11_10, SCIFB1_SCK_C, SEL_SCIFB1_2),
+       PINMUX_IPSR_MSEL(IP6_11_10, SCIFB1_SCK_C, SEL_SCIFB1_2),
        PINMUX_IPSR_DATA(IP6_11_10, INTC_IRQ1_N),
        PINMUX_IPSR_DATA(IP6_13_12, IRQ2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_13_12, SCIFB1_TXD_D, SEL_SCIFB1_3),
+       PINMUX_IPSR_MSEL(IP6_13_12, SCIFB1_TXD_D, SEL_SCIFB1_3),
        PINMUX_IPSR_DATA(IP6_13_12, INTC_IRQ2_N),
        PINMUX_IPSR_DATA(IP6_15_14, IRQ3),
-       PINMUX_IPSR_MODSEL_DATA(IP6_15_14, SCL4_C, SEL_IIC4_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_15_14, MSIOF2_TXD_E, SEL_SOF2_4),
+       PINMUX_IPSR_MSEL(IP6_15_14, SCL4_C, SEL_IIC4_2),
+       PINMUX_IPSR_MSEL(IP6_15_14, MSIOF2_TXD_E, SEL_SOF2_4),
        PINMUX_IPSR_DATA(IP6_15_14, INTC_IRQ4_N),
        PINMUX_IPSR_DATA(IP6_18_16, IRQ4),
-       PINMUX_IPSR_MODSEL_DATA(IP6_18_16, HRX1_C, SEL_HSCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_18_16, SDA4_C, SEL_IIC4_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_18_16, MSIOF2_RXD_E, SEL_SOF2_4),
+       PINMUX_IPSR_MSEL(IP6_18_16, HRX1_C, SEL_HSCIF1_2),
+       PINMUX_IPSR_MSEL(IP6_18_16, SDA4_C, SEL_IIC4_2),
+       PINMUX_IPSR_MSEL(IP6_18_16, MSIOF2_RXD_E, SEL_SOF2_4),
        PINMUX_IPSR_DATA(IP6_18_16, INTC_IRQ4_N),
        PINMUX_IPSR_DATA(IP6_20_19, IRQ5),
-       PINMUX_IPSR_MODSEL_DATA(IP6_20_19, HTX1_C, SEL_HSCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_20_19, SCL1_E, SEL_IIC1_4),
-       PINMUX_IPSR_MODSEL_DATA(IP6_20_19, MSIOF2_SCK_E, SEL_SOF2_4),
+       PINMUX_IPSR_MSEL(IP6_20_19, HTX1_C, SEL_HSCIF1_2),
+       PINMUX_IPSR_MSEL(IP6_20_19, SCL1_E, SEL_IIC1_4),
+       PINMUX_IPSR_MSEL(IP6_20_19, MSIOF2_SCK_E, SEL_SOF2_4),
        PINMUX_IPSR_DATA(IP6_23_21, IRQ6),
-       PINMUX_IPSR_MODSEL_DATA(IP6_23_21, HSCK1_C, SEL_HSCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_23_21, MSIOF1_SS2_B, SEL_SOF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_23_21, SDA1_E, SEL_IIC1_4),
-       PINMUX_IPSR_MODSEL_DATA(IP6_23_21, MSIOF2_SYNC_E, SEL_SOF2_4),
+       PINMUX_IPSR_MSEL(IP6_23_21, HSCK1_C, SEL_HSCIF1_2),
+       PINMUX_IPSR_MSEL(IP6_23_21, MSIOF1_SS2_B, SEL_SOF1_1),
+       PINMUX_IPSR_MSEL(IP6_23_21, SDA1_E, SEL_IIC1_4),
+       PINMUX_IPSR_MSEL(IP6_23_21, MSIOF2_SYNC_E, SEL_SOF2_4),
        PINMUX_IPSR_DATA(IP6_26_24, IRQ7),
-       PINMUX_IPSR_MODSEL_DATA(IP6_26_24, HCTS1_N_C, SEL_HSCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_26_24, MSIOF1_TXD_B, SEL_SOF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_26_24, GPS_CLK_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_26_24, GPS_CLK_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP6_26_24, HCTS1_N_C, SEL_HSCIF1_2),
+       PINMUX_IPSR_MSEL(IP6_26_24, MSIOF1_TXD_B, SEL_SOF1_1),
+       PINMUX_IPSR_MSEL(IP6_26_24, GPS_CLK_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP6_26_24, GPS_CLK_D, SEL_GPS_3),
        PINMUX_IPSR_DATA(IP6_29_27, IRQ8),
-       PINMUX_IPSR_MODSEL_DATA(IP6_29_27, HRTS1_N_C, SEL_HSCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_29_27, MSIOF1_RXD_B, SEL_SOF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_29_27, GPS_SIGN_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_29_27, GPS_SIGN_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP6_29_27, HRTS1_N_C, SEL_HSCIF1_2),
+       PINMUX_IPSR_MSEL(IP6_29_27, MSIOF1_RXD_B, SEL_SOF1_1),
+       PINMUX_IPSR_MSEL(IP6_29_27, GPS_SIGN_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP6_29_27, GPS_SIGN_D, SEL_GPS_3),
 
        /* IPSR7 */
        PINMUX_IPSR_DATA(IP7_2_0, IRQ9),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, DU1_DOTCLKIN_B, SEL_DIS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, CAN_CLK_D, SEL_CANCLK_3),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, GPS_MAG_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, SCIF_CLK_B, SEL_SCIF_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, GPS_MAG_D, SEL_GPS_3),
+       PINMUX_IPSR_MSEL(IP7_2_0, DU1_DOTCLKIN_B, SEL_DIS_1),
+       PINMUX_IPSR_MSEL(IP7_2_0, CAN_CLK_D, SEL_CANCLK_3),
+       PINMUX_IPSR_MSEL(IP7_2_0, GPS_MAG_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP7_2_0, SCIF_CLK_B, SEL_SCIF_1),
+       PINMUX_IPSR_MSEL(IP7_2_0, GPS_MAG_D, SEL_GPS_3),
        PINMUX_IPSR_DATA(IP7_5_3, DU1_DR0),
        PINMUX_IPSR_DATA(IP7_5_3, LCDOUT0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, VI1_DATA0_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, TX0_B, SEL_SCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, SCIFA0_TXD_B, SEL_SCFA_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, MSIOF2_SCK_B, SEL_SOF2_1),
+       PINMUX_IPSR_MSEL(IP7_5_3, VI1_DATA0_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP7_5_3, TX0_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP7_5_3, SCIFA0_TXD_B, SEL_SCFA_1),
+       PINMUX_IPSR_MSEL(IP7_5_3, MSIOF2_SCK_B, SEL_SOF2_1),
        PINMUX_IPSR_DATA(IP7_8_6, DU1_DR1),
        PINMUX_IPSR_DATA(IP7_8_6, LCDOUT1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, VI1_DATA1_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, RX0_B, SEL_SCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, SCIFA0_RXD_B, SEL_SCFA_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, MSIOF2_SYNC_B, SEL_SOF2_1),
+       PINMUX_IPSR_MSEL(IP7_8_6, VI1_DATA1_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP7_8_6, RX0_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP7_8_6, SCIFA0_RXD_B, SEL_SCFA_1),
+       PINMUX_IPSR_MSEL(IP7_8_6, MSIOF2_SYNC_B, SEL_SOF2_1),
        PINMUX_IPSR_DATA(IP7_10_9, DU1_DR2),
        PINMUX_IPSR_DATA(IP7_10_9, LCDOUT2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_10_9, SSI_SCK0129_B, SEL_SSI0_1),
+       PINMUX_IPSR_MSEL(IP7_10_9, SSI_SCK0129_B, SEL_SSI0_1),
        PINMUX_IPSR_DATA(IP7_12_11, DU1_DR3),
        PINMUX_IPSR_DATA(IP7_12_11, LCDOUT3),
-       PINMUX_IPSR_MODSEL_DATA(IP7_12_11, SSI_WS0129_B, SEL_SSI0_1),
+       PINMUX_IPSR_MSEL(IP7_12_11, SSI_WS0129_B, SEL_SSI0_1),
        PINMUX_IPSR_DATA(IP7_14_13, DU1_DR4),
        PINMUX_IPSR_DATA(IP7_14_13, LCDOUT4),
-       PINMUX_IPSR_MODSEL_DATA(IP7_14_13, SSI_SDATA0_B, SEL_SSI0_1),
+       PINMUX_IPSR_MSEL(IP7_14_13, SSI_SDATA0_B, SEL_SSI0_1),
        PINMUX_IPSR_DATA(IP7_16_15, DU1_DR5),
        PINMUX_IPSR_DATA(IP7_16_15, LCDOUT5),
-       PINMUX_IPSR_MODSEL_DATA(IP7_16_15, SSI_SCK1_B, SEL_SSI1_1),
+       PINMUX_IPSR_MSEL(IP7_16_15, SSI_SCK1_B, SEL_SSI1_1),
        PINMUX_IPSR_DATA(IP7_18_17, DU1_DR6),
        PINMUX_IPSR_DATA(IP7_18_17, LCDOUT6),
-       PINMUX_IPSR_MODSEL_DATA(IP7_18_17, SSI_WS1_B, SEL_SSI1_1),
+       PINMUX_IPSR_MSEL(IP7_18_17, SSI_WS1_B, SEL_SSI1_1),
        PINMUX_IPSR_DATA(IP7_20_19, DU1_DR7),
        PINMUX_IPSR_DATA(IP7_20_19, LCDOUT7),
-       PINMUX_IPSR_MODSEL_DATA(IP7_20_19, SSI_SDATA1_B, SEL_SSI1_1),
+       PINMUX_IPSR_MSEL(IP7_20_19, SSI_SDATA1_B, SEL_SSI1_1),
        PINMUX_IPSR_DATA(IP7_23_21, DU1_DG0),
        PINMUX_IPSR_DATA(IP7_23_21, LCDOUT8),
-       PINMUX_IPSR_MODSEL_DATA(IP7_23_21, VI1_DATA2_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_23_21, TX1_B, SEL_SCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_23_21, SCIFA1_TXD_B, SEL_SCIFA1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_23_21, MSIOF2_SS1_B, SEL_SOF2_1),
+       PINMUX_IPSR_MSEL(IP7_23_21, VI1_DATA2_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP7_23_21, TX1_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP7_23_21, SCIFA1_TXD_B, SEL_SCIFA1_1),
+       PINMUX_IPSR_MSEL(IP7_23_21, MSIOF2_SS1_B, SEL_SOF2_1),
        PINMUX_IPSR_DATA(IP7_26_24, DU1_DG1),
        PINMUX_IPSR_DATA(IP7_26_24, LCDOUT9),
-       PINMUX_IPSR_MODSEL_DATA(IP7_26_24, VI1_DATA3_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_26_24, RX1_B, SEL_SCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_26_24, SCIFA1_RXD_B, SEL_SCIFA1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_26_24, MSIOF2_SS2_B, SEL_SOF2_1),
+       PINMUX_IPSR_MSEL(IP7_26_24, VI1_DATA3_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP7_26_24, RX1_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP7_26_24, SCIFA1_RXD_B, SEL_SCIFA1_1),
+       PINMUX_IPSR_MSEL(IP7_26_24, MSIOF2_SS2_B, SEL_SOF2_1),
        PINMUX_IPSR_DATA(IP7_29_27, DU1_DG2),
        PINMUX_IPSR_DATA(IP7_29_27, LCDOUT10),
-       PINMUX_IPSR_MODSEL_DATA(IP7_29_27, VI1_DATA4_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP7_29_27, VI1_DATA4_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP7_29_27, SCIF1_SCK_B),
-       PINMUX_IPSR_MODSEL_DATA(IP7_29_27, SCIFA1_SCK, SEL_SCIFA1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_29_27, SSI_SCK78_B, SEL_SSI7_1),
+       PINMUX_IPSR_MSEL(IP7_29_27, SCIFA1_SCK, SEL_SCIFA1_0),
+       PINMUX_IPSR_MSEL(IP7_29_27, SSI_SCK78_B, SEL_SSI7_1),
 
        /* IPSR8 */
        PINMUX_IPSR_DATA(IP8_2_0, DU1_DG3),
        PINMUX_IPSR_DATA(IP8_2_0, LCDOUT11),
-       PINMUX_IPSR_MODSEL_DATA(IP8_2_0, VI1_DATA5_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_2_0, SSI_WS78_B, SEL_SSI7_1),
+       PINMUX_IPSR_MSEL(IP8_2_0, VI1_DATA5_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP8_2_0, SSI_WS78_B, SEL_SSI7_1),
        PINMUX_IPSR_DATA(IP8_5_3, DU1_DG4),
        PINMUX_IPSR_DATA(IP8_5_3, LCDOUT12),
-       PINMUX_IPSR_MODSEL_DATA(IP8_5_3, VI1_DATA6_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_5_3, HRX0_B, SEL_HSCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_5_3, SCIFB2_RXD_B, SEL_SCIFB2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_5_3, SSI_SDATA7_B, SEL_SSI7_1),
+       PINMUX_IPSR_MSEL(IP8_5_3, VI1_DATA6_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP8_5_3, HRX0_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP8_5_3, SCIFB2_RXD_B, SEL_SCIFB2_1),
+       PINMUX_IPSR_MSEL(IP8_5_3, SSI_SDATA7_B, SEL_SSI7_1),
        PINMUX_IPSR_DATA(IP8_8_6, DU1_DG5),
        PINMUX_IPSR_DATA(IP8_8_6, LCDOUT13),
-       PINMUX_IPSR_MODSEL_DATA(IP8_8_6, VI1_DATA7_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_8_6, HCTS0_N_B, SEL_HSCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_8_6, SCIFB2_TXD_B, SEL_SCIFB2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_8_6, SSI_SDATA8_B, SEL_SSI8_1),
+       PINMUX_IPSR_MSEL(IP8_8_6, VI1_DATA7_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP8_8_6, HCTS0_N_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP8_8_6, SCIFB2_TXD_B, SEL_SCIFB2_1),
+       PINMUX_IPSR_MSEL(IP8_8_6, SSI_SDATA8_B, SEL_SSI8_1),
        PINMUX_IPSR_DATA(IP8_11_9, DU1_DG6),
        PINMUX_IPSR_DATA(IP8_11_9, LCDOUT14),
-       PINMUX_IPSR_MODSEL_DATA(IP8_11_9, HRTS0_N_B, SEL_HSCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_11_9, SCIFB2_CTS_N_B, SEL_SCIFB2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_11_9, SSI_SCK9_B, SEL_SSI9_1),
+       PINMUX_IPSR_MSEL(IP8_11_9, HRTS0_N_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP8_11_9, SCIFB2_CTS_N_B, SEL_SCIFB2_1),
+       PINMUX_IPSR_MSEL(IP8_11_9, SSI_SCK9_B, SEL_SSI9_1),
        PINMUX_IPSR_DATA(IP8_14_12, DU1_DG7),
        PINMUX_IPSR_DATA(IP8_14_12, LCDOUT15),
-       PINMUX_IPSR_MODSEL_DATA(IP8_14_12, HTX0_B, SEL_HSCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_14_12, SCIFB2_RTS_N_B, SEL_SCIFB2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_14_12, SSI_WS9_B, SEL_SSI9_1),
+       PINMUX_IPSR_MSEL(IP8_14_12, HTX0_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP8_14_12, SCIFB2_RTS_N_B, SEL_SCIFB2_1),
+       PINMUX_IPSR_MSEL(IP8_14_12, SSI_WS9_B, SEL_SSI9_1),
        PINMUX_IPSR_DATA(IP8_17_15, DU1_DB0),
        PINMUX_IPSR_DATA(IP8_17_15, LCDOUT16),
-       PINMUX_IPSR_MODSEL_DATA(IP8_17_15, VI1_CLK_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_17_15, TX2_B, SEL_SCIF2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_17_15, SCIFA2_TXD_B, SEL_SCIFA2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_17_15, MSIOF2_TXD_B, SEL_SOF2_1),
+       PINMUX_IPSR_MSEL(IP8_17_15, VI1_CLK_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP8_17_15, TX2_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP8_17_15, SCIFA2_TXD_B, SEL_SCIFA2_1),
+       PINMUX_IPSR_MSEL(IP8_17_15, MSIOF2_TXD_B, SEL_SOF2_1),
        PINMUX_IPSR_DATA(IP8_20_18, DU1_DB1),
        PINMUX_IPSR_DATA(IP8_20_18, LCDOUT17),
-       PINMUX_IPSR_MODSEL_DATA(IP8_20_18, VI1_HSYNC_N_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_20_18, RX2_B, SEL_SCIF2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_20_18, SCIFA2_RXD_B, SEL_SCIFA2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_20_18, MSIOF2_RXD_B, SEL_SOF2_1),
+       PINMUX_IPSR_MSEL(IP8_20_18, VI1_HSYNC_N_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP8_20_18, RX2_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP8_20_18, SCIFA2_RXD_B, SEL_SCIFA2_1),
+       PINMUX_IPSR_MSEL(IP8_20_18, MSIOF2_RXD_B, SEL_SOF2_1),
        PINMUX_IPSR_DATA(IP8_23_21, DU1_DB2),
        PINMUX_IPSR_DATA(IP8_23_21, LCDOUT18),
-       PINMUX_IPSR_MODSEL_DATA(IP8_23_21, VI1_VSYNC_N_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP8_23_21, VI1_VSYNC_N_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP8_23_21, SCIF2_SCK_B),
-       PINMUX_IPSR_MODSEL_DATA(IP8_23_21, SCIFA2_SCK, SEL_SCIFA2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_23_21, SSI_SDATA9_B, SEL_SSI9_1),
+       PINMUX_IPSR_MSEL(IP8_23_21, SCIFA2_SCK, SEL_SCIFA2_1),
+       PINMUX_IPSR_MSEL(IP8_23_21, SSI_SDATA9_B, SEL_SSI9_1),
        PINMUX_IPSR_DATA(IP8_25_24, DU1_DB3),
        PINMUX_IPSR_DATA(IP8_25_24, LCDOUT19),
-       PINMUX_IPSR_MODSEL_DATA(IP8_25_24, VI1_CLKENB_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP8_25_24, VI1_CLKENB_B, SEL_VI1_1),
        PINMUX_IPSR_DATA(IP8_27_26, DU1_DB4),
        PINMUX_IPSR_DATA(IP8_27_26, LCDOUT20),
-       PINMUX_IPSR_MODSEL_DATA(IP8_27_26, VI1_FIELD_B, SEL_VI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_27_26, CAN1_RX, SEL_CAN1_0),
+       PINMUX_IPSR_MSEL(IP8_27_26, VI1_FIELD_B, SEL_VI1_1),
+       PINMUX_IPSR_MSEL(IP8_27_26, CAN1_RX, SEL_CAN1_0),
        PINMUX_IPSR_DATA(IP8_30_28, DU1_DB5),
        PINMUX_IPSR_DATA(IP8_30_28, LCDOUT21),
-       PINMUX_IPSR_MODSEL_DATA(IP8_30_28, TX3, SEL_SCIF3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_30_28, SCIFA3_TXD, SEL_SCIFA3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_30_28, CAN1_TX, SEL_CAN1_0),
+       PINMUX_IPSR_MSEL(IP8_30_28, TX3, SEL_SCIF3_0),
+       PINMUX_IPSR_MSEL(IP8_30_28, SCIFA3_TXD, SEL_SCIFA3_0),
+       PINMUX_IPSR_MSEL(IP8_30_28, CAN1_TX, SEL_CAN1_0),
 
        /* IPSR9 */
        PINMUX_IPSR_DATA(IP9_2_0, DU1_DB6),
        PINMUX_IPSR_DATA(IP9_2_0, LCDOUT22),
-       PINMUX_IPSR_MODSEL_DATA(IP9_2_0, SCL3_C, SEL_IIC3_2),
-       PINMUX_IPSR_MODSEL_DATA(IP9_2_0, RX3, SEL_SCIF3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
+       PINMUX_IPSR_MSEL(IP9_2_0, SCL3_C, SEL_IIC3_2),
+       PINMUX_IPSR_MSEL(IP9_2_0, RX3, SEL_SCIF3_0),
+       PINMUX_IPSR_MSEL(IP9_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
        PINMUX_IPSR_DATA(IP9_5_3, DU1_DB7),
        PINMUX_IPSR_DATA(IP9_5_3, LCDOUT23),
-       PINMUX_IPSR_MODSEL_DATA(IP9_5_3, SDA3_C, SEL_IIC3_2),
-       PINMUX_IPSR_MODSEL_DATA(IP9_5_3, SCIF3_SCK, SEL_SCIF3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_5_3, SCIFA3_SCK, SEL_SCIFA3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_6, DU1_DOTCLKIN, SEL_DIS_0),
+       PINMUX_IPSR_MSEL(IP9_5_3, SDA3_C, SEL_IIC3_2),
+       PINMUX_IPSR_MSEL(IP9_5_3, SCIF3_SCK, SEL_SCIF3_0),
+       PINMUX_IPSR_MSEL(IP9_5_3, SCIFA3_SCK, SEL_SCIFA3_0),
+       PINMUX_IPSR_MSEL(IP9_6, DU1_DOTCLKIN, SEL_DIS_0),
        PINMUX_IPSR_DATA(IP9_6, QSTVA_QVS),
        PINMUX_IPSR_DATA(IP9_7, DU1_DOTCLKOUT0),
        PINMUX_IPSR_DATA(IP9_7, QCLK),
        PINMUX_IPSR_DATA(IP9_10_8, DU1_DOTCLKOUT1),
        PINMUX_IPSR_DATA(IP9_10_8, QSTVB_QVE),
-       PINMUX_IPSR_MODSEL_DATA(IP9_10_8, CAN0_TX, SEL_CAN0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_10_8, TX3_B, SEL_SCIF3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_10_8, SCL2_B, SEL_IIC2_1),
+       PINMUX_IPSR_MSEL(IP9_10_8, CAN0_TX, SEL_CAN0_0),
+       PINMUX_IPSR_MSEL(IP9_10_8, TX3_B, SEL_SCIF3_1),
+       PINMUX_IPSR_MSEL(IP9_10_8, SCL2_B, SEL_IIC2_1),
        PINMUX_IPSR_DATA(IP9_10_8, PWM4),
        PINMUX_IPSR_DATA(IP9_11, DU1_EXHSYNC_DU1_HSYNC),
        PINMUX_IPSR_DATA(IP9_11, QSTH_QHS),
@@ -1284,280 +1283,280 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP9_12, QSTB_QHE),
        PINMUX_IPSR_DATA(IP9_15_13, DU1_EXODDF_DU1_ODDF_DISP_CDE),
        PINMUX_IPSR_DATA(IP9_15_13, QCPV_QDE),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_13, CAN0_RX, SEL_CAN0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_13, RX3_B, SEL_SCIF3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_13, SDA2_B, SEL_IIC2_1),
+       PINMUX_IPSR_MSEL(IP9_15_13, CAN0_RX, SEL_CAN0_0),
+       PINMUX_IPSR_MSEL(IP9_15_13, RX3_B, SEL_SCIF3_1),
+       PINMUX_IPSR_MSEL(IP9_15_13, SDA2_B, SEL_IIC2_1),
        PINMUX_IPSR_DATA(IP9_16, DU1_DISP),
        PINMUX_IPSR_DATA(IP9_16, QPOLA),
        PINMUX_IPSR_DATA(IP9_18_17, DU1_CDE),
        PINMUX_IPSR_DATA(IP9_18_17, QPOLB),
        PINMUX_IPSR_DATA(IP9_18_17, PWM4_B),
        PINMUX_IPSR_DATA(IP9_20_19, VI0_CLKENB),
-       PINMUX_IPSR_MODSEL_DATA(IP9_20_19, TX4, SEL_SCIF4_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_20_19, SCIFA4_TXD, SEL_SCIFA4_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_20_19, TS_SDATA0_D, SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP9_20_19, TX4, SEL_SCIF4_0),
+       PINMUX_IPSR_MSEL(IP9_20_19, SCIFA4_TXD, SEL_SCIFA4_0),
+       PINMUX_IPSR_MSEL(IP9_20_19, TS_SDATA0_D, SEL_TSIF0_3),
        PINMUX_IPSR_DATA(IP9_22_21, VI0_FIELD),
-       PINMUX_IPSR_MODSEL_DATA(IP9_22_21, RX4, SEL_SCIF4_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_22_21, SCIFA4_RXD, SEL_SCIFA4_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_22_21, TS_SCK0_D, SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP9_22_21, RX4, SEL_SCIF4_0),
+       PINMUX_IPSR_MSEL(IP9_22_21, SCIFA4_RXD, SEL_SCIFA4_0),
+       PINMUX_IPSR_MSEL(IP9_22_21, TS_SCK0_D, SEL_TSIF0_3),
        PINMUX_IPSR_DATA(IP9_24_23, VI0_HSYNC_N),
-       PINMUX_IPSR_MODSEL_DATA(IP9_24_23, TX5, SEL_SCIF5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_24_23, SCIFA5_TXD, SEL_SCIFA5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_24_23, TS_SDEN0_D, SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP9_24_23, TX5, SEL_SCIF5_0),
+       PINMUX_IPSR_MSEL(IP9_24_23, SCIFA5_TXD, SEL_SCIFA5_0),
+       PINMUX_IPSR_MSEL(IP9_24_23, TS_SDEN0_D, SEL_TSIF0_3),
        PINMUX_IPSR_DATA(IP9_26_25, VI0_VSYNC_N),
-       PINMUX_IPSR_MODSEL_DATA(IP9_26_25, RX5, SEL_SCIF5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_26_25, SCIFA5_RXD, SEL_SCIFA5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_26_25, TS_SPSYNC0_D, SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP9_26_25, RX5, SEL_SCIF5_0),
+       PINMUX_IPSR_MSEL(IP9_26_25, SCIFA5_RXD, SEL_SCIFA5_0),
+       PINMUX_IPSR_MSEL(IP9_26_25, TS_SPSYNC0_D, SEL_TSIF0_3),
        PINMUX_IPSR_DATA(IP9_28_27, VI0_DATA3_VI0_B3),
-       PINMUX_IPSR_MODSEL_DATA(IP9_28_27, SCIF3_SCK_B, SEL_SCIF3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_28_27, SCIFA3_SCK_B, SEL_SCIFA3_1),
+       PINMUX_IPSR_MSEL(IP9_28_27, SCIF3_SCK_B, SEL_SCIF3_1),
+       PINMUX_IPSR_MSEL(IP9_28_27, SCIFA3_SCK_B, SEL_SCIFA3_1),
        PINMUX_IPSR_DATA(IP9_31_29, VI0_G0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_31_29, SCL8, SEL_IIC8_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_31_29, STP_IVCXO27_0_C, SEL_SSP_2),
-       PINMUX_IPSR_MODSEL_DATA(IP9_31_29, SCL4, SEL_IIC4_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_31_29, HCTS2_N, SEL_HSCIF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_31_29, SCIFB2_CTS_N, SEL_SCIFB2_0),
+       PINMUX_IPSR_MSEL(IP9_31_29, SCL8, SEL_IIC8_0),
+       PINMUX_IPSR_MSEL(IP9_31_29, STP_IVCXO27_0_C, SEL_SSP_2),
+       PINMUX_IPSR_MSEL(IP9_31_29, SCL4, SEL_IIC4_0),
+       PINMUX_IPSR_MSEL(IP9_31_29, HCTS2_N, SEL_HSCIF2_0),
+       PINMUX_IPSR_MSEL(IP9_31_29, SCIFB2_CTS_N, SEL_SCIFB2_0),
        PINMUX_IPSR_DATA(IP9_31_29, ATAWR1_N),
 
        /* IPSR10 */
        PINMUX_IPSR_DATA(IP10_2_0, VI0_G1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SDA8, SEL_IIC8_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, STP_ISCLK_0_C, SEL_SSP_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SDA4, SEL_IIC4_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, HRTS2_N, SEL_HSCIF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SCIFB2_RTS_N, SEL_SCIFB2_0),
+       PINMUX_IPSR_MSEL(IP10_2_0, SDA8, SEL_IIC8_0),
+       PINMUX_IPSR_MSEL(IP10_2_0, STP_ISCLK_0_C, SEL_SSP_2),
+       PINMUX_IPSR_MSEL(IP10_2_0, SDA4, SEL_IIC4_0),
+       PINMUX_IPSR_MSEL(IP10_2_0, HRTS2_N, SEL_HSCIF2_0),
+       PINMUX_IPSR_MSEL(IP10_2_0, SCIFB2_RTS_N, SEL_SCIFB2_0),
        PINMUX_IPSR_DATA(IP10_2_0, ATADIR1_N),
        PINMUX_IPSR_DATA(IP10_5_3, VI0_G2),
        PINMUX_IPSR_DATA(IP10_5_3, VI2_HSYNC_N),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, STP_ISD_0_C, SEL_SSP_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SCL3_B, SEL_IIC3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, HSCK2, SEL_HSCIF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SCIFB2_SCK, SEL_SCIFB2_0),
+       PINMUX_IPSR_MSEL(IP10_5_3, STP_ISD_0_C, SEL_SSP_2),
+       PINMUX_IPSR_MSEL(IP10_5_3, SCL3_B, SEL_IIC3_1),
+       PINMUX_IPSR_MSEL(IP10_5_3, HSCK2, SEL_HSCIF2_0),
+       PINMUX_IPSR_MSEL(IP10_5_3, SCIFB2_SCK, SEL_SCIFB2_0),
        PINMUX_IPSR_DATA(IP10_5_3, ATARD1_N),
        PINMUX_IPSR_DATA(IP10_8_6, VI0_G3),
        PINMUX_IPSR_DATA(IP10_8_6, VI2_VSYNC_N),
-       PINMUX_IPSR_MODSEL_DATA(IP10_8_6, STP_ISEN_0_C, SEL_SSP_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_8_6, SDA3_B, SEL_IIC3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_8_6, HRX2, SEL_HSCIF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_8_6, SCIFB2_RXD, SEL_SCIFB2_0),
+       PINMUX_IPSR_MSEL(IP10_8_6, STP_ISEN_0_C, SEL_SSP_2),
+       PINMUX_IPSR_MSEL(IP10_8_6, SDA3_B, SEL_IIC3_1),
+       PINMUX_IPSR_MSEL(IP10_8_6, HRX2, SEL_HSCIF2_0),
+       PINMUX_IPSR_MSEL(IP10_8_6, SCIFB2_RXD, SEL_SCIFB2_0),
        PINMUX_IPSR_DATA(IP10_8_6, ATACS01_N),
        PINMUX_IPSR_DATA(IP10_11_9, VI0_G4),
        PINMUX_IPSR_DATA(IP10_11_9, VI2_CLKENB),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, STP_ISSYNC_0_C, SEL_SSP_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, HTX2, SEL_HSCIF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SCIFB2_TXD, SEL_SCIFB2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SCIFB0_SCK_D, SEL_SCIFB_3),
+       PINMUX_IPSR_MSEL(IP10_11_9, STP_ISSYNC_0_C, SEL_SSP_2),
+       PINMUX_IPSR_MSEL(IP10_11_9, HTX2, SEL_HSCIF2_0),
+       PINMUX_IPSR_MSEL(IP10_11_9, SCIFB2_TXD, SEL_SCIFB2_0),
+       PINMUX_IPSR_MSEL(IP10_11_9, SCIFB0_SCK_D, SEL_SCIFB_3),
        PINMUX_IPSR_DATA(IP10_14_12, VI0_G5),
        PINMUX_IPSR_DATA(IP10_14_12, VI2_FIELD),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, STP_OPWM_0_C, SEL_SSP_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, FMCLK_D, SEL_FM_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, CAN0_TX_E, SEL_CAN0_4),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, HTX1_D, SEL_HSCIF1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SCIFB0_TXD_D, SEL_SCIFB_3),
+       PINMUX_IPSR_MSEL(IP10_14_12, STP_OPWM_0_C, SEL_SSP_2),
+       PINMUX_IPSR_MSEL(IP10_14_12, FMCLK_D, SEL_FM_3),
+       PINMUX_IPSR_MSEL(IP10_14_12, CAN0_TX_E, SEL_CAN0_4),
+       PINMUX_IPSR_MSEL(IP10_14_12, HTX1_D, SEL_HSCIF1_3),
+       PINMUX_IPSR_MSEL(IP10_14_12, SCIFB0_TXD_D, SEL_SCIFB_3),
        PINMUX_IPSR_DATA(IP10_16_15, VI0_G6),
        PINMUX_IPSR_DATA(IP10_16_15, VI2_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP10_16_15, BPFCLK_D, SEL_FM_3),
+       PINMUX_IPSR_MSEL(IP10_16_15, BPFCLK_D, SEL_FM_3),
        PINMUX_IPSR_DATA(IP10_18_17, VI0_G7),
        PINMUX_IPSR_DATA(IP10_18_17, VI2_DATA0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_18_17, FMIN_D, SEL_FM_3),
+       PINMUX_IPSR_MSEL(IP10_18_17, FMIN_D, SEL_FM_3),
        PINMUX_IPSR_DATA(IP10_21_19, VI0_R0),
        PINMUX_IPSR_DATA(IP10_21_19, VI2_DATA1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_21_19, GLO_I0_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_21_19, TS_SDATA0_C, SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP10_21_19, GLO_I0_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP10_21_19, TS_SDATA0_C, SEL_TSIF0_2),
        PINMUX_IPSR_DATA(IP10_21_19, ATACS11_N),
        PINMUX_IPSR_DATA(IP10_24_22, VI0_R1),
        PINMUX_IPSR_DATA(IP10_24_22, VI2_DATA2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_24_22, GLO_I1_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_24_22, TS_SCK0_C, SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP10_24_22, GLO_I1_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP10_24_22, TS_SCK0_C, SEL_TSIF0_2),
        PINMUX_IPSR_DATA(IP10_24_22, ATAG1_N),
        PINMUX_IPSR_DATA(IP10_26_25, VI0_R2),
        PINMUX_IPSR_DATA(IP10_26_25, VI2_DATA3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_26_25, GLO_Q0_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_26_25, TS_SDEN0_C, SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP10_26_25, GLO_Q0_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP10_26_25, TS_SDEN0_C, SEL_TSIF0_2),
        PINMUX_IPSR_DATA(IP10_28_27, VI0_R3),
        PINMUX_IPSR_DATA(IP10_28_27, VI2_DATA4),
-       PINMUX_IPSR_MODSEL_DATA(IP10_28_27, GLO_Q1_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_28_27, TS_SPSYNC0_C, SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP10_28_27, GLO_Q1_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP10_28_27, TS_SPSYNC0_C, SEL_TSIF0_2),
        PINMUX_IPSR_DATA(IP10_31_29, VI0_R4),
        PINMUX_IPSR_DATA(IP10_31_29, VI2_DATA5),
-       PINMUX_IPSR_MODSEL_DATA(IP10_31_29, GLO_SCLK_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_31_29, TX0_C, SEL_SCIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_31_29, SCL1_D, SEL_IIC1_3),
+       PINMUX_IPSR_MSEL(IP10_31_29, GLO_SCLK_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP10_31_29, TX0_C, SEL_SCIF0_2),
+       PINMUX_IPSR_MSEL(IP10_31_29, SCL1_D, SEL_IIC1_3),
 
        /* IPSR11 */
        PINMUX_IPSR_DATA(IP11_2_0, VI0_R5),
        PINMUX_IPSR_DATA(IP11_2_0, VI2_DATA6),
-       PINMUX_IPSR_MODSEL_DATA(IP11_2_0, GLO_SDATA_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_2_0, RX0_C, SEL_SCIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP11_2_0, SDA1_D, SEL_IIC1_3),
+       PINMUX_IPSR_MSEL(IP11_2_0, GLO_SDATA_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP11_2_0, RX0_C, SEL_SCIF0_2),
+       PINMUX_IPSR_MSEL(IP11_2_0, SDA1_D, SEL_IIC1_3),
        PINMUX_IPSR_DATA(IP11_5_3, VI0_R6),
        PINMUX_IPSR_DATA(IP11_5_3, VI2_DATA7),
-       PINMUX_IPSR_MODSEL_DATA(IP11_5_3, GLO_SS_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_5_3, TX1_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP11_5_3, SCL4_B, SEL_IIC4_1),
+       PINMUX_IPSR_MSEL(IP11_5_3, GLO_SS_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP11_5_3, TX1_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP11_5_3, SCL4_B, SEL_IIC4_1),
        PINMUX_IPSR_DATA(IP11_8_6, VI0_R7),
-       PINMUX_IPSR_MODSEL_DATA(IP11_8_6, GLO_RFON_B, SEL_GPS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_8_6, RX1_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP11_8_6, CAN0_RX_E, SEL_CAN0_4),
-       PINMUX_IPSR_MODSEL_DATA(IP11_8_6, SDA4_B, SEL_IIC4_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_8_6, HRX1_D, SEL_HSCIF1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_8_6, SCIFB0_RXD_D, SEL_SCIFB_3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_11_9, VI1_HSYNC_N, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_8_6, GLO_RFON_B, SEL_GPS_1),
+       PINMUX_IPSR_MSEL(IP11_8_6, RX1_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP11_8_6, CAN0_RX_E, SEL_CAN0_4),
+       PINMUX_IPSR_MSEL(IP11_8_6, SDA4_B, SEL_IIC4_1),
+       PINMUX_IPSR_MSEL(IP11_8_6, HRX1_D, SEL_HSCIF1_3),
+       PINMUX_IPSR_MSEL(IP11_8_6, SCIFB0_RXD_D, SEL_SCIFB_3),
+       PINMUX_IPSR_MSEL(IP11_11_9, VI1_HSYNC_N, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_11_9, AVB_RXD0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_11_9, TS_SDATA0_B, SEL_TSIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_11_9, TX4_B, SEL_SCIF4_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_11_9, SCIFA4_TXD_B, SEL_SCIFA4_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_14_12, VI1_VSYNC_N, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_11_9, TS_SDATA0_B, SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP11_11_9, TX4_B, SEL_SCIF4_1),
+       PINMUX_IPSR_MSEL(IP11_11_9, SCIFA4_TXD_B, SEL_SCIFA4_1),
+       PINMUX_IPSR_MSEL(IP11_14_12, VI1_VSYNC_N, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_14_12, AVB_RXD1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_14_12, TS_SCK0_B, SEL_TSIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_14_12, RX4_B, SEL_SCIF4_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_14_12, SCIFA4_RXD_B, SEL_SCIFA4_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_16_15, VI1_CLKENB, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_14_12, TS_SCK0_B, SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP11_14_12, RX4_B, SEL_SCIF4_1),
+       PINMUX_IPSR_MSEL(IP11_14_12, SCIFA4_RXD_B, SEL_SCIFA4_1),
+       PINMUX_IPSR_MSEL(IP11_16_15, VI1_CLKENB, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_16_15, AVB_RXD2),
-       PINMUX_IPSR_MODSEL_DATA(IP11_16_15, TS_SDEN0_B, SEL_TSIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_18_17, VI1_FIELD, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_16_15, TS_SDEN0_B, SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP11_18_17, VI1_FIELD, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_18_17, AVB_RXD3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_18_17, TS_SPSYNC0_B, SEL_TSIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_19, VI1_CLK, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_18_17, TS_SPSYNC0_B, SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP11_19, VI1_CLK, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_19, AVB_RXD4),
-       PINMUX_IPSR_MODSEL_DATA(IP11_20, VI1_DATA0, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_20, VI1_DATA0, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_20, AVB_RXD5),
-       PINMUX_IPSR_MODSEL_DATA(IP11_21, VI1_DATA1, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_21, VI1_DATA1, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_21, AVB_RXD6),
-       PINMUX_IPSR_MODSEL_DATA(IP11_22, VI1_DATA2, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_22, VI1_DATA2, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_22, AVB_RXD7),
-       PINMUX_IPSR_MODSEL_DATA(IP11_23, VI1_DATA3, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_23, VI1_DATA3, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_23, AVB_RX_ER),
-       PINMUX_IPSR_MODSEL_DATA(IP11_24, VI1_DATA4, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_24, VI1_DATA4, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_24, AVB_MDIO),
-       PINMUX_IPSR_MODSEL_DATA(IP11_25, VI1_DATA5, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_25, VI1_DATA5, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_25, AVB_RX_DV),
-       PINMUX_IPSR_MODSEL_DATA(IP11_26, VI1_DATA6, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_26, VI1_DATA6, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_26, AVB_MAGIC),
-       PINMUX_IPSR_MODSEL_DATA(IP11_27, VI1_DATA7, SEL_VI1_0),
+       PINMUX_IPSR_MSEL(IP11_27, VI1_DATA7, SEL_VI1_0),
        PINMUX_IPSR_DATA(IP11_27, AVB_MDC),
        PINMUX_IPSR_DATA(IP11_29_28, ETH_MDIO),
        PINMUX_IPSR_DATA(IP11_29_28, AVB_RX_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP11_29_28, SCL2_C, SEL_IIC2_2),
+       PINMUX_IPSR_MSEL(IP11_29_28, SCL2_C, SEL_IIC2_2),
        PINMUX_IPSR_DATA(IP11_31_30, ETH_CRS_DV),
        PINMUX_IPSR_DATA(IP11_31_30, AVB_LINK),
-       PINMUX_IPSR_MODSEL_DATA(IP11_31_30, SDA2_C, SEL_IIC2_2),
+       PINMUX_IPSR_MSEL(IP11_31_30, SDA2_C, SEL_IIC2_2),
 
        /* IPSR12 */
        PINMUX_IPSR_DATA(IP12_1_0, ETH_RX_ER),
        PINMUX_IPSR_DATA(IP12_1_0, AVB_CRS),
-       PINMUX_IPSR_MODSEL_DATA(IP12_1_0, SCL3, SEL_IIC3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_1_0, SCL7, SEL_IIC7_0),
+       PINMUX_IPSR_MSEL(IP12_1_0, SCL3, SEL_IIC3_0),
+       PINMUX_IPSR_MSEL(IP12_1_0, SCL7, SEL_IIC7_0),
        PINMUX_IPSR_DATA(IP12_3_2, ETH_RXD0),
        PINMUX_IPSR_DATA(IP12_3_2, AVB_PHY_INT),
-       PINMUX_IPSR_MODSEL_DATA(IP12_3_2, SDA3, SEL_IIC3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_3_2, SDA7, SEL_IIC7_0),
+       PINMUX_IPSR_MSEL(IP12_3_2, SDA3, SEL_IIC3_0),
+       PINMUX_IPSR_MSEL(IP12_3_2, SDA7, SEL_IIC7_0),
        PINMUX_IPSR_DATA(IP12_6_4, ETH_RXD1),
        PINMUX_IPSR_DATA(IP12_6_4, AVB_GTXREFCLK),
-       PINMUX_IPSR_MODSEL_DATA(IP12_6_4, CAN0_TX_C, SEL_CAN0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_6_4, SCL2_D, SEL_IIC2_3),
-       PINMUX_IPSR_MODSEL_DATA(IP12_6_4, MSIOF1_RXD_E, SEL_SOF1_4),
+       PINMUX_IPSR_MSEL(IP12_6_4, CAN0_TX_C, SEL_CAN0_2),
+       PINMUX_IPSR_MSEL(IP12_6_4, SCL2_D, SEL_IIC2_3),
+       PINMUX_IPSR_MSEL(IP12_6_4, MSIOF1_RXD_E, SEL_SOF1_4),
        PINMUX_IPSR_DATA(IP12_9_7, ETH_LINK),
        PINMUX_IPSR_DATA(IP12_9_7, AVB_TXD0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_9_7, CAN0_RX_C, SEL_CAN0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_9_7, SDA2_D, SEL_IIC2_3),
-       PINMUX_IPSR_MODSEL_DATA(IP12_9_7, MSIOF1_SCK_E, SEL_SOF1_4),
+       PINMUX_IPSR_MSEL(IP12_9_7, CAN0_RX_C, SEL_CAN0_2),
+       PINMUX_IPSR_MSEL(IP12_9_7, SDA2_D, SEL_IIC2_3),
+       PINMUX_IPSR_MSEL(IP12_9_7, MSIOF1_SCK_E, SEL_SOF1_4),
        PINMUX_IPSR_DATA(IP12_12_10, ETH_REFCLK),
        PINMUX_IPSR_DATA(IP12_12_10, AVB_TXD1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_12_10, SCIFA3_RXD_B, SEL_SCIFA3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_12_10, CAN1_RX_C, SEL_CAN1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_12_10, MSIOF1_SYNC_E, SEL_SOF1_4),
+       PINMUX_IPSR_MSEL(IP12_12_10, SCIFA3_RXD_B, SEL_SCIFA3_1),
+       PINMUX_IPSR_MSEL(IP12_12_10, CAN1_RX_C, SEL_CAN1_2),
+       PINMUX_IPSR_MSEL(IP12_12_10, MSIOF1_SYNC_E, SEL_SOF1_4),
        PINMUX_IPSR_DATA(IP12_15_13, ETH_TXD1),
        PINMUX_IPSR_DATA(IP12_15_13, AVB_TXD2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_15_13, SCIFA3_TXD_B, SEL_SCIFA3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_15_13, CAN1_TX_C, SEL_CAN1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_15_13, MSIOF1_TXD_E, SEL_SOF1_4),
+       PINMUX_IPSR_MSEL(IP12_15_13, SCIFA3_TXD_B, SEL_SCIFA3_1),
+       PINMUX_IPSR_MSEL(IP12_15_13, CAN1_TX_C, SEL_CAN1_2),
+       PINMUX_IPSR_MSEL(IP12_15_13, MSIOF1_TXD_E, SEL_SOF1_4),
        PINMUX_IPSR_DATA(IP12_17_16, ETH_TX_EN),
        PINMUX_IPSR_DATA(IP12_17_16, AVB_TXD3),
-       PINMUX_IPSR_MODSEL_DATA(IP12_17_16, TCLK1_B, SEL_TMU1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_17_16, CAN_CLK_B, SEL_CANCLK_1),
+       PINMUX_IPSR_MSEL(IP12_17_16, TCLK1_B, SEL_TMU1_0),
+       PINMUX_IPSR_MSEL(IP12_17_16, CAN_CLK_B, SEL_CANCLK_1),
        PINMUX_IPSR_DATA(IP12_19_18, ETH_MAGIC),
        PINMUX_IPSR_DATA(IP12_19_18, AVB_TXD4),
-       PINMUX_IPSR_MODSEL_DATA(IP12_19_18, IETX_C, SEL_IEB_2),
+       PINMUX_IPSR_MSEL(IP12_19_18, IETX_C, SEL_IEB_2),
        PINMUX_IPSR_DATA(IP12_21_20, ETH_TXD0),
        PINMUX_IPSR_DATA(IP12_21_20, AVB_TXD5),
-       PINMUX_IPSR_MODSEL_DATA(IP12_21_20, IECLK_C, SEL_IEB_2),
+       PINMUX_IPSR_MSEL(IP12_21_20, IECLK_C, SEL_IEB_2),
        PINMUX_IPSR_DATA(IP12_23_22, ETH_MDC),
        PINMUX_IPSR_DATA(IP12_23_22, AVB_TXD6),
-       PINMUX_IPSR_MODSEL_DATA(IP12_23_22, IERX_C, SEL_IEB_2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_26_24, STP_IVCXO27_0, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP12_23_22, IERX_C, SEL_IEB_2),
+       PINMUX_IPSR_MSEL(IP12_26_24, STP_IVCXO27_0, SEL_SSP_0),
        PINMUX_IPSR_DATA(IP12_26_24, AVB_TXD7),
-       PINMUX_IPSR_MODSEL_DATA(IP12_26_24, SCIFB2_TXD_D, SEL_SCIFB2_3),
-       PINMUX_IPSR_MODSEL_DATA(IP12_26_24, ADIDATA_B, SEL_RAD_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_26_24, MSIOF0_SYNC_C, SEL_SOF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_29_27, STP_ISCLK_0, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP12_26_24, SCIFB2_TXD_D, SEL_SCIFB2_3),
+       PINMUX_IPSR_MSEL(IP12_26_24, ADIDATA_B, SEL_RAD_1),
+       PINMUX_IPSR_MSEL(IP12_26_24, MSIOF0_SYNC_C, SEL_SOF0_2),
+       PINMUX_IPSR_MSEL(IP12_29_27, STP_ISCLK_0, SEL_SSP_0),
        PINMUX_IPSR_DATA(IP12_29_27, AVB_TX_EN),
-       PINMUX_IPSR_MODSEL_DATA(IP12_29_27, SCIFB2_RXD_D, SEL_SCIFB2_3),
-       PINMUX_IPSR_MODSEL_DATA(IP12_29_27, ADICS_SAMP_B, SEL_RAD_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_29_27, MSIOF0_SCK_C, SEL_SOF0_2),
+       PINMUX_IPSR_MSEL(IP12_29_27, SCIFB2_RXD_D, SEL_SCIFB2_3),
+       PINMUX_IPSR_MSEL(IP12_29_27, ADICS_SAMP_B, SEL_RAD_1),
+       PINMUX_IPSR_MSEL(IP12_29_27, MSIOF0_SCK_C, SEL_SOF0_2),
 
        /* IPSR13 */
-       PINMUX_IPSR_MODSEL_DATA(IP13_2_0, STP_ISD_0, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP13_2_0, STP_ISD_0, SEL_SSP_0),
        PINMUX_IPSR_DATA(IP13_2_0, AVB_TX_ER),
-       PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SCIFB2_SCK_C, SEL_SCIFB2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP13_2_0, ADICLK_B, SEL_RAD_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_2_0, MSIOF0_SS1_C, SEL_SOF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP13_4_3, STP_ISEN_0, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP13_2_0, SCIFB2_SCK_C, SEL_SCIFB2_2),
+       PINMUX_IPSR_MSEL(IP13_2_0, ADICLK_B, SEL_RAD_1),
+       PINMUX_IPSR_MSEL(IP13_2_0, MSIOF0_SS1_C, SEL_SOF0_2),
+       PINMUX_IPSR_MSEL(IP13_4_3, STP_ISEN_0, SEL_SSP_0),
        PINMUX_IPSR_DATA(IP13_4_3, AVB_TX_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP13_4_3, ADICHS0_B, SEL_RAD_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_4_3, MSIOF0_SS2_C, SEL_SOF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP13_6_5, STP_ISSYNC_0, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP13_4_3, ADICHS0_B, SEL_RAD_1),
+       PINMUX_IPSR_MSEL(IP13_4_3, MSIOF0_SS2_C, SEL_SOF0_2),
+       PINMUX_IPSR_MSEL(IP13_6_5, STP_ISSYNC_0, SEL_SSP_0),
        PINMUX_IPSR_DATA(IP13_6_5, AVB_COL),
-       PINMUX_IPSR_MODSEL_DATA(IP13_6_5, ADICHS1_B, SEL_RAD_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_6_5, MSIOF0_RXD_C, SEL_SOF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP13_9_7, STP_OPWM_0, SEL_SSP_0),
+       PINMUX_IPSR_MSEL(IP13_6_5, ADICHS1_B, SEL_RAD_1),
+       PINMUX_IPSR_MSEL(IP13_6_5, MSIOF0_RXD_C, SEL_SOF0_2),
+       PINMUX_IPSR_MSEL(IP13_9_7, STP_OPWM_0, SEL_SSP_0),
        PINMUX_IPSR_DATA(IP13_9_7, AVB_GTX_CLK),
        PINMUX_IPSR_DATA(IP13_9_7, PWM0_B),
-       PINMUX_IPSR_MODSEL_DATA(IP13_9_7, ADICHS2_B, SEL_RAD_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_9_7, MSIOF0_TXD_C, SEL_SOF0_2),
+       PINMUX_IPSR_MSEL(IP13_9_7, ADICHS2_B, SEL_RAD_1),
+       PINMUX_IPSR_MSEL(IP13_9_7, MSIOF0_TXD_C, SEL_SOF0_2),
        PINMUX_IPSR_DATA(IP13_10, SD0_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP13_10, SPCLK_B, SEL_QSP_1),
+       PINMUX_IPSR_MSEL(IP13_10, SPCLK_B, SEL_QSP_1),
        PINMUX_IPSR_DATA(IP13_11, SD0_CMD),
-       PINMUX_IPSR_MODSEL_DATA(IP13_11, MOSI_IO0_B, SEL_QSP_1),
+       PINMUX_IPSR_MSEL(IP13_11, MOSI_IO0_B, SEL_QSP_1),
        PINMUX_IPSR_DATA(IP13_12, SD0_DATA0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_12, MISO_IO1_B, SEL_QSP_1),
+       PINMUX_IPSR_MSEL(IP13_12, MISO_IO1_B, SEL_QSP_1),
        PINMUX_IPSR_DATA(IP13_13, SD0_DATA1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_13, IO2_B, SEL_QSP_1),
+       PINMUX_IPSR_MSEL(IP13_13, IO2_B, SEL_QSP_1),
        PINMUX_IPSR_DATA(IP13_14, SD0_DATA2),
-       PINMUX_IPSR_MODSEL_DATA(IP13_14, IO3_B, SEL_QSP_1),
+       PINMUX_IPSR_MSEL(IP13_14, IO3_B, SEL_QSP_1),
        PINMUX_IPSR_DATA(IP13_15, SD0_DATA3),
-       PINMUX_IPSR_MODSEL_DATA(IP13_15, SSL_B, SEL_QSP_1),
+       PINMUX_IPSR_MSEL(IP13_15, SSL_B, SEL_QSP_1),
        PINMUX_IPSR_DATA(IP13_18_16, SD0_CD),
-       PINMUX_IPSR_MODSEL_DATA(IP13_18_16, MMC_D6_B, SEL_MMC_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_18_16, SIM0_RST_B, SEL_SIM_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_18_16, CAN0_RX_F, SEL_CAN0_5),
-       PINMUX_IPSR_MODSEL_DATA(IP13_18_16, SCIFA5_TXD_B, SEL_SCIFA5_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_18_16, TX3_C, SEL_SCIF3_2),
+       PINMUX_IPSR_MSEL(IP13_18_16, MMC_D6_B, SEL_MMC_1),
+       PINMUX_IPSR_MSEL(IP13_18_16, SIM0_RST_B, SEL_SIM_1),
+       PINMUX_IPSR_MSEL(IP13_18_16, CAN0_RX_F, SEL_CAN0_5),
+       PINMUX_IPSR_MSEL(IP13_18_16, SCIFA5_TXD_B, SEL_SCIFA5_1),
+       PINMUX_IPSR_MSEL(IP13_18_16, TX3_C, SEL_SCIF3_2),
        PINMUX_IPSR_DATA(IP13_21_19, SD0_WP),
-       PINMUX_IPSR_MODSEL_DATA(IP13_21_19, MMC_D7_B, SEL_MMC_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_21_19, SIM0_D_B, SEL_SIM_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_21_19, CAN0_TX_F, SEL_CAN0_5),
-       PINMUX_IPSR_MODSEL_DATA(IP13_21_19, SCIFA5_RXD_B, SEL_SCIFA5_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_21_19, RX3_C, SEL_SCIF3_2),
+       PINMUX_IPSR_MSEL(IP13_21_19, MMC_D7_B, SEL_MMC_1),
+       PINMUX_IPSR_MSEL(IP13_21_19, SIM0_D_B, SEL_SIM_1),
+       PINMUX_IPSR_MSEL(IP13_21_19, CAN0_TX_F, SEL_CAN0_5),
+       PINMUX_IPSR_MSEL(IP13_21_19, SCIFA5_RXD_B, SEL_SCIFA5_1),
+       PINMUX_IPSR_MSEL(IP13_21_19, RX3_C, SEL_SCIF3_2),
        PINMUX_IPSR_DATA(IP13_22, SD1_CMD),
-       PINMUX_IPSR_MODSEL_DATA(IP13_22, REMOCON_B, SEL_RCN_1),
+       PINMUX_IPSR_MSEL(IP13_22, REMOCON_B, SEL_RCN_1),
        PINMUX_IPSR_DATA(IP13_24_23, SD1_DATA0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_24_23, SPEEDIN_B, SEL_RSP_1),
+       PINMUX_IPSR_MSEL(IP13_24_23, SPEEDIN_B, SEL_RSP_1),
        PINMUX_IPSR_DATA(IP13_25, SD1_DATA1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_25, IETX_B, SEL_IEB_1),
+       PINMUX_IPSR_MSEL(IP13_25, IETX_B, SEL_IEB_1),
        PINMUX_IPSR_DATA(IP13_26, SD1_DATA2),
-       PINMUX_IPSR_MODSEL_DATA(IP13_26, IECLK_B, SEL_IEB_1),
+       PINMUX_IPSR_MSEL(IP13_26, IECLK_B, SEL_IEB_1),
        PINMUX_IPSR_DATA(IP13_27, SD1_DATA3),
-       PINMUX_IPSR_MODSEL_DATA(IP13_27, IERX_B, SEL_IEB_1),
+       PINMUX_IPSR_MSEL(IP13_27, IERX_B, SEL_IEB_1),
        PINMUX_IPSR_DATA(IP13_30_28, SD1_CD),
        PINMUX_IPSR_DATA(IP13_30_28, PWM0),
        PINMUX_IPSR_DATA(IP13_30_28, TPU_TO0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_30_28, SCL1_C, SEL_IIC1_2),
+       PINMUX_IPSR_MSEL(IP13_30_28, SCL1_C, SEL_IIC1_2),
 
        /* IPSR14 */
        PINMUX_IPSR_DATA(IP14_1_0, SD1_WP),
        PINMUX_IPSR_DATA(IP14_1_0, PWM1_B),
-       PINMUX_IPSR_MODSEL_DATA(IP14_1_0, SDA1_C, SEL_IIC1_2),
+       PINMUX_IPSR_MSEL(IP14_1_0, SDA1_C, SEL_IIC1_2),
        PINMUX_IPSR_DATA(IP14_2, SD2_CLK),
        PINMUX_IPSR_DATA(IP14_2, MMC_CLK),
        PINMUX_IPSR_DATA(IP14_3, SD2_CMD),
@@ -1572,123 +1571,123 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP14_7, MMC_D3),
        PINMUX_IPSR_DATA(IP14_10_8, SD2_CD),
        PINMUX_IPSR_DATA(IP14_10_8, MMC_D4),
-       PINMUX_IPSR_MODSEL_DATA(IP14_10_8, SCL8_C, SEL_IIC8_2),
-       PINMUX_IPSR_MODSEL_DATA(IP14_10_8, TX5_B, SEL_SCIF5_1),
-       PINMUX_IPSR_MODSEL_DATA(IP14_10_8, SCIFA5_TXD_C, SEL_SCIFA5_2),
+       PINMUX_IPSR_MSEL(IP14_10_8, SCL8_C, SEL_IIC8_2),
+       PINMUX_IPSR_MSEL(IP14_10_8, TX5_B, SEL_SCIF5_1),
+       PINMUX_IPSR_MSEL(IP14_10_8, SCIFA5_TXD_C, SEL_SCIFA5_2),
        PINMUX_IPSR_DATA(IP14_13_11, SD2_WP),
        PINMUX_IPSR_DATA(IP14_13_11, MMC_D5),
-       PINMUX_IPSR_MODSEL_DATA(IP14_13_11, SDA8_C, SEL_IIC8_2),
-       PINMUX_IPSR_MODSEL_DATA(IP14_13_11, RX5_B, SEL_SCIF5_1),
-       PINMUX_IPSR_MODSEL_DATA(IP14_13_11, SCIFA5_RXD_C, SEL_SCIFA5_2),
-       PINMUX_IPSR_MODSEL_DATA(IP14_16_14, MSIOF0_SCK, SEL_SOF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_16_14, RX2_C, SEL_SCIF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP14_16_14, ADIDATA, SEL_RAD_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_16_14, VI1_CLK_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP14_13_11, SDA8_C, SEL_IIC8_2),
+       PINMUX_IPSR_MSEL(IP14_13_11, RX5_B, SEL_SCIF5_1),
+       PINMUX_IPSR_MSEL(IP14_13_11, SCIFA5_RXD_C, SEL_SCIFA5_2),
+       PINMUX_IPSR_MSEL(IP14_16_14, MSIOF0_SCK, SEL_SOF0_0),
+       PINMUX_IPSR_MSEL(IP14_16_14, RX2_C, SEL_SCIF2_2),
+       PINMUX_IPSR_MSEL(IP14_16_14, ADIDATA, SEL_RAD_0),
+       PINMUX_IPSR_MSEL(IP14_16_14, VI1_CLK_C, SEL_VI1_2),
        PINMUX_IPSR_DATA(IP14_16_14, VI1_G0_B),
-       PINMUX_IPSR_MODSEL_DATA(IP14_19_17, MSIOF0_SYNC, SEL_SOF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_19_17, TX2_C, SEL_SCIF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP14_19_17, ADICS_SAMP, SEL_RAD_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_19_17, VI1_CLKENB_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP14_19_17, MSIOF0_SYNC, SEL_SOF0_0),
+       PINMUX_IPSR_MSEL(IP14_19_17, TX2_C, SEL_SCIF2_2),
+       PINMUX_IPSR_MSEL(IP14_19_17, ADICS_SAMP, SEL_RAD_0),
+       PINMUX_IPSR_MSEL(IP14_19_17, VI1_CLKENB_C, SEL_VI1_2),
        PINMUX_IPSR_DATA(IP14_19_17, VI1_G1_B),
-       PINMUX_IPSR_MODSEL_DATA(IP14_22_20, MSIOF0_TXD, SEL_SOF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_22_20, ADICLK, SEL_RAD_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_22_20, VI1_FIELD_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP14_22_20, MSIOF0_TXD, SEL_SOF0_0),
+       PINMUX_IPSR_MSEL(IP14_22_20, ADICLK, SEL_RAD_0),
+       PINMUX_IPSR_MSEL(IP14_22_20, VI1_FIELD_C, SEL_VI1_2),
        PINMUX_IPSR_DATA(IP14_22_20, VI1_G2_B),
-       PINMUX_IPSR_MODSEL_DATA(IP14_25_23, MSIOF0_RXD, SEL_SOF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_25_23, ADICHS0, SEL_RAD_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_25_23, VI1_DATA0_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP14_25_23, MSIOF0_RXD, SEL_SOF0_0),
+       PINMUX_IPSR_MSEL(IP14_25_23, ADICHS0, SEL_RAD_0),
+       PINMUX_IPSR_MSEL(IP14_25_23, VI1_DATA0_C, SEL_VI1_2),
        PINMUX_IPSR_DATA(IP14_25_23, VI1_G3_B),
-       PINMUX_IPSR_MODSEL_DATA(IP14_28_26, MSIOF0_SS1, SEL_SOF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_28_26, MMC_D6, SEL_MMC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_28_26, ADICHS1, SEL_RAD_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_28_26, TX0_E, SEL_SCIF0_4),
-       PINMUX_IPSR_MODSEL_DATA(IP14_28_26, VI1_HSYNC_N_C, SEL_VI1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP14_28_26, SCL7_C, SEL_IIC7_2),
+       PINMUX_IPSR_MSEL(IP14_28_26, MSIOF0_SS1, SEL_SOF0_0),
+       PINMUX_IPSR_MSEL(IP14_28_26, MMC_D6, SEL_MMC_0),
+       PINMUX_IPSR_MSEL(IP14_28_26, ADICHS1, SEL_RAD_0),
+       PINMUX_IPSR_MSEL(IP14_28_26, TX0_E, SEL_SCIF0_4),
+       PINMUX_IPSR_MSEL(IP14_28_26, VI1_HSYNC_N_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP14_28_26, SCL7_C, SEL_IIC7_2),
        PINMUX_IPSR_DATA(IP14_28_26, VI1_G4_B),
-       PINMUX_IPSR_MODSEL_DATA(IP14_31_29, MSIOF0_SS2, SEL_SOF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_31_29, MMC_D7, SEL_MMC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_31_29, ADICHS2, SEL_RAD_0),
-       PINMUX_IPSR_MODSEL_DATA(IP14_31_29, RX0_E, SEL_SCIF0_4),
-       PINMUX_IPSR_MODSEL_DATA(IP14_31_29, VI1_VSYNC_N_C, SEL_VI1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP14_31_29, SDA7_C, SEL_IIC7_2),
+       PINMUX_IPSR_MSEL(IP14_31_29, MSIOF0_SS2, SEL_SOF0_0),
+       PINMUX_IPSR_MSEL(IP14_31_29, MMC_D7, SEL_MMC_0),
+       PINMUX_IPSR_MSEL(IP14_31_29, ADICHS2, SEL_RAD_0),
+       PINMUX_IPSR_MSEL(IP14_31_29, RX0_E, SEL_SCIF0_4),
+       PINMUX_IPSR_MSEL(IP14_31_29, VI1_VSYNC_N_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP14_31_29, SDA7_C, SEL_IIC7_2),
        PINMUX_IPSR_DATA(IP14_31_29, VI1_G5_B),
 
        /* IPSR15 */
-       PINMUX_IPSR_MODSEL_DATA(IP15_1_0, SIM0_RST, SEL_SIM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_1_0, IETX, SEL_IEB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_1_0, CAN1_TX_D, SEL_CAN1_3),
+       PINMUX_IPSR_MSEL(IP15_1_0, SIM0_RST, SEL_SIM_0),
+       PINMUX_IPSR_MSEL(IP15_1_0, IETX, SEL_IEB_0),
+       PINMUX_IPSR_MSEL(IP15_1_0, CAN1_TX_D, SEL_CAN1_3),
        PINMUX_IPSR_DATA(IP15_3_2, SIM0_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP15_3_2, IECLK, SEL_IEB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_3_2, CAN_CLK_C, SEL_CANCLK_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_5_4, SIM0_D, SEL_SIM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_5_4, IERX, SEL_IEB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_5_4, CAN1_RX_D, SEL_CAN1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP15_8_6, GPS_CLK, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_8_6, DU1_DOTCLKIN_C, SEL_DIS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_8_6, AUDIO_CLKB_B, SEL_ADG_1),
+       PINMUX_IPSR_MSEL(IP15_3_2, IECLK, SEL_IEB_0),
+       PINMUX_IPSR_MSEL(IP15_3_2, CAN_CLK_C, SEL_CANCLK_2),
+       PINMUX_IPSR_MSEL(IP15_5_4, SIM0_D, SEL_SIM_0),
+       PINMUX_IPSR_MSEL(IP15_5_4, IERX, SEL_IEB_0),
+       PINMUX_IPSR_MSEL(IP15_5_4, CAN1_RX_D, SEL_CAN1_3),
+       PINMUX_IPSR_MSEL(IP15_8_6, GPS_CLK, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP15_8_6, DU1_DOTCLKIN_C, SEL_DIS_2),
+       PINMUX_IPSR_MSEL(IP15_8_6, AUDIO_CLKB_B, SEL_ADG_1),
        PINMUX_IPSR_DATA(IP15_8_6, PWM5_B),
-       PINMUX_IPSR_MODSEL_DATA(IP15_8_6, SCIFA3_TXD_C, SEL_SCIFA3_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_11_9, GPS_SIGN, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_11_9, TX4_C, SEL_SCIF4_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2),
+       PINMUX_IPSR_MSEL(IP15_8_6, SCIFA3_TXD_C, SEL_SCIFA3_2),
+       PINMUX_IPSR_MSEL(IP15_11_9, GPS_SIGN, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP15_11_9, TX4_C, SEL_SCIF4_2),
+       PINMUX_IPSR_MSEL(IP15_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2),
        PINMUX_IPSR_DATA(IP15_11_9, PWM5),
        PINMUX_IPSR_DATA(IP15_11_9, VI1_G6_B),
-       PINMUX_IPSR_MODSEL_DATA(IP15_11_9, SCIFA3_RXD_C, SEL_SCIFA3_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_14_12, GPS_MAG, SEL_GPS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_14_12, RX4_C, SEL_SCIF4_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_14_12, SCIFA4_RXD_C, SEL_SCIFA4_2),
+       PINMUX_IPSR_MSEL(IP15_11_9, SCIFA3_RXD_C, SEL_SCIFA3_2),
+       PINMUX_IPSR_MSEL(IP15_14_12, GPS_MAG, SEL_GPS_0),
+       PINMUX_IPSR_MSEL(IP15_14_12, RX4_C, SEL_SCIF4_2),
+       PINMUX_IPSR_MSEL(IP15_14_12, SCIFA4_RXD_C, SEL_SCIFA4_2),
        PINMUX_IPSR_DATA(IP15_14_12, PWM6),
        PINMUX_IPSR_DATA(IP15_14_12, VI1_G7_B),
-       PINMUX_IPSR_MODSEL_DATA(IP15_14_12, SCIFA3_SCK_C, SEL_SCIFA3_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_17_15, HCTS0_N, SEL_HSCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_17_15, SCIFB0_CTS_N, SEL_SCIFB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_17_15, GLO_I0_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_17_15, TCLK1, SEL_TMU1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_17_15, VI1_DATA1_C, SEL_VI1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_20_18, HRTS0_N, SEL_HSCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_20_18, SCIFB0_RTS_N, SEL_SCIFB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_20_18, GLO_I1_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_20_18, VI1_DATA2_C, SEL_VI1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_23_21, HSCK0, SEL_HSCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_23_21, SCIFB0_SCK, SEL_SCIFB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_23_21, GLO_Q0_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_23_21, CAN_CLK, SEL_CANCLK_0),
+       PINMUX_IPSR_MSEL(IP15_14_12, SCIFA3_SCK_C, SEL_SCIFA3_2),
+       PINMUX_IPSR_MSEL(IP15_17_15, HCTS0_N, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP15_17_15, SCIFB0_CTS_N, SEL_SCIFB_0),
+       PINMUX_IPSR_MSEL(IP15_17_15, GLO_I0_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP15_17_15, TCLK1, SEL_TMU1_0),
+       PINMUX_IPSR_MSEL(IP15_17_15, VI1_DATA1_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP15_20_18, HRTS0_N, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP15_20_18, SCIFB0_RTS_N, SEL_SCIFB_0),
+       PINMUX_IPSR_MSEL(IP15_20_18, GLO_I1_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP15_20_18, VI1_DATA2_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP15_23_21, HSCK0, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP15_23_21, SCIFB0_SCK, SEL_SCIFB_0),
+       PINMUX_IPSR_MSEL(IP15_23_21, GLO_Q0_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP15_23_21, CAN_CLK, SEL_CANCLK_0),
        PINMUX_IPSR_DATA(IP15_23_21, TCLK2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_23_21, VI1_DATA3_C, SEL_VI1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_26_24, HRX0, SEL_HSCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_26_24, SCIFB0_RXD, SEL_SCIFB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_26_24, GLO_Q1_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_26_24, CAN0_RX_B, SEL_CAN0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP15_26_24, VI1_DATA4_C, SEL_VI1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_29_27, HTX0, SEL_HSCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_29_27, SCIFB0_TXD, SEL_SCIFB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP15_29_27, GLO_SCLK_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP15_29_27, CAN0_TX_B, SEL_CAN0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP15_29_27, VI1_DATA5_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP15_23_21, VI1_DATA3_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP15_26_24, HRX0, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP15_26_24, SCIFB0_RXD, SEL_SCIFB_0),
+       PINMUX_IPSR_MSEL(IP15_26_24, GLO_Q1_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP15_26_24, CAN0_RX_B, SEL_CAN0_1),
+       PINMUX_IPSR_MSEL(IP15_26_24, VI1_DATA4_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP15_29_27, HTX0, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP15_29_27, SCIFB0_TXD, SEL_SCIFB_0),
+       PINMUX_IPSR_MSEL(IP15_29_27, GLO_SCLK_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP15_29_27, CAN0_TX_B, SEL_CAN0_1),
+       PINMUX_IPSR_MSEL(IP15_29_27, VI1_DATA5_C, SEL_VI1_2),
 
        /* IPSR16 */
-       PINMUX_IPSR_MODSEL_DATA(IP16_2_0, HRX1, SEL_HSCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP16_2_0, SCIFB1_RXD, SEL_SCIFB1_0),
+       PINMUX_IPSR_MSEL(IP16_2_0, HRX1, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP16_2_0, SCIFB1_RXD, SEL_SCIFB1_0),
        PINMUX_IPSR_DATA(IP16_2_0, VI1_R0_B),
-       PINMUX_IPSR_MODSEL_DATA(IP16_2_0, GLO_SDATA_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP16_2_0, VI1_DATA6_C, SEL_VI1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP16_5_3, HTX1, SEL_HSCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP16_5_3, SCIFB1_TXD, SEL_SCIFB1_0),
+       PINMUX_IPSR_MSEL(IP16_2_0, GLO_SDATA_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP16_2_0, VI1_DATA6_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP16_5_3, HTX1, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP16_5_3, SCIFB1_TXD, SEL_SCIFB1_0),
        PINMUX_IPSR_DATA(IP16_5_3, VI1_R1_B),
-       PINMUX_IPSR_MODSEL_DATA(IP16_5_3, GLO_SS_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP16_5_3, VI1_DATA7_C, SEL_VI1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP16_7_6, HSCK1, SEL_HSCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP16_7_6, SCIFB1_SCK, SEL_SCIFB1_0),
+       PINMUX_IPSR_MSEL(IP16_5_3, GLO_SS_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP16_5_3, VI1_DATA7_C, SEL_VI1_2),
+       PINMUX_IPSR_MSEL(IP16_7_6, HSCK1, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP16_7_6, SCIFB1_SCK, SEL_SCIFB1_0),
        PINMUX_IPSR_DATA(IP16_7_6, MLB_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP16_7_6, GLO_RFON_C, SEL_GPS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP16_9_8, HCTS1_N, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP16_7_6, GLO_RFON_C, SEL_GPS_2),
+       PINMUX_IPSR_MSEL(IP16_9_8, HCTS1_N, SEL_HSCIF1_0),
        PINMUX_IPSR_DATA(IP16_9_8, SCIFB1_CTS_N),
        PINMUX_IPSR_DATA(IP16_9_8, MLB_SIG),
-       PINMUX_IPSR_MODSEL_DATA(IP16_9_8, CAN1_TX_B, SEL_CAN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP16_11_10, HRTS1_N, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP16_9_8, CAN1_TX_B, SEL_CAN1_1),
+       PINMUX_IPSR_MSEL(IP16_11_10, HRTS1_N, SEL_HSCIF1_0),
        PINMUX_IPSR_DATA(IP16_11_10, SCIFB1_RTS_N),
        PINMUX_IPSR_DATA(IP16_11_10, MLB_DAT),
-       PINMUX_IPSR_MODSEL_DATA(IP16_11_10, CAN1_RX_B, SEL_CAN1_1),
+       PINMUX_IPSR_MSEL(IP16_11_10, CAN1_RX_B, SEL_CAN1_1),
 };
 
 static const struct sh_pfc_pin pinmux_pins[] = {
@@ -3986,24 +3985,6 @@ static const unsigned int usb1_mux[] = {
        USB1_PWEN_MARK,
        USB1_OVC_MARK,
 };
-
-union vin_data {
-       unsigned int data24[24];
-       unsigned int data20[20];
-       unsigned int data16[16];
-       unsigned int data12[12];
-       unsigned int data10[10];
-       unsigned int data8[8];
-};
-
-#define VIN_DATA_PIN_GROUP(n, s)                               \
-       {                                                       \
-               .name = #n#s,                                   \
-               .pins = n##_pins.data##s,                       \
-               .mux = n##_mux.data##s,                         \
-               .nr_pins = ARRAY_SIZE(n##_pins.data##s),        \
-       }
-
 /* - VIN0 ------------------------------------------------------------------- */
 static const union vin_data vin0_data_pins = {
        .data24 = {
@@ -6337,8 +6318,8 @@ const struct sh_pfc_soc_info r8a7791_pinmux_info = {
 
        .cfg_regs = pinmux_config_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
 #endif
 
@@ -6358,7 +6339,7 @@ const struct sh_pfc_soc_info r8a7793_pinmux_info = {
 
        .cfg_regs = pinmux_config_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
 #endif
index 5248685dbb4e55ef73c44aebd6c2c887a839a434..086f6798b1294564e425e0ab93c129fab16831ea 100644 (file)
@@ -11,7 +11,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/platform_data/gpio-rcar.h>
 
 #include "core.h"
 #include "sh_pfc.h"
@@ -644,10 +643,10 @@ static const u16 pinmux_data[] = {
 
        /* IPSR0 */
        PINMUX_IPSR_DATA(IP0_0, SD1_CD),
-       PINMUX_IPSR_MODSEL_DATA(IP0_0, CAN0_RX, SEL_CAN0_0),
+       PINMUX_IPSR_MSEL(IP0_0, CAN0_RX, SEL_CAN0_0),
        PINMUX_IPSR_DATA(IP0_9_8, SD1_WP),
        PINMUX_IPSR_DATA(IP0_9_8, IRQ7),
-       PINMUX_IPSR_MODSEL_DATA(IP0_9_8, CAN0_TX, SEL_CAN0_0),
+       PINMUX_IPSR_MSEL(IP0_9_8, CAN0_TX, SEL_CAN0_0),
        PINMUX_IPSR_DATA(IP0_10, MMC_CLK),
        PINMUX_IPSR_DATA(IP0_10, SD2_CLK),
        PINMUX_IPSR_DATA(IP0_11, MMC_CMD),
@@ -665,68 +664,68 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP0_17, MMC_D5),
        PINMUX_IPSR_DATA(IP0_17, SD2_WP),
        PINMUX_IPSR_DATA(IP0_19_18, MMC_D6),
-       PINMUX_IPSR_MODSEL_DATA(IP0_19_18, SCIF0_RXD, SEL_SCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_19_18, I2C2_SCL_B, SEL_I2C02_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_19_18, CAN1_RX, SEL_CAN1_0),
+       PINMUX_IPSR_MSEL(IP0_19_18, SCIF0_RXD, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP0_19_18, I2C2_SCL_B, SEL_I2C02_1),
+       PINMUX_IPSR_MSEL(IP0_19_18, CAN1_RX, SEL_CAN1_0),
        PINMUX_IPSR_DATA(IP0_21_20, MMC_D7),
-       PINMUX_IPSR_MODSEL_DATA(IP0_21_20, SCIF0_TXD, SEL_SCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_21_20, I2C2_SDA_B, SEL_I2C02_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_21_20, CAN1_TX, SEL_CAN1_0),
+       PINMUX_IPSR_MSEL(IP0_21_20, SCIF0_TXD, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP0_21_20, I2C2_SDA_B, SEL_I2C02_1),
+       PINMUX_IPSR_MSEL(IP0_21_20, CAN1_TX, SEL_CAN1_0),
        PINMUX_IPSR_DATA(IP0_23_22, D0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_23_22, SCIFA3_SCK_B, SEL_SCIFA3_1),
+       PINMUX_IPSR_MSEL(IP0_23_22, SCIFA3_SCK_B, SEL_SCIFA3_1),
        PINMUX_IPSR_DATA(IP0_23_22, IRQ4),
        PINMUX_IPSR_DATA(IP0_24, D1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_24, SCIFA3_RXD_B, SEL_SCIFA3_1),
+       PINMUX_IPSR_MSEL(IP0_24, SCIFA3_RXD_B, SEL_SCIFA3_1),
        PINMUX_IPSR_DATA(IP0_25, D2),
-       PINMUX_IPSR_MODSEL_DATA(IP0_25, SCIFA3_TXD_B, SEL_SCIFA3_1),
+       PINMUX_IPSR_MSEL(IP0_25, SCIFA3_TXD_B, SEL_SCIFA3_1),
        PINMUX_IPSR_DATA(IP0_27_26, D3),
-       PINMUX_IPSR_MODSEL_DATA(IP0_27_26, I2C3_SCL_B, SEL_I2C03_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_27_26, SCIF5_RXD_B, SEL_SCIF5_1),
+       PINMUX_IPSR_MSEL(IP0_27_26, I2C3_SCL_B, SEL_I2C03_1),
+       PINMUX_IPSR_MSEL(IP0_27_26, SCIF5_RXD_B, SEL_SCIF5_1),
        PINMUX_IPSR_DATA(IP0_29_28, D4),
-       PINMUX_IPSR_MODSEL_DATA(IP0_29_28, I2C3_SDA_B, SEL_I2C03_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_29_28, SCIF5_TXD_B, SEL_SCIF5_1),
+       PINMUX_IPSR_MSEL(IP0_29_28, I2C3_SDA_B, SEL_I2C03_1),
+       PINMUX_IPSR_MSEL(IP0_29_28, SCIF5_TXD_B, SEL_SCIF5_1),
        PINMUX_IPSR_DATA(IP0_31_30, D5),
-       PINMUX_IPSR_MODSEL_DATA(IP0_31_30, SCIF4_RXD_B, SEL_SCIF4_1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_31_30, I2C0_SCL_D, SEL_I2C00_3),
+       PINMUX_IPSR_MSEL(IP0_31_30, SCIF4_RXD_B, SEL_SCIF4_1),
+       PINMUX_IPSR_MSEL(IP0_31_30, I2C0_SCL_D, SEL_I2C00_3),
 
        /* IPSR1 */
        PINMUX_IPSR_DATA(IP1_1_0, D6),
-       PINMUX_IPSR_MODSEL_DATA(IP1_1_0, SCIF4_TXD_B, SEL_SCIF4_1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_1_0, I2C0_SDA_D, SEL_I2C00_3),
+       PINMUX_IPSR_MSEL(IP1_1_0, SCIF4_TXD_B, SEL_SCIF4_1),
+       PINMUX_IPSR_MSEL(IP1_1_0, I2C0_SDA_D, SEL_I2C00_3),
        PINMUX_IPSR_DATA(IP1_3_2, D7),
        PINMUX_IPSR_DATA(IP1_3_2, IRQ3),
-       PINMUX_IPSR_MODSEL_DATA(IP1_3_2, TCLK1, SEL_TMU_0),
+       PINMUX_IPSR_MSEL(IP1_3_2, TCLK1, SEL_TMU_0),
        PINMUX_IPSR_DATA(IP1_3_2, PWM6_B),
        PINMUX_IPSR_DATA(IP1_5_4, D8),
        PINMUX_IPSR_DATA(IP1_5_4, HSCIF2_HRX),
-       PINMUX_IPSR_MODSEL_DATA(IP1_5_4, I2C1_SCL_B, SEL_I2C01_1),
+       PINMUX_IPSR_MSEL(IP1_5_4, I2C1_SCL_B, SEL_I2C01_1),
        PINMUX_IPSR_DATA(IP1_7_6, D9),
        PINMUX_IPSR_DATA(IP1_7_6, HSCIF2_HTX),
-       PINMUX_IPSR_MODSEL_DATA(IP1_7_6, I2C1_SDA_B, SEL_I2C01_1),
+       PINMUX_IPSR_MSEL(IP1_7_6, I2C1_SDA_B, SEL_I2C01_1),
        PINMUX_IPSR_DATA(IP1_10_8, D10),
        PINMUX_IPSR_DATA(IP1_10_8, HSCIF2_HSCK),
-       PINMUX_IPSR_MODSEL_DATA(IP1_10_8, SCIF1_SCK_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP1_10_8, SCIF1_SCK_C, SEL_SCIF1_2),
        PINMUX_IPSR_DATA(IP1_10_8, IRQ6),
        PINMUX_IPSR_DATA(IP1_10_8, PWM5_C),
        PINMUX_IPSR_DATA(IP1_12_11, D11),
        PINMUX_IPSR_DATA(IP1_12_11, HSCIF2_HCTS_N),
-       PINMUX_IPSR_MODSEL_DATA(IP1_12_11, SCIF1_RXD_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP1_12_11, I2C1_SCL_D, SEL_I2C01_3),
+       PINMUX_IPSR_MSEL(IP1_12_11, SCIF1_RXD_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP1_12_11, I2C1_SCL_D, SEL_I2C01_3),
        PINMUX_IPSR_DATA(IP1_14_13, D12),
        PINMUX_IPSR_DATA(IP1_14_13, HSCIF2_HRTS_N),
-       PINMUX_IPSR_MODSEL_DATA(IP1_14_13, SCIF1_TXD_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP1_14_13, I2C1_SDA_D, SEL_I2C01_3),
+       PINMUX_IPSR_MSEL(IP1_14_13, SCIF1_TXD_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP1_14_13, I2C1_SDA_D, SEL_I2C01_3),
        PINMUX_IPSR_DATA(IP1_17_15, D13),
-       PINMUX_IPSR_MODSEL_DATA(IP1_17_15, SCIFA1_SCK, SEL_SCIFA1_0),
+       PINMUX_IPSR_MSEL(IP1_17_15, SCIFA1_SCK, SEL_SCIFA1_0),
        PINMUX_IPSR_DATA(IP1_17_15, TANS1),
        PINMUX_IPSR_DATA(IP1_17_15, PWM2_C),
-       PINMUX_IPSR_MODSEL_DATA(IP1_17_15, TCLK2_B, SEL_TMU_1),
+       PINMUX_IPSR_MSEL(IP1_17_15, TCLK2_B, SEL_TMU_1),
        PINMUX_IPSR_DATA(IP1_19_18, D14),
-       PINMUX_IPSR_MODSEL_DATA(IP1_19_18, SCIFA1_RXD, SEL_SCIFA1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_19_18, IIC0_SCL_B, SEL_IIC00_1),
+       PINMUX_IPSR_MSEL(IP1_19_18, SCIFA1_RXD, SEL_SCIFA1_0),
+       PINMUX_IPSR_MSEL(IP1_19_18, IIC0_SCL_B, SEL_IIC00_1),
        PINMUX_IPSR_DATA(IP1_21_20, D15),
-       PINMUX_IPSR_MODSEL_DATA(IP1_21_20, SCIFA1_TXD, SEL_SCIFA1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_21_20, IIC0_SDA_B, SEL_IIC00_1),
+       PINMUX_IPSR_MSEL(IP1_21_20, SCIFA1_TXD, SEL_SCIFA1_0),
+       PINMUX_IPSR_MSEL(IP1_21_20, IIC0_SDA_B, SEL_IIC00_1),
        PINMUX_IPSR_DATA(IP1_23_22, A0),
        PINMUX_IPSR_DATA(IP1_23_22, SCIFB1_SCK),
        PINMUX_IPSR_DATA(IP1_23_22, PWM3_B),
@@ -742,58 +741,58 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP1_29_28, TPUTO3_C),
        PINMUX_IPSR_DATA(IP1_31_30, A6),
        PINMUX_IPSR_DATA(IP1_31_30, SCIFB0_CTS_N),
-       PINMUX_IPSR_MODSEL_DATA(IP1_31_30, SCIFA4_RXD_B, SEL_SCIFA4_1),
+       PINMUX_IPSR_MSEL(IP1_31_30, SCIFA4_RXD_B, SEL_SCIFA4_1),
        PINMUX_IPSR_DATA(IP1_31_30, TPUTO2_C),
 
        /* IPSR2 */
        PINMUX_IPSR_DATA(IP2_1_0, A7),
        PINMUX_IPSR_DATA(IP2_1_0, SCIFB0_RTS_N),
-       PINMUX_IPSR_MODSEL_DATA(IP2_1_0, SCIFA4_TXD_B, SEL_SCIFA4_1),
+       PINMUX_IPSR_MSEL(IP2_1_0, SCIFA4_TXD_B, SEL_SCIFA4_1),
        PINMUX_IPSR_DATA(IP2_3_2, A8),
-       PINMUX_IPSR_MODSEL_DATA(IP2_3_2, MSIOF1_RXD, SEL_MSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_3_2, SCIFA0_RXD_B, SEL_SCIFA0_1),
+       PINMUX_IPSR_MSEL(IP2_3_2, MSIOF1_RXD, SEL_MSI1_0),
+       PINMUX_IPSR_MSEL(IP2_3_2, SCIFA0_RXD_B, SEL_SCIFA0_1),
        PINMUX_IPSR_DATA(IP2_5_4, A9),
-       PINMUX_IPSR_MODSEL_DATA(IP2_5_4, MSIOF1_TXD, SEL_MSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_5_4, SCIFA0_TXD_B, SEL_SCIFA0_1),
+       PINMUX_IPSR_MSEL(IP2_5_4, MSIOF1_TXD, SEL_MSI1_0),
+       PINMUX_IPSR_MSEL(IP2_5_4, SCIFA0_TXD_B, SEL_SCIFA0_1),
        PINMUX_IPSR_DATA(IP2_7_6, A10),
-       PINMUX_IPSR_MODSEL_DATA(IP2_7_6, MSIOF1_SCK, SEL_MSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_7_6, IIC1_SCL_B, SEL_IIC01_1),
+       PINMUX_IPSR_MSEL(IP2_7_6, MSIOF1_SCK, SEL_MSI1_0),
+       PINMUX_IPSR_MSEL(IP2_7_6, IIC1_SCL_B, SEL_IIC01_1),
        PINMUX_IPSR_DATA(IP2_9_8, A11),
-       PINMUX_IPSR_MODSEL_DATA(IP2_9_8, MSIOF1_SYNC, SEL_MSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_9_8, IIC1_SDA_B, SEL_IIC01_1),
+       PINMUX_IPSR_MSEL(IP2_9_8, MSIOF1_SYNC, SEL_MSI1_0),
+       PINMUX_IPSR_MSEL(IP2_9_8, IIC1_SDA_B, SEL_IIC01_1),
        PINMUX_IPSR_DATA(IP2_11_10, A12),
-       PINMUX_IPSR_MODSEL_DATA(IP2_11_10, MSIOF1_SS1, SEL_MSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_11_10, SCIFA5_RXD_B, SEL_SCIFA5_1),
+       PINMUX_IPSR_MSEL(IP2_11_10, MSIOF1_SS1, SEL_MSI1_0),
+       PINMUX_IPSR_MSEL(IP2_11_10, SCIFA5_RXD_B, SEL_SCIFA5_1),
        PINMUX_IPSR_DATA(IP2_13_12, A13),
-       PINMUX_IPSR_MODSEL_DATA(IP2_13_12, MSIOF1_SS2, SEL_MSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_13_12, SCIFA5_TXD_B, SEL_SCIFA5_1),
+       PINMUX_IPSR_MSEL(IP2_13_12, MSIOF1_SS2, SEL_MSI1_0),
+       PINMUX_IPSR_MSEL(IP2_13_12, SCIFA5_TXD_B, SEL_SCIFA5_1),
        PINMUX_IPSR_DATA(IP2_15_14, A14),
-       PINMUX_IPSR_MODSEL_DATA(IP2_15_14, MSIOF2_RXD, SEL_MSI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_15_14, HSCIF0_HRX_B, SEL_HSCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_15_14, DREQ1_N, SEL_LBS_0),
+       PINMUX_IPSR_MSEL(IP2_15_14, MSIOF2_RXD, SEL_MSI2_0),
+       PINMUX_IPSR_MSEL(IP2_15_14, HSCIF0_HRX_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP2_15_14, DREQ1_N, SEL_LBS_0),
        PINMUX_IPSR_DATA(IP2_17_16, A15),
-       PINMUX_IPSR_MODSEL_DATA(IP2_17_16, MSIOF2_TXD, SEL_MSI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_17_16, HSCIF0_HTX_B, SEL_HSCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_17_16, DACK1, SEL_LBS_0),
+       PINMUX_IPSR_MSEL(IP2_17_16, MSIOF2_TXD, SEL_MSI2_0),
+       PINMUX_IPSR_MSEL(IP2_17_16, HSCIF0_HTX_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP2_17_16, DACK1, SEL_LBS_0),
        PINMUX_IPSR_DATA(IP2_20_18, A16),
-       PINMUX_IPSR_MODSEL_DATA(IP2_20_18, MSIOF2_SCK, SEL_MSI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_20_18, HSCIF0_HSCK_B, SEL_HSCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_20_18, SPEEDIN, SEL_RSP_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_20_18, VSP, SEL_SPDM_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_20_18, CAN_CLK_C, SEL_CAN_2),
+       PINMUX_IPSR_MSEL(IP2_20_18, MSIOF2_SCK, SEL_MSI2_0),
+       PINMUX_IPSR_MSEL(IP2_20_18, HSCIF0_HSCK_B, SEL_HSCIF0_1),
+       PINMUX_IPSR_MSEL(IP2_20_18, SPEEDIN, SEL_RSP_0),
+       PINMUX_IPSR_MSEL(IP2_20_18, VSP, SEL_SPDM_0),
+       PINMUX_IPSR_MSEL(IP2_20_18, CAN_CLK_C, SEL_CAN_2),
        PINMUX_IPSR_DATA(IP2_20_18, TPUTO2_B),
        PINMUX_IPSR_DATA(IP2_23_21, A17),
-       PINMUX_IPSR_MODSEL_DATA(IP2_23_21, MSIOF2_SYNC, SEL_MSI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_23_21, SCIF4_RXD_E, SEL_SCIF4_4),
-       PINMUX_IPSR_MODSEL_DATA(IP2_23_21, CAN1_RX_B, SEL_CAN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_23_21, AVB_AVTP_CAPTURE_B, SEL_AVB_1),
+       PINMUX_IPSR_MSEL(IP2_23_21, MSIOF2_SYNC, SEL_MSI2_0),
+       PINMUX_IPSR_MSEL(IP2_23_21, SCIF4_RXD_E, SEL_SCIF4_4),
+       PINMUX_IPSR_MSEL(IP2_23_21, CAN1_RX_B, SEL_CAN1_1),
+       PINMUX_IPSR_MSEL(IP2_23_21, AVB_AVTP_CAPTURE_B, SEL_AVB_1),
        PINMUX_IPSR_DATA(IP2_26_24, A18),
-       PINMUX_IPSR_MODSEL_DATA(IP2_26_24, MSIOF2_SS1, SEL_MSI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_26_24, SCIF4_TXD_E, SEL_SCIF4_4),
-       PINMUX_IPSR_MODSEL_DATA(IP2_26_24, CAN1_TX_B, SEL_CAN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_26_24, AVB_AVTP_MATCH_B, SEL_AVB_1),
+       PINMUX_IPSR_MSEL(IP2_26_24, MSIOF2_SS1, SEL_MSI2_0),
+       PINMUX_IPSR_MSEL(IP2_26_24, SCIF4_TXD_E, SEL_SCIF4_4),
+       PINMUX_IPSR_MSEL(IP2_26_24, CAN1_TX_B, SEL_CAN1_1),
+       PINMUX_IPSR_MSEL(IP2_26_24, AVB_AVTP_MATCH_B, SEL_AVB_1),
        PINMUX_IPSR_DATA(IP2_29_27, A19),
-       PINMUX_IPSR_MODSEL_DATA(IP2_29_27, MSIOF2_SS2, SEL_MSI2_0),
+       PINMUX_IPSR_MSEL(IP2_29_27, MSIOF2_SS2, SEL_MSI2_0),
        PINMUX_IPSR_DATA(IP2_29_27, PWM4),
        PINMUX_IPSR_DATA(IP2_29_27, TPUTO2),
        PINMUX_IPSR_DATA(IP2_29_27, MOUT0),
@@ -831,42 +830,42 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP3_14_13, VI1_DATA11),
        PINMUX_IPSR_DATA(IP3_17_15, EX_CS2_N),
        PINMUX_IPSR_DATA(IP3_17_15, PWM0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_17_15, SCIF4_RXD_C, SEL_SCIF4_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_17_15, TS_SDATA_B, SEL_TSIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_17_15, RIF0_SYNC, SEL_DR0_0),
+       PINMUX_IPSR_MSEL(IP3_17_15, SCIF4_RXD_C, SEL_SCIF4_2),
+       PINMUX_IPSR_MSEL(IP3_17_15, TS_SDATA_B, SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP3_17_15, RIF0_SYNC, SEL_DR0_0),
        PINMUX_IPSR_DATA(IP3_17_15, TPUTO3),
        PINMUX_IPSR_DATA(IP3_17_15, SCIFB2_TXD),
-       PINMUX_IPSR_MODSEL_DATA(IP3_17_15, SDATA_B, SEL_FSN_1),
+       PINMUX_IPSR_MSEL(IP3_17_15, SDATA_B, SEL_FSN_1),
        PINMUX_IPSR_DATA(IP3_20_18, EX_CS3_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_20_18, SCIFA2_SCK, SEL_SCIFA2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_20_18, SCIF4_TXD_C, SEL_SCIF4_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_20_18, TS_SCK_B, SEL_TSIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_20_18, RIF0_CLK, SEL_DR0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_20_18, BPFCLK, SEL_DARC_0),
+       PINMUX_IPSR_MSEL(IP3_20_18, SCIFA2_SCK, SEL_SCIFA2_0),
+       PINMUX_IPSR_MSEL(IP3_20_18, SCIF4_TXD_C, SEL_SCIF4_2),
+       PINMUX_IPSR_MSEL(IP3_20_18, TS_SCK_B, SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP3_20_18, RIF0_CLK, SEL_DR0_0),
+       PINMUX_IPSR_MSEL(IP3_20_18, BPFCLK, SEL_DARC_0),
        PINMUX_IPSR_DATA(IP3_20_18, SCIFB2_SCK),
-       PINMUX_IPSR_MODSEL_DATA(IP3_20_18, MDATA_B, SEL_FSN_1),
+       PINMUX_IPSR_MSEL(IP3_20_18, MDATA_B, SEL_FSN_1),
        PINMUX_IPSR_DATA(IP3_23_21, EX_CS4_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_23_21, SCIFA2_RXD, SEL_SCIFA2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_23_21, I2C2_SCL_E, SEL_I2C02_4),
-       PINMUX_IPSR_MODSEL_DATA(IP3_23_21, TS_SDEN_B, SEL_TSIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_23_21, RIF0_D0, SEL_DR0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_23_21, FMCLK, SEL_DARC_0),
+       PINMUX_IPSR_MSEL(IP3_23_21, SCIFA2_RXD, SEL_SCIFA2_0),
+       PINMUX_IPSR_MSEL(IP3_23_21, I2C2_SCL_E, SEL_I2C02_4),
+       PINMUX_IPSR_MSEL(IP3_23_21, TS_SDEN_B, SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP3_23_21, RIF0_D0, SEL_DR0_0),
+       PINMUX_IPSR_MSEL(IP3_23_21, FMCLK, SEL_DARC_0),
        PINMUX_IPSR_DATA(IP3_23_21, SCIFB2_CTS_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_23_21, SCKZ_B, SEL_FSN_1),
+       PINMUX_IPSR_MSEL(IP3_23_21, SCKZ_B, SEL_FSN_1),
        PINMUX_IPSR_DATA(IP3_26_24, EX_CS5_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SCIFA2_TXD, SEL_SCIFA2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, I2C2_SDA_E, SEL_I2C02_4),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, TS_SPSYNC_B, SEL_TSIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, RIF0_D1, SEL_DR1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, FMIN, SEL_DARC_0),
+       PINMUX_IPSR_MSEL(IP3_26_24, SCIFA2_TXD, SEL_SCIFA2_0),
+       PINMUX_IPSR_MSEL(IP3_26_24, I2C2_SDA_E, SEL_I2C02_4),
+       PINMUX_IPSR_MSEL(IP3_26_24, TS_SPSYNC_B, SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP3_26_24, RIF0_D1, SEL_DR1_0),
+       PINMUX_IPSR_MSEL(IP3_26_24, FMIN, SEL_DARC_0),
        PINMUX_IPSR_DATA(IP3_26_24, SCIFB2_RTS_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, STM_N_B, SEL_FSN_1),
+       PINMUX_IPSR_MSEL(IP3_26_24, STM_N_B, SEL_FSN_1),
        PINMUX_IPSR_DATA(IP3_29_27, BS_N),
        PINMUX_IPSR_DATA(IP3_29_27, DRACK0),
        PINMUX_IPSR_DATA(IP3_29_27, PWM1_C),
        PINMUX_IPSR_DATA(IP3_29_27, TPUTO0_C),
        PINMUX_IPSR_DATA(IP3_29_27, ATACS01_N),
-       PINMUX_IPSR_MODSEL_DATA(IP3_29_27, MTS_N_B, SEL_FSN_1),
+       PINMUX_IPSR_MSEL(IP3_29_27, MTS_N_B, SEL_FSN_1),
        PINMUX_IPSR_DATA(IP3_30, RD_N),
        PINMUX_IPSR_DATA(IP3_30, ATACS11_N),
        PINMUX_IPSR_DATA(IP3_31, RD_WR_N),
@@ -874,18 +873,18 @@ static const u16 pinmux_data[] = {
 
        /* IPSR4 */
        PINMUX_IPSR_DATA(IP4_1_0, EX_WAIT0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_1_0, CAN_CLK_B, SEL_CAN_1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SCIF_CLK, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP4_1_0, CAN_CLK_B, SEL_CAN_1),
+       PINMUX_IPSR_MSEL(IP4_1_0, SCIF_CLK, SEL_SCIF0_0),
        PINMUX_IPSR_DATA(IP4_1_0, PWMFSW0),
        PINMUX_IPSR_DATA(IP4_4_2, DU0_DR0),
        PINMUX_IPSR_DATA(IP4_4_2, LCDOUT16),
-       PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SCIF5_RXD_C, SEL_SCIF5_2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_4_2, I2C2_SCL_D, SEL_I2C02_3),
+       PINMUX_IPSR_MSEL(IP4_4_2, SCIF5_RXD_C, SEL_SCIF5_2),
+       PINMUX_IPSR_MSEL(IP4_4_2, I2C2_SCL_D, SEL_I2C02_3),
        PINMUX_IPSR_DATA(IP4_4_2, CC50_STATE0),
        PINMUX_IPSR_DATA(IP4_7_5, DU0_DR1),
        PINMUX_IPSR_DATA(IP4_7_5, LCDOUT17),
-       PINMUX_IPSR_MODSEL_DATA(IP4_7_5, SCIF5_TXD_C, SEL_SCIF5_2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_7_5, I2C2_SDA_D, SEL_I2C02_3),
+       PINMUX_IPSR_MSEL(IP4_7_5, SCIF5_TXD_C, SEL_SCIF5_2),
+       PINMUX_IPSR_MSEL(IP4_7_5, I2C2_SDA_D, SEL_I2C02_3),
        PINMUX_IPSR_DATA(IP4_9_8, CC50_STATE1),
        PINMUX_IPSR_DATA(IP4_9_8, DU0_DR2),
        PINMUX_IPSR_DATA(IP4_9_8, LCDOUT18),
@@ -907,13 +906,13 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP4_19_18, CC50_STATE7),
        PINMUX_IPSR_DATA(IP4_22_20, DU0_DG0),
        PINMUX_IPSR_DATA(IP4_22_20, LCDOUT8),
-       PINMUX_IPSR_MODSEL_DATA(IP4_22_20, SCIFA0_RXD_C, SEL_SCIFA0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_22_20, I2C3_SCL_D, SEL_I2C03_3),
+       PINMUX_IPSR_MSEL(IP4_22_20, SCIFA0_RXD_C, SEL_SCIFA0_2),
+       PINMUX_IPSR_MSEL(IP4_22_20, I2C3_SCL_D, SEL_I2C03_3),
        PINMUX_IPSR_DATA(IP4_22_20, CC50_STATE8),
        PINMUX_IPSR_DATA(IP4_25_23, DU0_DG1),
        PINMUX_IPSR_DATA(IP4_25_23, LCDOUT9),
-       PINMUX_IPSR_MODSEL_DATA(IP4_25_23, SCIFA0_TXD_C, SEL_SCIFA0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_25_23, I2C3_SDA_D, SEL_I2C03_3),
+       PINMUX_IPSR_MSEL(IP4_25_23, SCIFA0_TXD_C, SEL_SCIFA0_2),
+       PINMUX_IPSR_MSEL(IP4_25_23, I2C3_SDA_D, SEL_I2C03_3),
        PINMUX_IPSR_DATA(IP4_25_23, CC50_STATE9),
        PINMUX_IPSR_DATA(IP4_27_26, DU0_DG2),
        PINMUX_IPSR_DATA(IP4_27_26, LCDOUT10),
@@ -937,15 +936,15 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP5_5_4, CC50_STATE15),
        PINMUX_IPSR_DATA(IP5_8_6, DU0_DB0),
        PINMUX_IPSR_DATA(IP5_8_6, LCDOUT0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_8_6, SCIFA4_RXD_C, SEL_SCIFA4_2),
-       PINMUX_IPSR_MODSEL_DATA(IP5_8_6, I2C4_SCL_D, SEL_I2C04_3),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, CAN0_RX_C, SEL_CAN0_2),
+       PINMUX_IPSR_MSEL(IP5_8_6, SCIFA4_RXD_C, SEL_SCIFA4_2),
+       PINMUX_IPSR_MSEL(IP5_8_6, I2C4_SCL_D, SEL_I2C04_3),
+       PINMUX_IPSR_MSEL(IP7_8_6, CAN0_RX_C, SEL_CAN0_2),
        PINMUX_IPSR_DATA(IP5_8_6, CC50_STATE16),
        PINMUX_IPSR_DATA(IP5_11_9, DU0_DB1),
        PINMUX_IPSR_DATA(IP5_11_9, LCDOUT1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2),
-       PINMUX_IPSR_MODSEL_DATA(IP5_11_9, I2C4_SDA_D, SEL_I2C04_3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_11_9, CAN0_TX_C, SEL_CAN0_2),
+       PINMUX_IPSR_MSEL(IP5_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2),
+       PINMUX_IPSR_MSEL(IP5_11_9, I2C4_SDA_D, SEL_I2C04_3),
+       PINMUX_IPSR_MSEL(IP5_11_9, CAN0_TX_C, SEL_CAN0_2),
        PINMUX_IPSR_DATA(IP5_11_9, CC50_STATE17),
        PINMUX_IPSR_DATA(IP5_13_12, DU0_DB2),
        PINMUX_IPSR_DATA(IP5_13_12, LCDOUT2),
@@ -1010,501 +1009,501 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP6_16, VI0_DATA7_VI0_B7),
        PINMUX_IPSR_DATA(IP6_16, AVB_RXD6),
        PINMUX_IPSR_DATA(IP6_19_17, VI0_CLKENB),
-       PINMUX_IPSR_MODSEL_DATA(IP6_19_17, I2C3_SCL, SEL_I2C03_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_19_17, SCIFA5_RXD_C, SEL_SCIFA5_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_19_17, IETX_C, SEL_IEB_2),
+       PINMUX_IPSR_MSEL(IP6_19_17, I2C3_SCL, SEL_I2C03_0),
+       PINMUX_IPSR_MSEL(IP6_19_17, SCIFA5_RXD_C, SEL_SCIFA5_2),
+       PINMUX_IPSR_MSEL(IP6_19_17, IETX_C, SEL_IEB_2),
        PINMUX_IPSR_DATA(IP6_19_17, AVB_RXD7),
        PINMUX_IPSR_DATA(IP6_22_20, VI0_FIELD),
-       PINMUX_IPSR_MODSEL_DATA(IP6_22_20, I2C3_SDA, SEL_I2C03_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_22_20, SCIFA5_TXD_C, SEL_SCIFA5_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_22_20, IECLK_C, SEL_IEB_2),
+       PINMUX_IPSR_MSEL(IP6_22_20, I2C3_SDA, SEL_I2C03_0),
+       PINMUX_IPSR_MSEL(IP6_22_20, SCIFA5_TXD_C, SEL_SCIFA5_2),
+       PINMUX_IPSR_MSEL(IP6_22_20, IECLK_C, SEL_IEB_2),
        PINMUX_IPSR_DATA(IP6_22_20, AVB_RX_ER),
        PINMUX_IPSR_DATA(IP6_25_23, VI0_HSYNC_N),
-       PINMUX_IPSR_MODSEL_DATA(IP6_25_23, SCIF0_RXD_B, SEL_SCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_25_23, I2C0_SCL_C, SEL_I2C00_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_25_23, IERX_C, SEL_IEB_2),
+       PINMUX_IPSR_MSEL(IP6_25_23, SCIF0_RXD_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP6_25_23, I2C0_SCL_C, SEL_I2C00_2),
+       PINMUX_IPSR_MSEL(IP6_25_23, IERX_C, SEL_IEB_2),
        PINMUX_IPSR_DATA(IP6_25_23, AVB_COL),
        PINMUX_IPSR_DATA(IP6_28_26, VI0_VSYNC_N),
-       PINMUX_IPSR_MODSEL_DATA(IP6_28_26, SCIF0_TXD_B, SEL_SCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_28_26, I2C0_SDA_C, SEL_I2C00_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_28_26, AUDIO_CLKOUT_B, SEL_ADG_1),
+       PINMUX_IPSR_MSEL(IP6_28_26, SCIF0_TXD_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP6_28_26, I2C0_SDA_C, SEL_I2C00_2),
+       PINMUX_IPSR_MSEL(IP6_28_26, AUDIO_CLKOUT_B, SEL_ADG_1),
        PINMUX_IPSR_DATA(IP6_28_26, AVB_TX_EN),
-       PINMUX_IPSR_MODSEL_DATA(IP6_31_29, ETH_MDIO, SEL_ETH_0),
+       PINMUX_IPSR_MSEL(IP6_31_29, ETH_MDIO, SEL_ETH_0),
        PINMUX_IPSR_DATA(IP6_31_29, VI0_G0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_31_29, MSIOF2_RXD_B, SEL_MSI2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_31_29, IIC0_SCL_D, SEL_IIC00_3),
+       PINMUX_IPSR_MSEL(IP6_31_29, MSIOF2_RXD_B, SEL_MSI2_1),
+       PINMUX_IPSR_MSEL(IP6_31_29, IIC0_SCL_D, SEL_IIC00_3),
        PINMUX_IPSR_DATA(IP6_31_29, AVB_TX_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP6_31_29, ADIDATA, SEL_RAD_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_31_29, AD_DI, SEL_ADI_0),
+       PINMUX_IPSR_MSEL(IP6_31_29, ADIDATA, SEL_RAD_0),
+       PINMUX_IPSR_MSEL(IP6_31_29, AD_DI, SEL_ADI_0),
 
        /* IPSR7 */
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, ETH_CRS_DV, SEL_ETH_0),
+       PINMUX_IPSR_MSEL(IP7_2_0, ETH_CRS_DV, SEL_ETH_0),
        PINMUX_IPSR_DATA(IP7_2_0, VI0_G1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, MSIOF2_TXD_B, SEL_MSI2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, IIC0_SDA_D, SEL_IIC00_3),
+       PINMUX_IPSR_MSEL(IP7_2_0, MSIOF2_TXD_B, SEL_MSI2_1),
+       PINMUX_IPSR_MSEL(IP7_2_0, IIC0_SDA_D, SEL_IIC00_3),
        PINMUX_IPSR_DATA(IP7_2_0, AVB_TXD0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, ADICS_SAMP, SEL_RAD_0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, AD_DO, SEL_ADI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, ETH_RX_ER, SEL_ETH_0),
+       PINMUX_IPSR_MSEL(IP7_2_0, ADICS_SAMP, SEL_RAD_0),
+       PINMUX_IPSR_MSEL(IP7_2_0, AD_DO, SEL_ADI_0),
+       PINMUX_IPSR_MSEL(IP7_5_3, ETH_RX_ER, SEL_ETH_0),
        PINMUX_IPSR_DATA(IP7_5_3, VI0_G2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, MSIOF2_SCK_B, SEL_MSI2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, CAN0_RX_B, SEL_CAN0_1),
+       PINMUX_IPSR_MSEL(IP7_5_3, MSIOF2_SCK_B, SEL_MSI2_1),
+       PINMUX_IPSR_MSEL(IP7_5_3, CAN0_RX_B, SEL_CAN0_1),
        PINMUX_IPSR_DATA(IP7_5_3, AVB_TXD1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, ADICLK, SEL_RAD_0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, AD_CLK, SEL_ADI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, ETH_RXD0, SEL_ETH_0),
+       PINMUX_IPSR_MSEL(IP7_5_3, ADICLK, SEL_RAD_0),
+       PINMUX_IPSR_MSEL(IP7_5_3, AD_CLK, SEL_ADI_0),
+       PINMUX_IPSR_MSEL(IP7_8_6, ETH_RXD0, SEL_ETH_0),
        PINMUX_IPSR_DATA(IP7_8_6, VI0_G3),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, MSIOF2_SYNC_B, SEL_MSI2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, CAN0_TX_B, SEL_CAN0_1),
+       PINMUX_IPSR_MSEL(IP7_8_6, MSIOF2_SYNC_B, SEL_MSI2_1),
+       PINMUX_IPSR_MSEL(IP7_8_6, CAN0_TX_B, SEL_CAN0_1),
        PINMUX_IPSR_DATA(IP7_8_6, AVB_TXD2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, ADICHS0, SEL_RAD_0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, AD_NCS_N, SEL_ADI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_11_9, ETH_RXD1, SEL_ETH_0),
+       PINMUX_IPSR_MSEL(IP7_8_6, ADICHS0, SEL_RAD_0),
+       PINMUX_IPSR_MSEL(IP7_8_6, AD_NCS_N, SEL_ADI_0),
+       PINMUX_IPSR_MSEL(IP7_11_9, ETH_RXD1, SEL_ETH_0),
        PINMUX_IPSR_DATA(IP7_11_9, VI0_G4),
-       PINMUX_IPSR_MODSEL_DATA(IP7_11_9, MSIOF2_SS1_B, SEL_MSI2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_11_9, SCIF4_RXD_D, SEL_SCIF4_3),
+       PINMUX_IPSR_MSEL(IP7_11_9, MSIOF2_SS1_B, SEL_MSI2_1),
+       PINMUX_IPSR_MSEL(IP7_11_9, SCIF4_RXD_D, SEL_SCIF4_3),
        PINMUX_IPSR_DATA(IP7_11_9, AVB_TXD3),
-       PINMUX_IPSR_MODSEL_DATA(IP7_11_9, ADICHS1, SEL_RAD_0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_14_12, ETH_LINK, SEL_ETH_0),
+       PINMUX_IPSR_MSEL(IP7_11_9, ADICHS1, SEL_RAD_0),
+       PINMUX_IPSR_MSEL(IP7_14_12, ETH_LINK, SEL_ETH_0),
        PINMUX_IPSR_DATA(IP7_14_12, VI0_G5),
-       PINMUX_IPSR_MODSEL_DATA(IP7_14_12, MSIOF2_SS2_B, SEL_MSI2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_14_12, SCIF4_TXD_D, SEL_SCIF4_3),
+       PINMUX_IPSR_MSEL(IP7_14_12, MSIOF2_SS2_B, SEL_MSI2_1),
+       PINMUX_IPSR_MSEL(IP7_14_12, SCIF4_TXD_D, SEL_SCIF4_3),
        PINMUX_IPSR_DATA(IP7_14_12, AVB_TXD4),
-       PINMUX_IPSR_MODSEL_DATA(IP7_14_12, ADICHS2, SEL_RAD_0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_17_15, ETH_REFCLK, SEL_ETH_0),
+       PINMUX_IPSR_MSEL(IP7_14_12, ADICHS2, SEL_RAD_0),
+       PINMUX_IPSR_MSEL(IP7_17_15, ETH_REFCLK, SEL_ETH_0),
        PINMUX_IPSR_DATA(IP7_17_15, VI0_G6),
-       PINMUX_IPSR_MODSEL_DATA(IP7_17_15, SCIF2_SCK_C, SEL_SCIF2_2),
+       PINMUX_IPSR_MSEL(IP7_17_15, SCIF2_SCK_C, SEL_SCIF2_2),
        PINMUX_IPSR_DATA(IP7_17_15, AVB_TXD5),
-       PINMUX_IPSR_MODSEL_DATA(IP7_17_15, SSI_SCK5_B, SEL_SSI5_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_20_18, ETH_TXD1, SEL_ETH_0),
+       PINMUX_IPSR_MSEL(IP7_17_15, SSI_SCK5_B, SEL_SSI5_1),
+       PINMUX_IPSR_MSEL(IP7_20_18, ETH_TXD1, SEL_ETH_0),
        PINMUX_IPSR_DATA(IP7_20_18, VI0_G7),
-       PINMUX_IPSR_MODSEL_DATA(IP7_20_18, SCIF2_RXD_C, SEL_SCIF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_20_18, IIC1_SCL_D, SEL_IIC01_3),
+       PINMUX_IPSR_MSEL(IP7_20_18, SCIF2_RXD_C, SEL_SCIF2_2),
+       PINMUX_IPSR_MSEL(IP7_20_18, IIC1_SCL_D, SEL_IIC01_3),
        PINMUX_IPSR_DATA(IP7_20_18, AVB_TXD6),
-       PINMUX_IPSR_MODSEL_DATA(IP7_20_18, SSI_WS5_B, SEL_SSI5_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_23_21, ETH_TX_EN, SEL_ETH_0),
+       PINMUX_IPSR_MSEL(IP7_20_18, SSI_WS5_B, SEL_SSI5_1),
+       PINMUX_IPSR_MSEL(IP7_23_21, ETH_TX_EN, SEL_ETH_0),
        PINMUX_IPSR_DATA(IP7_23_21, VI0_R0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_23_21, SCIF2_TXD_C, SEL_SCIF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_23_21, IIC1_SDA_D, SEL_IIC01_3),
+       PINMUX_IPSR_MSEL(IP7_23_21, SCIF2_TXD_C, SEL_SCIF2_2),
+       PINMUX_IPSR_MSEL(IP7_23_21, IIC1_SDA_D, SEL_IIC01_3),
        PINMUX_IPSR_DATA(IP7_23_21, AVB_TXD7),
-       PINMUX_IPSR_MODSEL_DATA(IP7_23_21, SSI_SDATA5_B, SEL_SSI5_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_26_24, ETH_MAGIC, SEL_ETH_0),
+       PINMUX_IPSR_MSEL(IP7_23_21, SSI_SDATA5_B, SEL_SSI5_1),
+       PINMUX_IPSR_MSEL(IP7_26_24, ETH_MAGIC, SEL_ETH_0),
        PINMUX_IPSR_DATA(IP7_26_24, VI0_R1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_26_24, SCIF3_SCK_B, SEL_SCIF3_1),
+       PINMUX_IPSR_MSEL(IP7_26_24, SCIF3_SCK_B, SEL_SCIF3_1),
        PINMUX_IPSR_DATA(IP7_26_24, AVB_TX_ER),
-       PINMUX_IPSR_MODSEL_DATA(IP7_26_24, SSI_SCK6_B, SEL_SSI6_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_29_27, ETH_TXD0, SEL_ETH_0),
+       PINMUX_IPSR_MSEL(IP7_26_24, SSI_SCK6_B, SEL_SSI6_1),
+       PINMUX_IPSR_MSEL(IP7_29_27, ETH_TXD0, SEL_ETH_0),
        PINMUX_IPSR_DATA(IP7_29_27, VI0_R2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_29_27, SCIF3_RXD_B, SEL_SCIF3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_29_27, I2C4_SCL_E, SEL_I2C04_4),
+       PINMUX_IPSR_MSEL(IP7_29_27, SCIF3_RXD_B, SEL_SCIF3_1),
+       PINMUX_IPSR_MSEL(IP7_29_27, I2C4_SCL_E, SEL_I2C04_4),
        PINMUX_IPSR_DATA(IP7_29_27, AVB_GTX_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP7_29_27, SSI_WS6_B, SEL_SSI6_1),
+       PINMUX_IPSR_MSEL(IP7_29_27, SSI_WS6_B, SEL_SSI6_1),
        PINMUX_IPSR_DATA(IP7_31, DREQ0_N),
        PINMUX_IPSR_DATA(IP7_31, SCIFB1_RXD),
 
        /* IPSR8 */
-       PINMUX_IPSR_MODSEL_DATA(IP8_2_0, ETH_MDC, SEL_ETH_0),
+       PINMUX_IPSR_MSEL(IP8_2_0, ETH_MDC, SEL_ETH_0),
        PINMUX_IPSR_DATA(IP8_2_0, VI0_R3),
-       PINMUX_IPSR_MODSEL_DATA(IP8_2_0, SCIF3_TXD_B, SEL_SCIF3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_2_0, I2C4_SDA_E, SEL_I2C04_4),
+       PINMUX_IPSR_MSEL(IP8_2_0, SCIF3_TXD_B, SEL_SCIF3_1),
+       PINMUX_IPSR_MSEL(IP8_2_0, I2C4_SDA_E, SEL_I2C04_4),
        PINMUX_IPSR_DATA(IP8_2_0, AVB_MDC),
-       PINMUX_IPSR_MODSEL_DATA(IP8_2_0, SSI_SDATA6_B, SEL_SSI6_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_5_3, HSCIF0_HRX, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP8_2_0, SSI_SDATA6_B, SEL_SSI6_1),
+       PINMUX_IPSR_MSEL(IP8_5_3, HSCIF0_HRX, SEL_HSCIF0_0),
        PINMUX_IPSR_DATA(IP8_5_3, VI0_R4),
-       PINMUX_IPSR_MODSEL_DATA(IP8_5_3, I2C1_SCL_C, SEL_I2C01_2),
-       PINMUX_IPSR_MODSEL_DATA(IP8_5_3, AUDIO_CLKA_B, SEL_ADG_1),
+       PINMUX_IPSR_MSEL(IP8_5_3, I2C1_SCL_C, SEL_I2C01_2),
+       PINMUX_IPSR_MSEL(IP8_5_3, AUDIO_CLKA_B, SEL_ADG_1),
        PINMUX_IPSR_DATA(IP8_5_3, AVB_MDIO),
-       PINMUX_IPSR_MODSEL_DATA(IP8_5_3, SSI_SCK78_B, SEL_SSI7_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_8_6, HSCIF0_HTX, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP8_5_3, SSI_SCK78_B, SEL_SSI7_1),
+       PINMUX_IPSR_MSEL(IP8_8_6, HSCIF0_HTX, SEL_HSCIF0_0),
        PINMUX_IPSR_DATA(IP8_8_6, VI0_R5),
-       PINMUX_IPSR_MODSEL_DATA(IP8_8_6, I2C1_SDA_C, SEL_I2C01_2),
-       PINMUX_IPSR_MODSEL_DATA(IP8_8_6, AUDIO_CLKB_B, SEL_ADG_1),
+       PINMUX_IPSR_MSEL(IP8_8_6, I2C1_SDA_C, SEL_I2C01_2),
+       PINMUX_IPSR_MSEL(IP8_8_6, AUDIO_CLKB_B, SEL_ADG_1),
        PINMUX_IPSR_DATA(IP8_5_3, AVB_LINK),
-       PINMUX_IPSR_MODSEL_DATA(IP8_8_6, SSI_WS78_B, SEL_SSI7_1),
+       PINMUX_IPSR_MSEL(IP8_8_6, SSI_WS78_B, SEL_SSI7_1),
        PINMUX_IPSR_DATA(IP8_11_9, HSCIF0_HCTS_N),
        PINMUX_IPSR_DATA(IP8_11_9, VI0_R6),
-       PINMUX_IPSR_MODSEL_DATA(IP8_11_9, SCIF0_RXD_D, SEL_SCIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP8_11_9, I2C0_SCL_E, SEL_I2C00_4),
+       PINMUX_IPSR_MSEL(IP8_11_9, SCIF0_RXD_D, SEL_SCIF0_3),
+       PINMUX_IPSR_MSEL(IP8_11_9, I2C0_SCL_E, SEL_I2C00_4),
        PINMUX_IPSR_DATA(IP8_11_9, AVB_MAGIC),
-       PINMUX_IPSR_MODSEL_DATA(IP8_11_9, SSI_SDATA7_B, SEL_SSI7_1),
+       PINMUX_IPSR_MSEL(IP8_11_9, SSI_SDATA7_B, SEL_SSI7_1),
        PINMUX_IPSR_DATA(IP8_14_12, HSCIF0_HRTS_N),
        PINMUX_IPSR_DATA(IP8_14_12, VI0_R7),
-       PINMUX_IPSR_MODSEL_DATA(IP8_14_12, SCIF0_TXD_D, SEL_SCIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP8_14_12, I2C0_SDA_E, SEL_I2C00_4),
+       PINMUX_IPSR_MSEL(IP8_14_12, SCIF0_TXD_D, SEL_SCIF0_3),
+       PINMUX_IPSR_MSEL(IP8_14_12, I2C0_SDA_E, SEL_I2C00_4),
        PINMUX_IPSR_DATA(IP8_14_12, AVB_PHY_INT),
-       PINMUX_IPSR_MODSEL_DATA(IP8_14_12, SSI_SDATA8_B, SEL_SSI8_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_16_15, HSCIF0_HSCK, SEL_HSCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_16_15, SCIF_CLK_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP8_14_12, SSI_SDATA8_B, SEL_SSI8_1),
+       PINMUX_IPSR_MSEL(IP8_16_15, HSCIF0_HSCK, SEL_HSCIF0_0),
+       PINMUX_IPSR_MSEL(IP8_16_15, SCIF_CLK_B, SEL_SCIF0_1),
        PINMUX_IPSR_DATA(IP8_16_15, AVB_CRS),
-       PINMUX_IPSR_MODSEL_DATA(IP8_16_15, AUDIO_CLKC_B, SEL_ADG_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_19_17, I2C0_SCL, SEL_I2C00_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_19_17, SCIF0_RXD_C, SEL_SCIF0_2),
+       PINMUX_IPSR_MSEL(IP8_16_15, AUDIO_CLKC_B, SEL_ADG_1),
+       PINMUX_IPSR_MSEL(IP8_19_17, I2C0_SCL, SEL_I2C00_0),
+       PINMUX_IPSR_MSEL(IP8_19_17, SCIF0_RXD_C, SEL_SCIF0_2),
        PINMUX_IPSR_DATA(IP8_19_17, PWM5),
-       PINMUX_IPSR_MODSEL_DATA(IP8_19_17, TCLK1_B, SEL_TMU_1),
+       PINMUX_IPSR_MSEL(IP8_19_17, TCLK1_B, SEL_TMU_1),
        PINMUX_IPSR_DATA(IP8_19_17, AVB_GTXREFCLK),
-       PINMUX_IPSR_MODSEL_DATA(IP8_19_17, CAN1_RX_D, SEL_CAN1_3),
+       PINMUX_IPSR_MSEL(IP8_19_17, CAN1_RX_D, SEL_CAN1_3),
        PINMUX_IPSR_DATA(IP8_19_17, TPUTO0_B),
-       PINMUX_IPSR_MODSEL_DATA(IP8_22_20, I2C0_SDA, SEL_I2C00_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_22_20, SCIF0_TXD_C, SEL_SCIF0_2),
+       PINMUX_IPSR_MSEL(IP8_22_20, I2C0_SDA, SEL_I2C00_0),
+       PINMUX_IPSR_MSEL(IP8_22_20, SCIF0_TXD_C, SEL_SCIF0_2),
        PINMUX_IPSR_DATA(IP8_22_20, TPUTO0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_22_20, CAN_CLK, SEL_CAN_0),
+       PINMUX_IPSR_MSEL(IP8_22_20, CAN_CLK, SEL_CAN_0),
        PINMUX_IPSR_DATA(IP8_22_20, DVC_MUTE),
-       PINMUX_IPSR_MODSEL_DATA(IP8_22_20, CAN1_TX_D, SEL_CAN1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP8_25_23, I2C1_SCL, SEL_I2C01_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_25_23, SCIF4_RXD, SEL_SCIF4_0),
+       PINMUX_IPSR_MSEL(IP8_22_20, CAN1_TX_D, SEL_CAN1_3),
+       PINMUX_IPSR_MSEL(IP8_25_23, I2C1_SCL, SEL_I2C01_0),
+       PINMUX_IPSR_MSEL(IP8_25_23, SCIF4_RXD, SEL_SCIF4_0),
        PINMUX_IPSR_DATA(IP8_25_23, PWM5_B),
        PINMUX_IPSR_DATA(IP8_25_23, DU1_DR0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_25_23, RIF1_SYNC_B, SEL_DR2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_25_23, TS_SDATA_D, SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP8_25_23, RIF1_SYNC_B, SEL_DR2_1),
+       PINMUX_IPSR_MSEL(IP8_25_23, TS_SDATA_D, SEL_TSIF0_3),
        PINMUX_IPSR_DATA(IP8_25_23, TPUTO1_B),
-       PINMUX_IPSR_MODSEL_DATA(IP8_28_26, I2C1_SDA, SEL_I2C01_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_28_26, SCIF4_TXD, SEL_SCIF4_0),
+       PINMUX_IPSR_MSEL(IP8_28_26, I2C1_SDA, SEL_I2C01_0),
+       PINMUX_IPSR_MSEL(IP8_28_26, SCIF4_TXD, SEL_SCIF4_0),
        PINMUX_IPSR_DATA(IP8_28_26, IRQ5),
        PINMUX_IPSR_DATA(IP8_28_26, DU1_DR1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_28_26, RIF1_CLK_B, SEL_DR2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_28_26, TS_SCK_D, SEL_TSIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP8_28_26, BPFCLK_C, SEL_DARC_2),
+       PINMUX_IPSR_MSEL(IP8_28_26, RIF1_CLK_B, SEL_DR2_1),
+       PINMUX_IPSR_MSEL(IP8_28_26, TS_SCK_D, SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP8_28_26, BPFCLK_C, SEL_DARC_2),
        PINMUX_IPSR_DATA(IP8_31_29, MSIOF0_RXD),
-       PINMUX_IPSR_MODSEL_DATA(IP8_31_29, SCIF5_RXD, SEL_SCIF5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_31_29, I2C2_SCL_C, SEL_I2C02_2),
+       PINMUX_IPSR_MSEL(IP8_31_29, SCIF5_RXD, SEL_SCIF5_0),
+       PINMUX_IPSR_MSEL(IP8_31_29, I2C2_SCL_C, SEL_I2C02_2),
        PINMUX_IPSR_DATA(IP8_31_29, DU1_DR2),
-       PINMUX_IPSR_MODSEL_DATA(IP8_31_29, RIF1_D0_B, SEL_DR2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_31_29, TS_SDEN_D, SEL_TSIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP8_31_29, FMCLK_C, SEL_DARC_2),
-       PINMUX_IPSR_MODSEL_DATA(IP8_31_29, RDS_CLK, SEL_RDS_0),
+       PINMUX_IPSR_MSEL(IP8_31_29, RIF1_D0_B, SEL_DR2_1),
+       PINMUX_IPSR_MSEL(IP8_31_29, TS_SDEN_D, SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP8_31_29, FMCLK_C, SEL_DARC_2),
+       PINMUX_IPSR_MSEL(IP8_31_29, RDS_CLK, SEL_RDS_0),
 
        /* IPSR9 */
        PINMUX_IPSR_DATA(IP9_2_0, MSIOF0_TXD),
-       PINMUX_IPSR_MODSEL_DATA(IP9_2_0, SCIF5_TXD, SEL_SCIF5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_2_0, I2C2_SDA_C, SEL_I2C02_2),
+       PINMUX_IPSR_MSEL(IP9_2_0, SCIF5_TXD, SEL_SCIF5_0),
+       PINMUX_IPSR_MSEL(IP9_2_0, I2C2_SDA_C, SEL_I2C02_2),
        PINMUX_IPSR_DATA(IP9_2_0, DU1_DR3),
-       PINMUX_IPSR_MODSEL_DATA(IP9_2_0, RIF1_D1_B, SEL_DR3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_2_0, TS_SPSYNC_D, SEL_TSIF0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP9_2_0, FMIN_C, SEL_DARC_2),
-       PINMUX_IPSR_MODSEL_DATA(IP9_2_0, RDS_DATA, SEL_RDS_0),
+       PINMUX_IPSR_MSEL(IP9_2_0, RIF1_D1_B, SEL_DR3_1),
+       PINMUX_IPSR_MSEL(IP9_2_0, TS_SPSYNC_D, SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP9_2_0, FMIN_C, SEL_DARC_2),
+       PINMUX_IPSR_MSEL(IP9_2_0, RDS_DATA, SEL_RDS_0),
        PINMUX_IPSR_DATA(IP9_5_3, MSIOF0_SCK),
        PINMUX_IPSR_DATA(IP9_5_3, IRQ0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_5_3, TS_SDATA, SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP9_5_3, TS_SDATA, SEL_TSIF0_0),
        PINMUX_IPSR_DATA(IP9_5_3, DU1_DR4),
-       PINMUX_IPSR_MODSEL_DATA(IP9_5_3, RIF1_SYNC, SEL_DR2_0),
+       PINMUX_IPSR_MSEL(IP9_5_3, RIF1_SYNC, SEL_DR2_0),
        PINMUX_IPSR_DATA(IP9_5_3, TPUTO1_C),
        PINMUX_IPSR_DATA(IP9_8_6, MSIOF0_SYNC),
        PINMUX_IPSR_DATA(IP9_8_6, PWM1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_8_6, TS_SCK, SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP9_8_6, TS_SCK, SEL_TSIF0_0),
        PINMUX_IPSR_DATA(IP9_8_6, DU1_DR5),
-       PINMUX_IPSR_MODSEL_DATA(IP9_8_6, RIF1_CLK, SEL_DR2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_8_6, BPFCLK_B, SEL_DARC_1),
+       PINMUX_IPSR_MSEL(IP9_8_6, RIF1_CLK, SEL_DR2_0),
+       PINMUX_IPSR_MSEL(IP9_8_6, BPFCLK_B, SEL_DARC_1),
        PINMUX_IPSR_DATA(IP9_11_9, MSIOF0_SS1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_9, SCIFA0_RXD, SEL_SCIFA0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_9, TS_SDEN, SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP9_11_9, SCIFA0_RXD, SEL_SCIFA0_0),
+       PINMUX_IPSR_MSEL(IP9_11_9, TS_SDEN, SEL_TSIF0_0),
        PINMUX_IPSR_DATA(IP9_11_9, DU1_DR6),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_9, RIF1_D0, SEL_DR2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_9, FMCLK_B, SEL_DARC_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_9, RDS_CLK_B, SEL_RDS_1),
+       PINMUX_IPSR_MSEL(IP9_11_9, RIF1_D0, SEL_DR2_0),
+       PINMUX_IPSR_MSEL(IP9_11_9, FMCLK_B, SEL_DARC_1),
+       PINMUX_IPSR_MSEL(IP9_11_9, RDS_CLK_B, SEL_RDS_1),
        PINMUX_IPSR_DATA(IP9_14_12, MSIOF0_SS2),
-       PINMUX_IPSR_MODSEL_DATA(IP9_14_12, SCIFA0_TXD, SEL_SCIFA0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_14_12, TS_SPSYNC, SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP9_14_12, SCIFA0_TXD, SEL_SCIFA0_0),
+       PINMUX_IPSR_MSEL(IP9_14_12, TS_SPSYNC, SEL_TSIF0_0),
        PINMUX_IPSR_DATA(IP9_14_12, DU1_DR7),
-       PINMUX_IPSR_MODSEL_DATA(IP9_14_12, RIF1_D1, SEL_DR3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_14_12, FMIN_B, SEL_DARC_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_14_12, RDS_DATA_B, SEL_RDS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_16_15, HSCIF1_HRX, SEL_HSCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_16_15, I2C4_SCL, SEL_I2C04_0),
+       PINMUX_IPSR_MSEL(IP9_14_12, RIF1_D1, SEL_DR3_0),
+       PINMUX_IPSR_MSEL(IP9_14_12, FMIN_B, SEL_DARC_1),
+       PINMUX_IPSR_MSEL(IP9_14_12, RDS_DATA_B, SEL_RDS_1),
+       PINMUX_IPSR_MSEL(IP9_16_15, HSCIF1_HRX, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP9_16_15, I2C4_SCL, SEL_I2C04_0),
        PINMUX_IPSR_DATA(IP9_16_15, PWM6),
        PINMUX_IPSR_DATA(IP9_16_15, DU1_DG0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_18_17, HSCIF1_HTX, SEL_HSCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_18_17, I2C4_SDA, SEL_I2C04_0),
+       PINMUX_IPSR_MSEL(IP9_18_17, HSCIF1_HTX, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP9_18_17, I2C4_SDA, SEL_I2C04_0),
        PINMUX_IPSR_DATA(IP9_18_17, TPUTO1),
        PINMUX_IPSR_DATA(IP9_18_17, DU1_DG1),
        PINMUX_IPSR_DATA(IP9_21_19, HSCIF1_HSCK),
        PINMUX_IPSR_DATA(IP9_21_19, PWM2),
-       PINMUX_IPSR_MODSEL_DATA(IP9_21_19, IETX, SEL_IEB_0),
+       PINMUX_IPSR_MSEL(IP9_21_19, IETX, SEL_IEB_0),
        PINMUX_IPSR_DATA(IP9_21_19, DU1_DG2),
-       PINMUX_IPSR_MODSEL_DATA(IP9_21_19, REMOCON_B, SEL_RCN_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_21_19, SPEEDIN_B, SEL_RSP_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_21_19, VSP_B, SEL_SPDM_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_24_22, HSCIF1_HCTS_N, SEL_HSCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_24_22, SCIFA4_RXD, SEL_SCIFA4_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_24_22, IECLK, SEL_IEB_0),
+       PINMUX_IPSR_MSEL(IP9_21_19, REMOCON_B, SEL_RCN_1),
+       PINMUX_IPSR_MSEL(IP9_21_19, SPEEDIN_B, SEL_RSP_1),
+       PINMUX_IPSR_MSEL(IP9_21_19, VSP_B, SEL_SPDM_1),
+       PINMUX_IPSR_MSEL(IP9_24_22, HSCIF1_HCTS_N, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP9_24_22, SCIFA4_RXD, SEL_SCIFA4_0),
+       PINMUX_IPSR_MSEL(IP9_24_22, IECLK, SEL_IEB_0),
        PINMUX_IPSR_DATA(IP9_24_22, DU1_DG3),
-       PINMUX_IPSR_MODSEL_DATA(IP9_24_22, SSI_SCK1_B, SEL_SSI1_1),
+       PINMUX_IPSR_MSEL(IP9_24_22, SSI_SCK1_B, SEL_SSI1_1),
        PINMUX_IPSR_DATA(IP9_24_22, CAN_DEBUG_HW_TRIGGER),
        PINMUX_IPSR_DATA(IP9_24_22, CC50_STATE32),
-       PINMUX_IPSR_MODSEL_DATA(IP9_27_25, HSCIF1_HRTS_N, SEL_HSCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_27_25, SCIFA4_TXD, SEL_SCIFA4_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_27_25, IERX, SEL_IEB_0),
+       PINMUX_IPSR_MSEL(IP9_27_25, HSCIF1_HRTS_N, SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP9_27_25, SCIFA4_TXD, SEL_SCIFA4_0),
+       PINMUX_IPSR_MSEL(IP9_27_25, IERX, SEL_IEB_0),
        PINMUX_IPSR_DATA(IP9_27_25, DU1_DG4),
-       PINMUX_IPSR_MODSEL_DATA(IP9_27_25, SSI_WS1_B, SEL_SSI1_1),
+       PINMUX_IPSR_MSEL(IP9_27_25, SSI_WS1_B, SEL_SSI1_1),
        PINMUX_IPSR_DATA(IP9_27_25, CAN_STEP0),
        PINMUX_IPSR_DATA(IP9_27_25, CC50_STATE33),
-       PINMUX_IPSR_MODSEL_DATA(IP9_30_28, SCIF1_SCK, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP9_30_28, SCIF1_SCK, SEL_SCIF1_0),
        PINMUX_IPSR_DATA(IP9_30_28, PWM3),
-       PINMUX_IPSR_MODSEL_DATA(IP9_30_28, TCLK2, SEL_TMU_0),
+       PINMUX_IPSR_MSEL(IP9_30_28, TCLK2, SEL_TMU_0),
        PINMUX_IPSR_DATA(IP9_30_28, DU1_DG5),
-       PINMUX_IPSR_MODSEL_DATA(IP9_30_28, SSI_SDATA1_B, SEL_SSI1_1),
+       PINMUX_IPSR_MSEL(IP9_30_28, SSI_SDATA1_B, SEL_SSI1_1),
        PINMUX_IPSR_DATA(IP9_30_28, CAN_TXCLK),
        PINMUX_IPSR_DATA(IP9_30_28, CC50_STATE34),
 
        /* IPSR10 */
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SCIF1_RXD, SEL_SCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, IIC0_SCL, SEL_IIC00_0),
+       PINMUX_IPSR_MSEL(IP10_2_0, SCIF1_RXD, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP10_2_0, IIC0_SCL, SEL_IIC00_0),
        PINMUX_IPSR_DATA(IP10_2_0, DU1_DG6),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SSI_SCK2_B, SEL_SSI2_1),
+       PINMUX_IPSR_MSEL(IP10_2_0, SSI_SCK2_B, SEL_SSI2_1),
        PINMUX_IPSR_DATA(IP10_2_0, CAN_DEBUGOUT0),
        PINMUX_IPSR_DATA(IP10_2_0, CC50_STATE35),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SCIF1_TXD, SEL_SCIF1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, IIC0_SDA, SEL_IIC00_0),
+       PINMUX_IPSR_MSEL(IP10_5_3, SCIF1_TXD, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP10_5_3, IIC0_SDA, SEL_IIC00_0),
        PINMUX_IPSR_DATA(IP10_5_3, DU1_DG7),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SSI_WS2_B, SEL_SSI2_1),
+       PINMUX_IPSR_MSEL(IP10_5_3, SSI_WS2_B, SEL_SSI2_1),
        PINMUX_IPSR_DATA(IP10_5_3, CAN_DEBUGOUT1),
        PINMUX_IPSR_DATA(IP10_5_3, CC50_STATE36),
-       PINMUX_IPSR_MODSEL_DATA(IP10_8_6, SCIF2_RXD, SEL_SCIF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_8_6, IIC1_SCL, SEL_IIC01_0),
+       PINMUX_IPSR_MSEL(IP10_8_6, SCIF2_RXD, SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP10_8_6, IIC1_SCL, SEL_IIC01_0),
        PINMUX_IPSR_DATA(IP10_8_6, DU1_DB0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_8_6, SSI_SDATA2_B, SEL_SSI2_1),
+       PINMUX_IPSR_MSEL(IP10_8_6, SSI_SDATA2_B, SEL_SSI2_1),
        PINMUX_IPSR_DATA(IP10_8_6, USB0_EXTLP),
        PINMUX_IPSR_DATA(IP10_8_6, CAN_DEBUGOUT2),
        PINMUX_IPSR_DATA(IP10_8_6, CC50_STATE37),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SCIF2_TXD, SEL_SCIF2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, IIC1_SDA, SEL_IIC01_0),
+       PINMUX_IPSR_MSEL(IP10_11_9, SCIF2_TXD, SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP10_11_9, IIC1_SDA, SEL_IIC01_0),
        PINMUX_IPSR_DATA(IP10_11_9, DU1_DB1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SSI_SCK9_B, SEL_SSI9_1),
+       PINMUX_IPSR_MSEL(IP10_11_9, SSI_SCK9_B, SEL_SSI9_1),
        PINMUX_IPSR_DATA(IP10_11_9, USB0_OVC1),
        PINMUX_IPSR_DATA(IP10_11_9, CAN_DEBUGOUT3),
        PINMUX_IPSR_DATA(IP10_11_9, CC50_STATE38),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SCIF2_SCK, SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP10_14_12, SCIF2_SCK, SEL_SCIF2_0),
        PINMUX_IPSR_DATA(IP10_14_12, IRQ1),
        PINMUX_IPSR_DATA(IP10_14_12, DU1_DB2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SSI_WS9_B, SEL_SSI9_1),
+       PINMUX_IPSR_MSEL(IP10_14_12, SSI_WS9_B, SEL_SSI9_1),
        PINMUX_IPSR_DATA(IP10_14_12, USB0_IDIN),
        PINMUX_IPSR_DATA(IP10_14_12, CAN_DEBUGOUT4),
        PINMUX_IPSR_DATA(IP10_14_12, CC50_STATE39),
-       PINMUX_IPSR_MODSEL_DATA(IP10_17_15, SCIF3_SCK, SEL_SCIF3_0),
+       PINMUX_IPSR_MSEL(IP10_17_15, SCIF3_SCK, SEL_SCIF3_0),
        PINMUX_IPSR_DATA(IP10_17_15, IRQ2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_17_15, BPFCLK_D, SEL_DARC_3),
+       PINMUX_IPSR_MSEL(IP10_17_15, BPFCLK_D, SEL_DARC_3),
        PINMUX_IPSR_DATA(IP10_17_15, DU1_DB3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_17_15, SSI_SDATA9_B, SEL_SSI9_1),
+       PINMUX_IPSR_MSEL(IP10_17_15, SSI_SDATA9_B, SEL_SSI9_1),
        PINMUX_IPSR_DATA(IP10_17_15, TANS2),
        PINMUX_IPSR_DATA(IP10_17_15, CAN_DEBUGOUT5),
        PINMUX_IPSR_DATA(IP10_17_15, CC50_OSCOUT),
-       PINMUX_IPSR_MODSEL_DATA(IP10_20_18, SCIF3_RXD, SEL_SCIF3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_20_18, I2C1_SCL_E, SEL_I2C01_4),
-       PINMUX_IPSR_MODSEL_DATA(IP10_20_18, FMCLK_D, SEL_DARC_3),
+       PINMUX_IPSR_MSEL(IP10_20_18, SCIF3_RXD, SEL_SCIF3_0),
+       PINMUX_IPSR_MSEL(IP10_20_18, I2C1_SCL_E, SEL_I2C01_4),
+       PINMUX_IPSR_MSEL(IP10_20_18, FMCLK_D, SEL_DARC_3),
        PINMUX_IPSR_DATA(IP10_20_18, DU1_DB4),
-       PINMUX_IPSR_MODSEL_DATA(IP10_20_18, AUDIO_CLKA_C, SEL_ADG_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_20_18, SSI_SCK4_B, SEL_SSI4_1),
+       PINMUX_IPSR_MSEL(IP10_20_18, AUDIO_CLKA_C, SEL_ADG_2),
+       PINMUX_IPSR_MSEL(IP10_20_18, SSI_SCK4_B, SEL_SSI4_1),
        PINMUX_IPSR_DATA(IP10_20_18, CAN_DEBUGOUT6),
-       PINMUX_IPSR_MODSEL_DATA(IP10_20_18, RDS_CLK_C, SEL_RDS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_23_21, SCIF3_TXD, SEL_SCIF3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_23_21, I2C1_SDA_E, SEL_I2C01_4),
-       PINMUX_IPSR_MODSEL_DATA(IP10_23_21, FMIN_D, SEL_DARC_3),
+       PINMUX_IPSR_MSEL(IP10_20_18, RDS_CLK_C, SEL_RDS_2),
+       PINMUX_IPSR_MSEL(IP10_23_21, SCIF3_TXD, SEL_SCIF3_0),
+       PINMUX_IPSR_MSEL(IP10_23_21, I2C1_SDA_E, SEL_I2C01_4),
+       PINMUX_IPSR_MSEL(IP10_23_21, FMIN_D, SEL_DARC_3),
        PINMUX_IPSR_DATA(IP10_23_21, DU1_DB5),
-       PINMUX_IPSR_MODSEL_DATA(IP10_23_21, AUDIO_CLKB_C, SEL_ADG_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_23_21, SSI_WS4_B, SEL_SSI4_1),
+       PINMUX_IPSR_MSEL(IP10_23_21, AUDIO_CLKB_C, SEL_ADG_2),
+       PINMUX_IPSR_MSEL(IP10_23_21, SSI_WS4_B, SEL_SSI4_1),
        PINMUX_IPSR_DATA(IP10_23_21, CAN_DEBUGOUT7),
-       PINMUX_IPSR_MODSEL_DATA(IP10_23_21, RDS_DATA_C, SEL_RDS_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_26_24, I2C2_SCL, SEL_I2C02_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_26_24, SCIFA5_RXD, SEL_SCIFA5_0),
+       PINMUX_IPSR_MSEL(IP10_23_21, RDS_DATA_C, SEL_RDS_2),
+       PINMUX_IPSR_MSEL(IP10_26_24, I2C2_SCL, SEL_I2C02_0),
+       PINMUX_IPSR_MSEL(IP10_26_24, SCIFA5_RXD, SEL_SCIFA5_0),
        PINMUX_IPSR_DATA(IP10_26_24, DU1_DB6),
-       PINMUX_IPSR_MODSEL_DATA(IP10_26_24, AUDIO_CLKC_C, SEL_ADG_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_26_24, SSI_SDATA4_B, SEL_SSI4_1),
+       PINMUX_IPSR_MSEL(IP10_26_24, AUDIO_CLKC_C, SEL_ADG_2),
+       PINMUX_IPSR_MSEL(IP10_26_24, SSI_SDATA4_B, SEL_SSI4_1),
        PINMUX_IPSR_DATA(IP10_26_24, CAN_DEBUGOUT8),
-       PINMUX_IPSR_MODSEL_DATA(IP10_29_27, I2C2_SDA, SEL_I2C02_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_29_27, SCIFA5_TXD, SEL_SCIFA5_0),
+       PINMUX_IPSR_MSEL(IP10_29_27, I2C2_SDA, SEL_I2C02_0),
+       PINMUX_IPSR_MSEL(IP10_29_27, SCIFA5_TXD, SEL_SCIFA5_0),
        PINMUX_IPSR_DATA(IP10_29_27, DU1_DB7),
-       PINMUX_IPSR_MODSEL_DATA(IP10_29_27, AUDIO_CLKOUT_C, SEL_ADG_2),
+       PINMUX_IPSR_MSEL(IP10_29_27, AUDIO_CLKOUT_C, SEL_ADG_2),
        PINMUX_IPSR_DATA(IP10_29_27, CAN_DEBUGOUT9),
-       PINMUX_IPSR_MODSEL_DATA(IP10_31_30, SSI_SCK5, SEL_SSI5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_31_30, SCIFA3_SCK, SEL_SCIFA3_0),
+       PINMUX_IPSR_MSEL(IP10_31_30, SSI_SCK5, SEL_SSI5_0),
+       PINMUX_IPSR_MSEL(IP10_31_30, SCIFA3_SCK, SEL_SCIFA3_0),
        PINMUX_IPSR_DATA(IP10_31_30, DU1_DOTCLKIN),
        PINMUX_IPSR_DATA(IP10_31_30, CAN_DEBUGOUT10),
 
        /* IPSR11 */
-       PINMUX_IPSR_MODSEL_DATA(IP11_2_0, SSI_WS5, SEL_SSI5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_2_0, I2C3_SCL_C, SEL_I2C03_2),
+       PINMUX_IPSR_MSEL(IP11_2_0, SSI_WS5, SEL_SSI5_0),
+       PINMUX_IPSR_MSEL(IP11_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
+       PINMUX_IPSR_MSEL(IP11_2_0, I2C3_SCL_C, SEL_I2C03_2),
        PINMUX_IPSR_DATA(IP11_2_0, DU1_DOTCLKOUT0),
        PINMUX_IPSR_DATA(IP11_2_0, CAN_DEBUGOUT11),
-       PINMUX_IPSR_MODSEL_DATA(IP11_5_3, SSI_SDATA5, SEL_SSI5_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_5_3, SCIFA3_TXD, SEL_SCIFA3_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_5_3, I2C3_SDA_C, SEL_I2C03_2),
+       PINMUX_IPSR_MSEL(IP11_5_3, SSI_SDATA5, SEL_SSI5_0),
+       PINMUX_IPSR_MSEL(IP11_5_3, SCIFA3_TXD, SEL_SCIFA3_0),
+       PINMUX_IPSR_MSEL(IP11_5_3, I2C3_SDA_C, SEL_I2C03_2),
        PINMUX_IPSR_DATA(IP11_5_3, DU1_DOTCLKOUT1),
        PINMUX_IPSR_DATA(IP11_5_3, CAN_DEBUGOUT12),
-       PINMUX_IPSR_MODSEL_DATA(IP11_7_6, SSI_SCK6, SEL_SSI6_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_7_6, SCIFA1_SCK_B, SEL_SCIFA1_1),
+       PINMUX_IPSR_MSEL(IP11_7_6, SSI_SCK6, SEL_SSI6_0),
+       PINMUX_IPSR_MSEL(IP11_7_6, SCIFA1_SCK_B, SEL_SCIFA1_1),
        PINMUX_IPSR_DATA(IP11_7_6, DU1_EXHSYNC_DU1_HSYNC),
        PINMUX_IPSR_DATA(IP11_7_6, CAN_DEBUGOUT13),
-       PINMUX_IPSR_MODSEL_DATA(IP11_10_8, SSI_WS6, SEL_SSI6_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_10_8, SCIFA1_RXD_B, SEL_SCIFA1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_10_8, I2C4_SCL_C, SEL_I2C04_2),
+       PINMUX_IPSR_MSEL(IP11_10_8, SSI_WS6, SEL_SSI6_0),
+       PINMUX_IPSR_MSEL(IP11_10_8, SCIFA1_RXD_B, SEL_SCIFA1_1),
+       PINMUX_IPSR_MSEL(IP11_10_8, I2C4_SCL_C, SEL_I2C04_2),
        PINMUX_IPSR_DATA(IP11_10_8, DU1_EXVSYNC_DU1_VSYNC),
        PINMUX_IPSR_DATA(IP11_10_8, CAN_DEBUGOUT14),
-       PINMUX_IPSR_MODSEL_DATA(IP11_13_11, SSI_SDATA6, SEL_SSI6_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_13_11, SCIFA1_TXD_B, SEL_SCIFA1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_13_11, I2C4_SDA_C, SEL_I2C04_2),
+       PINMUX_IPSR_MSEL(IP11_13_11, SSI_SDATA6, SEL_SSI6_0),
+       PINMUX_IPSR_MSEL(IP11_13_11, SCIFA1_TXD_B, SEL_SCIFA1_1),
+       PINMUX_IPSR_MSEL(IP11_13_11, I2C4_SDA_C, SEL_I2C04_2),
        PINMUX_IPSR_DATA(IP11_13_11, DU1_EXODDF_DU1_ODDF_DISP_CDE),
        PINMUX_IPSR_DATA(IP11_13_11, CAN_DEBUGOUT15),
-       PINMUX_IPSR_MODSEL_DATA(IP11_15_14, SSI_SCK78, SEL_SSI7_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_15_14, SCIFA2_SCK_B, SEL_SCIFA2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_15_14, IIC0_SDA_C, SEL_IIC00_2),
+       PINMUX_IPSR_MSEL(IP11_15_14, SSI_SCK78, SEL_SSI7_0),
+       PINMUX_IPSR_MSEL(IP11_15_14, SCIFA2_SCK_B, SEL_SCIFA2_1),
+       PINMUX_IPSR_MSEL(IP11_15_14, IIC0_SDA_C, SEL_IIC00_2),
        PINMUX_IPSR_DATA(IP11_15_14, DU1_DISP),
-       PINMUX_IPSR_MODSEL_DATA(IP11_17_16, SSI_WS78, SEL_SSI7_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_17_16, SCIFA2_RXD_B, SEL_SCIFA2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_17_16, IIC0_SCL_C, SEL_IIC00_2),
+       PINMUX_IPSR_MSEL(IP11_17_16, SSI_WS78, SEL_SSI7_0),
+       PINMUX_IPSR_MSEL(IP11_17_16, SCIFA2_RXD_B, SEL_SCIFA2_1),
+       PINMUX_IPSR_MSEL(IP11_17_16, IIC0_SCL_C, SEL_IIC00_2),
        PINMUX_IPSR_DATA(IP11_17_16, DU1_CDE),
-       PINMUX_IPSR_MODSEL_DATA(IP11_20_18, SSI_SDATA7, SEL_SSI7_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_20_18, SCIFA2_TXD_B, SEL_SCIFA2_1),
+       PINMUX_IPSR_MSEL(IP11_20_18, SSI_SDATA7, SEL_SSI7_0),
+       PINMUX_IPSR_MSEL(IP11_20_18, SCIFA2_TXD_B, SEL_SCIFA2_1),
        PINMUX_IPSR_DATA(IP11_20_18, IRQ8),
-       PINMUX_IPSR_MODSEL_DATA(IP11_20_18, AUDIO_CLKA_D, SEL_ADG_3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_20_18, CAN_CLK_D, SEL_CAN_3),
+       PINMUX_IPSR_MSEL(IP11_20_18, AUDIO_CLKA_D, SEL_ADG_3),
+       PINMUX_IPSR_MSEL(IP11_20_18, CAN_CLK_D, SEL_CAN_3),
        PINMUX_IPSR_DATA(IP11_20_18, PCMOE_N),
        PINMUX_IPSR_DATA(IP11_23_21, SSI_SCK0129),
-       PINMUX_IPSR_MODSEL_DATA(IP11_23_21, MSIOF1_RXD_B, SEL_MSI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_23_21, SCIF5_RXD_D, SEL_SCIF5_3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_23_21, ADIDATA_B, SEL_RAD_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_23_21, AD_DI_B, SEL_ADI_1),
+       PINMUX_IPSR_MSEL(IP11_23_21, MSIOF1_RXD_B, SEL_MSI1_1),
+       PINMUX_IPSR_MSEL(IP11_23_21, SCIF5_RXD_D, SEL_SCIF5_3),
+       PINMUX_IPSR_MSEL(IP11_23_21, ADIDATA_B, SEL_RAD_1),
+       PINMUX_IPSR_MSEL(IP11_23_21, AD_DI_B, SEL_ADI_1),
        PINMUX_IPSR_DATA(IP11_23_21, PCMWE_N),
        PINMUX_IPSR_DATA(IP11_26_24, SSI_WS0129),
-       PINMUX_IPSR_MODSEL_DATA(IP11_26_24, MSIOF1_TXD_B, SEL_MSI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_26_24, SCIF5_TXD_D, SEL_SCIF5_3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_26_24, ADICS_SAMP_B, SEL_RAD_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_26_24, AD_DO_B, SEL_ADI_1),
+       PINMUX_IPSR_MSEL(IP11_26_24, MSIOF1_TXD_B, SEL_MSI1_1),
+       PINMUX_IPSR_MSEL(IP11_26_24, SCIF5_TXD_D, SEL_SCIF5_3),
+       PINMUX_IPSR_MSEL(IP11_26_24, ADICS_SAMP_B, SEL_RAD_1),
+       PINMUX_IPSR_MSEL(IP11_26_24, AD_DO_B, SEL_ADI_1),
        PINMUX_IPSR_DATA(IP11_29_27, SSI_SDATA0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_29_27, MSIOF1_SCK_B, SEL_MSI1_1),
+       PINMUX_IPSR_MSEL(IP11_29_27, MSIOF1_SCK_B, SEL_MSI1_1),
        PINMUX_IPSR_DATA(IP11_29_27, PWM0_B),
-       PINMUX_IPSR_MODSEL_DATA(IP11_29_27, ADICLK_B, SEL_RAD_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_29_27, AD_CLK_B, SEL_ADI_1),
+       PINMUX_IPSR_MSEL(IP11_29_27, ADICLK_B, SEL_RAD_1),
+       PINMUX_IPSR_MSEL(IP11_29_27, AD_CLK_B, SEL_ADI_1),
 
        /* IPSR12 */
        PINMUX_IPSR_DATA(IP12_2_0, SSI_SCK34),
-       PINMUX_IPSR_MODSEL_DATA(IP12_2_0, MSIOF1_SYNC_B, SEL_MSI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_2_0, SCIFA1_SCK_C, SEL_SCIFA1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_2_0, ADICHS0_B, SEL_RAD_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_2_0, AD_NCS_N_B, SEL_ADI_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_2_0, DREQ1_N_B, SEL_LBS_1),
+       PINMUX_IPSR_MSEL(IP12_2_0, MSIOF1_SYNC_B, SEL_MSI1_1),
+       PINMUX_IPSR_MSEL(IP12_2_0, SCIFA1_SCK_C, SEL_SCIFA1_2),
+       PINMUX_IPSR_MSEL(IP12_2_0, ADICHS0_B, SEL_RAD_1),
+       PINMUX_IPSR_MSEL(IP12_2_0, AD_NCS_N_B, SEL_ADI_1),
+       PINMUX_IPSR_MSEL(IP12_2_0, DREQ1_N_B, SEL_LBS_1),
        PINMUX_IPSR_DATA(IP12_5_3, SSI_WS34),
-       PINMUX_IPSR_MODSEL_DATA(IP12_5_3, MSIOF1_SS1_B, SEL_MSI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_5_3, SCIFA1_RXD_C, SEL_SCIFA1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_5_3, ADICHS1_B, SEL_RAD_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_5_3, CAN1_RX_C, SEL_CAN1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_5_3, DACK1_B, SEL_LBS_1),
+       PINMUX_IPSR_MSEL(IP12_5_3, MSIOF1_SS1_B, SEL_MSI1_1),
+       PINMUX_IPSR_MSEL(IP12_5_3, SCIFA1_RXD_C, SEL_SCIFA1_2),
+       PINMUX_IPSR_MSEL(IP12_5_3, ADICHS1_B, SEL_RAD_1),
+       PINMUX_IPSR_MSEL(IP12_5_3, CAN1_RX_C, SEL_CAN1_2),
+       PINMUX_IPSR_MSEL(IP12_5_3, DACK1_B, SEL_LBS_1),
        PINMUX_IPSR_DATA(IP12_8_6, SSI_SDATA3),
-       PINMUX_IPSR_MODSEL_DATA(IP12_8_6, MSIOF1_SS2_B, SEL_MSI1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_8_6, SCIFA1_TXD_C, SEL_SCIFA1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_8_6, ADICHS2_B, SEL_RAD_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_8_6, CAN1_TX_C, SEL_CAN1_2),
+       PINMUX_IPSR_MSEL(IP12_8_6, MSIOF1_SS2_B, SEL_MSI1_1),
+       PINMUX_IPSR_MSEL(IP12_8_6, SCIFA1_TXD_C, SEL_SCIFA1_2),
+       PINMUX_IPSR_MSEL(IP12_8_6, ADICHS2_B, SEL_RAD_1),
+       PINMUX_IPSR_MSEL(IP12_8_6, CAN1_TX_C, SEL_CAN1_2),
        PINMUX_IPSR_DATA(IP12_8_6, DREQ2_N),
-       PINMUX_IPSR_MODSEL_DATA(IP12_10_9, SSI_SCK4, SEL_SSI4_0),
+       PINMUX_IPSR_MSEL(IP12_10_9, SSI_SCK4, SEL_SSI4_0),
        PINMUX_IPSR_DATA(IP12_10_9, MLB_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP12_10_9, IETX_B, SEL_IEB_1),
+       PINMUX_IPSR_MSEL(IP12_10_9, IETX_B, SEL_IEB_1),
        PINMUX_IPSR_DATA(IP12_10_9, IRD_TX),
-       PINMUX_IPSR_MODSEL_DATA(IP12_12_11, SSI_WS4, SEL_SSI4_0),
+       PINMUX_IPSR_MSEL(IP12_12_11, SSI_WS4, SEL_SSI4_0),
        PINMUX_IPSR_DATA(IP12_12_11, MLB_SIG),
-       PINMUX_IPSR_MODSEL_DATA(IP12_12_11, IECLK_B, SEL_IEB_1),
+       PINMUX_IPSR_MSEL(IP12_12_11, IECLK_B, SEL_IEB_1),
        PINMUX_IPSR_DATA(IP12_12_11, IRD_RX),
-       PINMUX_IPSR_MODSEL_DATA(IP12_14_13, SSI_SDATA4, SEL_SSI4_0),
+       PINMUX_IPSR_MSEL(IP12_14_13, SSI_SDATA4, SEL_SSI4_0),
        PINMUX_IPSR_DATA(IP12_14_13, MLB_DAT),
-       PINMUX_IPSR_MODSEL_DATA(IP12_14_13, IERX_B, SEL_IEB_1),
+       PINMUX_IPSR_MSEL(IP12_14_13, IERX_B, SEL_IEB_1),
        PINMUX_IPSR_DATA(IP12_14_13, IRD_SCK),
-       PINMUX_IPSR_MODSEL_DATA(IP12_17_15, SSI_SDATA8, SEL_SSI8_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_17_15, SCIF1_SCK_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP12_17_15, SSI_SDATA8, SEL_SSI8_0),
+       PINMUX_IPSR_MSEL(IP12_17_15, SCIF1_SCK_B, SEL_SCIF1_1),
        PINMUX_IPSR_DATA(IP12_17_15, PWM1_B),
        PINMUX_IPSR_DATA(IP12_17_15, IRQ9),
-       PINMUX_IPSR_MODSEL_DATA(IP12_17_15, REMOCON, SEL_RCN_0),
+       PINMUX_IPSR_MSEL(IP12_17_15, REMOCON, SEL_RCN_0),
        PINMUX_IPSR_DATA(IP12_17_15, DACK2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_17_15, ETH_MDIO_B, SEL_ETH_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_20_18, SSI_SCK1, SEL_SSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_20_18, SCIF1_RXD_B, SEL_SCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_20_18, IIC1_SCL_C, SEL_IIC01_2),
+       PINMUX_IPSR_MSEL(IP12_17_15, ETH_MDIO_B, SEL_ETH_1),
+       PINMUX_IPSR_MSEL(IP12_20_18, SSI_SCK1, SEL_SSI1_0),
+       PINMUX_IPSR_MSEL(IP12_20_18, SCIF1_RXD_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP12_20_18, IIC1_SCL_C, SEL_IIC01_2),
        PINMUX_IPSR_DATA(IP12_20_18, VI1_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP12_20_18, CAN0_RX_D, SEL_CAN0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP12_20_18, AVB_AVTP_CAPTURE, SEL_AVB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_20_18, ETH_CRS_DV_B, SEL_ETH_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_23_21, SSI_WS1, SEL_SSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_23_21, SCIF1_TXD_B, SEL_SCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_23_21, IIC1_SDA_C, SEL_IIC01_2),
+       PINMUX_IPSR_MSEL(IP12_20_18, CAN0_RX_D, SEL_CAN0_3),
+       PINMUX_IPSR_MSEL(IP12_20_18, AVB_AVTP_CAPTURE, SEL_AVB_0),
+       PINMUX_IPSR_MSEL(IP12_20_18, ETH_CRS_DV_B, SEL_ETH_1),
+       PINMUX_IPSR_MSEL(IP12_23_21, SSI_WS1, SEL_SSI1_0),
+       PINMUX_IPSR_MSEL(IP12_23_21, SCIF1_TXD_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP12_23_21, IIC1_SDA_C, SEL_IIC01_2),
        PINMUX_IPSR_DATA(IP12_23_21, VI1_DATA0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_23_21, CAN0_TX_D, SEL_CAN0_3),
-       PINMUX_IPSR_MODSEL_DATA(IP12_23_21, AVB_AVTP_MATCH, SEL_AVB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_23_21, ETH_RX_ER_B, SEL_ETH_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_26_24, SSI_SDATA1, SEL_SSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_26_24, HSCIF1_HRX_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP12_23_21, CAN0_TX_D, SEL_CAN0_3),
+       PINMUX_IPSR_MSEL(IP12_23_21, AVB_AVTP_MATCH, SEL_AVB_0),
+       PINMUX_IPSR_MSEL(IP12_23_21, ETH_RX_ER_B, SEL_ETH_1),
+       PINMUX_IPSR_MSEL(IP12_26_24, SSI_SDATA1, SEL_SSI1_0),
+       PINMUX_IPSR_MSEL(IP12_26_24, HSCIF1_HRX_B, SEL_HSCIF1_1),
        PINMUX_IPSR_DATA(IP12_26_24, VI1_DATA1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_26_24, SDATA, SEL_FSN_0),
+       PINMUX_IPSR_MSEL(IP12_26_24, SDATA, SEL_FSN_0),
        PINMUX_IPSR_DATA(IP12_26_24, ATAG0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP12_26_24, ETH_RXD0_B, SEL_ETH_1),
-       PINMUX_IPSR_MODSEL_DATA(IP12_29_27, SSI_SCK2, SEL_SSI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP12_29_27, HSCIF1_HTX_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP12_26_24, ETH_RXD0_B, SEL_ETH_1),
+       PINMUX_IPSR_MSEL(IP12_29_27, SSI_SCK2, SEL_SSI2_0),
+       PINMUX_IPSR_MSEL(IP12_29_27, HSCIF1_HTX_B, SEL_HSCIF1_1),
        PINMUX_IPSR_DATA(IP12_29_27, VI1_DATA2),
-       PINMUX_IPSR_MODSEL_DATA(IP12_29_27, MDATA, SEL_FSN_0),
+       PINMUX_IPSR_MSEL(IP12_29_27, MDATA, SEL_FSN_0),
        PINMUX_IPSR_DATA(IP12_29_27, ATAWR0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP12_29_27, ETH_RXD1_B, SEL_ETH_1),
+       PINMUX_IPSR_MSEL(IP12_29_27, ETH_RXD1_B, SEL_ETH_1),
 
        /* IPSR13 */
-       PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SSI_WS2, SEL_SSI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_2_0, HSCIF1_HCTS_N_B, SEL_HSCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SCIFA0_RXD_D, SEL_SCIFA0_3),
+       PINMUX_IPSR_MSEL(IP13_2_0, SSI_WS2, SEL_SSI2_0),
+       PINMUX_IPSR_MSEL(IP13_2_0, HSCIF1_HCTS_N_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP13_2_0, SCIFA0_RXD_D, SEL_SCIFA0_3),
        PINMUX_IPSR_DATA(IP13_2_0, VI1_DATA3),
-       PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SCKZ, SEL_FSN_0),
+       PINMUX_IPSR_MSEL(IP13_2_0, SCKZ, SEL_FSN_0),
        PINMUX_IPSR_DATA(IP13_2_0, ATACS00_N),
-       PINMUX_IPSR_MODSEL_DATA(IP13_2_0, ETH_LINK_B, SEL_ETH_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_5_3, SSI_SDATA2, SEL_SSI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_5_3, HSCIF1_HRTS_N_B, SEL_HSCIF1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_5_3, SCIFA0_TXD_D, SEL_SCIFA0_3),
+       PINMUX_IPSR_MSEL(IP13_2_0, ETH_LINK_B, SEL_ETH_1),
+       PINMUX_IPSR_MSEL(IP13_5_3, SSI_SDATA2, SEL_SSI2_0),
+       PINMUX_IPSR_MSEL(IP13_5_3, HSCIF1_HRTS_N_B, SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP13_5_3, SCIFA0_TXD_D, SEL_SCIFA0_3),
        PINMUX_IPSR_DATA(IP13_5_3, VI1_DATA4),
-       PINMUX_IPSR_MODSEL_DATA(IP13_5_3, STM_N, SEL_FSN_0),
+       PINMUX_IPSR_MSEL(IP13_5_3, STM_N, SEL_FSN_0),
        PINMUX_IPSR_DATA(IP13_5_3, ATACS10_N),
-       PINMUX_IPSR_MODSEL_DATA(IP13_5_3, ETH_REFCLK_B, SEL_ETH_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_8_6, SSI_SCK9, SEL_SSI9_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_8_6, SCIF2_SCK_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP13_5_3, ETH_REFCLK_B, SEL_ETH_1),
+       PINMUX_IPSR_MSEL(IP13_8_6, SSI_SCK9, SEL_SSI9_0),
+       PINMUX_IPSR_MSEL(IP13_8_6, SCIF2_SCK_B, SEL_SCIF2_1),
        PINMUX_IPSR_DATA(IP13_8_6, PWM2_B),
        PINMUX_IPSR_DATA(IP13_8_6, VI1_DATA5),
-       PINMUX_IPSR_MODSEL_DATA(IP13_8_6, MTS_N, SEL_FSN_0),
+       PINMUX_IPSR_MSEL(IP13_8_6, MTS_N, SEL_FSN_0),
        PINMUX_IPSR_DATA(IP13_8_6, EX_WAIT1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_8_6, ETH_TXD1_B, SEL_ETH_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_11_9, SSI_WS9, SEL_SSI9_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_11_9, SCIF2_RXD_B, SEL_SCIF2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_11_9, I2C3_SCL_E, SEL_I2C03_4),
+       PINMUX_IPSR_MSEL(IP13_8_6, ETH_TXD1_B, SEL_ETH_1),
+       PINMUX_IPSR_MSEL(IP13_11_9, SSI_WS9, SEL_SSI9_0),
+       PINMUX_IPSR_MSEL(IP13_11_9, SCIF2_RXD_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP13_11_9, I2C3_SCL_E, SEL_I2C03_4),
        PINMUX_IPSR_DATA(IP13_11_9, VI1_DATA6),
        PINMUX_IPSR_DATA(IP13_11_9, ATARD0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP13_11_9, ETH_TX_EN_B, SEL_ETH_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_14_12, SSI_SDATA9, SEL_SSI9_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_14_12, SCIF2_TXD_B, SEL_SCIF2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_14_12, I2C3_SDA_E, SEL_I2C03_4),
+       PINMUX_IPSR_MSEL(IP13_11_9, ETH_TX_EN_B, SEL_ETH_1),
+       PINMUX_IPSR_MSEL(IP13_14_12, SSI_SDATA9, SEL_SSI9_0),
+       PINMUX_IPSR_MSEL(IP13_14_12, SCIF2_TXD_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP13_14_12, I2C3_SDA_E, SEL_I2C03_4),
        PINMUX_IPSR_DATA(IP13_14_12, VI1_DATA7),
        PINMUX_IPSR_DATA(IP13_14_12, ATADIR0_N),
-       PINMUX_IPSR_MODSEL_DATA(IP13_14_12, ETH_MAGIC_B, SEL_ETH_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_17_15, AUDIO_CLKA, SEL_ADG_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_17_15, I2C0_SCL_B, SEL_I2C00_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_17_15, SCIFA4_RXD_D, SEL_SCIFA4_3),
+       PINMUX_IPSR_MSEL(IP13_14_12, ETH_MAGIC_B, SEL_ETH_1),
+       PINMUX_IPSR_MSEL(IP13_17_15, AUDIO_CLKA, SEL_ADG_0),
+       PINMUX_IPSR_MSEL(IP13_17_15, I2C0_SCL_B, SEL_I2C00_1),
+       PINMUX_IPSR_MSEL(IP13_17_15, SCIFA4_RXD_D, SEL_SCIFA4_3),
        PINMUX_IPSR_DATA(IP13_17_15, VI1_CLKENB),
-       PINMUX_IPSR_MODSEL_DATA(IP13_17_15, TS_SDATA_C, SEL_TSIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP13_17_15, RIF0_SYNC_B, SEL_DR0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_17_15, ETH_TXD0_B, SEL_ETH_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_20_18, AUDIO_CLKB, SEL_ADG_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_20_18, I2C0_SDA_B, SEL_I2C00_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_20_18, SCIFA4_TXD_D, SEL_SCIFA4_3),
+       PINMUX_IPSR_MSEL(IP13_17_15, TS_SDATA_C, SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP13_17_15, RIF0_SYNC_B, SEL_DR0_1),
+       PINMUX_IPSR_MSEL(IP13_17_15, ETH_TXD0_B, SEL_ETH_1),
+       PINMUX_IPSR_MSEL(IP13_20_18, AUDIO_CLKB, SEL_ADG_0),
+       PINMUX_IPSR_MSEL(IP13_20_18, I2C0_SDA_B, SEL_I2C00_1),
+       PINMUX_IPSR_MSEL(IP13_20_18, SCIFA4_TXD_D, SEL_SCIFA4_3),
        PINMUX_IPSR_DATA(IP13_20_18, VI1_FIELD),
-       PINMUX_IPSR_MODSEL_DATA(IP13_20_18, TS_SCK_C, SEL_TSIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP13_20_18, RIF0_CLK_B, SEL_DR0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_20_18, BPFCLK_E, SEL_DARC_4),
-       PINMUX_IPSR_MODSEL_DATA(IP13_20_18, ETH_MDC_B, SEL_ETH_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_23_21, AUDIO_CLKC, SEL_ADG_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_23_21, I2C4_SCL_B, SEL_I2C04_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_23_21, SCIFA5_RXD_D, SEL_SCIFA5_3),
+       PINMUX_IPSR_MSEL(IP13_20_18, TS_SCK_C, SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP13_20_18, RIF0_CLK_B, SEL_DR0_1),
+       PINMUX_IPSR_MSEL(IP13_20_18, BPFCLK_E, SEL_DARC_4),
+       PINMUX_IPSR_MSEL(IP13_20_18, ETH_MDC_B, SEL_ETH_1),
+       PINMUX_IPSR_MSEL(IP13_23_21, AUDIO_CLKC, SEL_ADG_0),
+       PINMUX_IPSR_MSEL(IP13_23_21, I2C4_SCL_B, SEL_I2C04_1),
+       PINMUX_IPSR_MSEL(IP13_23_21, SCIFA5_RXD_D, SEL_SCIFA5_3),
        PINMUX_IPSR_DATA(IP13_23_21, VI1_HSYNC_N),
-       PINMUX_IPSR_MODSEL_DATA(IP13_23_21, TS_SDEN_C, SEL_TSIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP13_23_21, RIF0_D0_B, SEL_DR0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_23_21, FMCLK_E, SEL_DARC_4),
-       PINMUX_IPSR_MODSEL_DATA(IP13_23_21, RDS_CLK_D, SEL_RDS_3),
-       PINMUX_IPSR_MODSEL_DATA(IP13_26_24, AUDIO_CLKOUT, SEL_ADG_0),
-       PINMUX_IPSR_MODSEL_DATA(IP13_26_24, I2C4_SDA_B, SEL_I2C04_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_26_24, SCIFA5_TXD_D, SEL_SCIFA5_3),
+       PINMUX_IPSR_MSEL(IP13_23_21, TS_SDEN_C, SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP13_23_21, RIF0_D0_B, SEL_DR0_1),
+       PINMUX_IPSR_MSEL(IP13_23_21, FMCLK_E, SEL_DARC_4),
+       PINMUX_IPSR_MSEL(IP13_23_21, RDS_CLK_D, SEL_RDS_3),
+       PINMUX_IPSR_MSEL(IP13_26_24, AUDIO_CLKOUT, SEL_ADG_0),
+       PINMUX_IPSR_MSEL(IP13_26_24, I2C4_SDA_B, SEL_I2C04_1),
+       PINMUX_IPSR_MSEL(IP13_26_24, SCIFA5_TXD_D, SEL_SCIFA5_3),
        PINMUX_IPSR_DATA(IP13_26_24, VI1_VSYNC_N),
-       PINMUX_IPSR_MODSEL_DATA(IP13_26_24, TS_SPSYNC_C, SEL_TSIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP13_26_24, RIF0_D1_B, SEL_DR1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP13_26_24, FMIN_E, SEL_DARC_4),
-       PINMUX_IPSR_MODSEL_DATA(IP13_26_24, RDS_DATA_D, SEL_RDS_3),
+       PINMUX_IPSR_MSEL(IP13_26_24, TS_SPSYNC_C, SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP13_26_24, RIF0_D1_B, SEL_DR1_1),
+       PINMUX_IPSR_MSEL(IP13_26_24, FMIN_E, SEL_DARC_4),
+       PINMUX_IPSR_MSEL(IP13_26_24, RDS_DATA_D, SEL_RDS_3),
 };
 
 static const struct sh_pfc_pin pinmux_pins[] = {
@@ -2197,13 +2196,6 @@ static const unsigned int scif0_data_pins[] = {
 static const unsigned int scif0_data_mux[] = {
        SCIF0_RXD_MARK, SCIF0_TXD_MARK,
 };
-static const unsigned int scif0_clk_pins[] = {
-       /* SCK */
-       RCAR_GP_PIN(1, 23),
-};
-static const unsigned int scif0_clk_mux[] = {
-       SCIF_CLK_MARK,
-};
 static const unsigned int scif0_data_b_pins[] = {
        /* RX, TX */
        RCAR_GP_PIN(3, 11), RCAR_GP_PIN(3, 12),
@@ -2211,13 +2203,6 @@ static const unsigned int scif0_data_b_pins[] = {
 static const unsigned int scif0_data_b_mux[] = {
        SCIF0_RXD_B_MARK, SCIF0_TXD_B_MARK,
 };
-static const unsigned int scif0_clk_b_pins[] = {
-       /* SCK */
-       RCAR_GP_PIN(3, 29),
-};
-static const unsigned int scif0_clk_b_mux[] = {
-       SCIF_CLK_B_MARK,
-};
 static const unsigned int scif0_data_c_pins[] = {
        /* RX, TX */
        RCAR_GP_PIN(3, 30), RCAR_GP_PIN(3, 31),
@@ -2788,6 +2773,146 @@ static const unsigned int usb1_mux[] = {
        USB1_PWEN_MARK,
        USB1_OVC_MARK,
 };
+/* - VIN0 ------------------------------------------------------------------- */
+static const union vin_data vin0_data_pins = {
+       .data24 = {
+               /* B */
+               RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 2),
+               RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4),
+               RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 6),
+               RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 8),
+               /* G */
+               RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 14),
+               RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16),
+               RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 18),
+               RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 20),
+               /* R */
+               RCAR_GP_PIN(3, 21), RCAR_GP_PIN(3, 22),
+               RCAR_GP_PIN(3, 23), RCAR_GP_PIN(3, 24),
+               RCAR_GP_PIN(3, 25), RCAR_GP_PIN(3, 26),
+               RCAR_GP_PIN(3, 27), RCAR_GP_PIN(3, 28),
+       },
+};
+static const union vin_data vin0_data_mux = {
+       .data24 = {
+               /* B */
+               VI0_DATA0_VI0_B0_MARK, VI0_DATA1_VI0_B1_MARK,
+               VI0_DATA2_VI0_B2_MARK, VI0_DATA3_VI0_B3_MARK,
+               VI0_DATA4_VI0_B4_MARK, VI0_DATA5_VI0_B5_MARK,
+               VI0_DATA6_VI0_B6_MARK, VI0_DATA7_VI0_B7_MARK,
+               /* G */
+               VI0_G0_MARK, VI0_G1_MARK,
+               VI0_G2_MARK, VI0_G3_MARK,
+               VI0_G4_MARK, VI0_G5_MARK,
+               VI0_G6_MARK, VI0_G7_MARK,
+               /* R */
+               VI0_R0_MARK, VI0_R1_MARK,
+               VI0_R2_MARK, VI0_R3_MARK,
+               VI0_R4_MARK, VI0_R5_MARK,
+               VI0_R6_MARK, VI0_R7_MARK,
+       },
+};
+static const unsigned int vin0_data18_pins[] = {
+       /* B */
+       RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4),
+       RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 6),
+       RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 8),
+       /* G */
+       RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16),
+       RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 18),
+       RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 20),
+       /* R */
+       RCAR_GP_PIN(3, 23), RCAR_GP_PIN(3, 24),
+       RCAR_GP_PIN(3, 25), RCAR_GP_PIN(3, 26),
+       RCAR_GP_PIN(3, 27), RCAR_GP_PIN(3, 28),
+};
+static const unsigned int vin0_data18_mux[] = {
+       /* B */
+       VI0_DATA2_VI0_B2_MARK, VI0_DATA3_VI0_B3_MARK,
+       VI0_DATA4_VI0_B4_MARK, VI0_DATA5_VI0_B5_MARK,
+       VI0_DATA6_VI0_B6_MARK, VI0_DATA7_VI0_B7_MARK,
+       /* G */
+       VI0_G2_MARK, VI0_G3_MARK,
+       VI0_G4_MARK, VI0_G5_MARK,
+       VI0_G6_MARK, VI0_G7_MARK,
+       /* R */
+       VI0_R2_MARK, VI0_R3_MARK,
+       VI0_R4_MARK, VI0_R5_MARK,
+       VI0_R6_MARK, VI0_R7_MARK,
+};
+static const unsigned int vin0_sync_pins[] = {
+       RCAR_GP_PIN(3, 11), /* HSYNC */
+       RCAR_GP_PIN(3, 12), /* VSYNC */
+};
+static const unsigned int vin0_sync_mux[] = {
+       VI0_HSYNC_N_MARK,
+       VI0_VSYNC_N_MARK,
+};
+static const unsigned int vin0_field_pins[] = {
+       RCAR_GP_PIN(3, 10),
+};
+static const unsigned int vin0_field_mux[] = {
+       VI0_FIELD_MARK,
+};
+static const unsigned int vin0_clkenb_pins[] = {
+       RCAR_GP_PIN(3, 9),
+};
+static const unsigned int vin0_clkenb_mux[] = {
+       VI0_CLKENB_MARK,
+};
+static const unsigned int vin0_clk_pins[] = {
+       RCAR_GP_PIN(3, 0),
+};
+static const unsigned int vin0_clk_mux[] = {
+       VI0_CLK_MARK,
+};
+/* - VIN1 ------------------------------------------------------------------- */
+static const union vin_data vin1_data_pins = {
+       .data12 = {
+               RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 13),
+               RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 15),
+               RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 17),
+               RCAR_GP_PIN(5, 18), RCAR_GP_PIN(5, 19),
+               RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 11),
+               RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+       },
+};
+static const union vin_data vin1_data_mux = {
+       .data12 = {
+               VI1_DATA0_MARK, VI1_DATA1_MARK,
+               VI1_DATA2_MARK, VI1_DATA3_MARK,
+               VI1_DATA4_MARK, VI1_DATA5_MARK,
+               VI1_DATA6_MARK, VI1_DATA7_MARK,
+               VI1_DATA8_MARK, VI1_DATA9_MARK,
+               VI1_DATA10_MARK, VI1_DATA11_MARK,
+       },
+};
+static const unsigned int vin1_sync_pins[] = {
+       RCAR_GP_PIN(5, 22), /* HSYNC */
+       RCAR_GP_PIN(5, 23), /* VSYNC */
+};
+static const unsigned int vin1_sync_mux[] = {
+       VI1_HSYNC_N_MARK,
+       VI1_VSYNC_N_MARK,
+};
+static const unsigned int vin1_field_pins[] = {
+       RCAR_GP_PIN(5, 21),
+};
+static const unsigned int vin1_field_mux[] = {
+       VI1_FIELD_MARK,
+};
+static const unsigned int vin1_clkenb_pins[] = {
+       RCAR_GP_PIN(5, 20),
+};
+static const unsigned int vin1_clkenb_mux[] = {
+       VI1_CLKENB_MARK,
+};
+static const unsigned int vin1_clk_pins[] = {
+       RCAR_GP_PIN(5, 11),
+};
+static const unsigned int vin1_clk_mux[] = {
+       VI1_CLK_MARK,
+};
 
 static const struct sh_pfc_pin_group pinmux_groups[] = {
        SH_PFC_PIN_GROUP(eth_link),
@@ -2884,9 +3009,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
        SH_PFC_PIN_GROUP(qspi_data2),
        SH_PFC_PIN_GROUP(qspi_data4),
        SH_PFC_PIN_GROUP(scif0_data),
-       SH_PFC_PIN_GROUP(scif0_clk),
        SH_PFC_PIN_GROUP(scif0_data_b),
-       SH_PFC_PIN_GROUP(scif0_clk_b),
        SH_PFC_PIN_GROUP(scif0_data_c),
        SH_PFC_PIN_GROUP(scif0_data_d),
        SH_PFC_PIN_GROUP(scif1_data),
@@ -2965,6 +3088,24 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
        SH_PFC_PIN_GROUP(sdhi2_wp),
        SH_PFC_PIN_GROUP(usb0),
        SH_PFC_PIN_GROUP(usb1),
+       VIN_DATA_PIN_GROUP(vin0_data, 24),
+       VIN_DATA_PIN_GROUP(vin0_data, 20),
+       SH_PFC_PIN_GROUP(vin0_data18),
+       VIN_DATA_PIN_GROUP(vin0_data, 16),
+       VIN_DATA_PIN_GROUP(vin0_data, 12),
+       VIN_DATA_PIN_GROUP(vin0_data, 10),
+       VIN_DATA_PIN_GROUP(vin0_data, 8),
+       SH_PFC_PIN_GROUP(vin0_sync),
+       SH_PFC_PIN_GROUP(vin0_field),
+       SH_PFC_PIN_GROUP(vin0_clkenb),
+       SH_PFC_PIN_GROUP(vin0_clk),
+       VIN_DATA_PIN_GROUP(vin1_data, 12),
+       VIN_DATA_PIN_GROUP(vin1_data, 10),
+       VIN_DATA_PIN_GROUP(vin1_data, 8),
+       SH_PFC_PIN_GROUP(vin1_sync),
+       SH_PFC_PIN_GROUP(vin1_field),
+       SH_PFC_PIN_GROUP(vin1_clkenb),
+       SH_PFC_PIN_GROUP(vin1_clk),
 };
 
 static const char * const eth_groups[] = {
@@ -3107,9 +3248,7 @@ static const char * const qspi_groups[] = {
 
 static const char * const scif0_groups[] = {
        "scif0_data",
-       "scif0_clk",
        "scif0_data_b",
-       "scif0_clk_b",
        "scif0_data_c",
        "scif0_data_d",
 };
@@ -3247,6 +3386,30 @@ static const char * const usb1_groups[] = {
        "usb1",
 };
 
+static const char * const vin0_groups[] = {
+       "vin0_data24",
+       "vin0_data20",
+       "vin0_data18",
+       "vin0_data16",
+       "vin0_data12",
+       "vin0_data10",
+       "vin0_data8",
+       "vin0_sync",
+       "vin0_field",
+       "vin0_clkenb",
+       "vin0_clk",
+};
+
+static const char * const vin1_groups[] = {
+       "vin1_data12",
+       "vin1_data10",
+       "vin1_data8",
+       "vin1_sync",
+       "vin1_field",
+       "vin1_clkenb",
+       "vin1_clk",
+};
+
 static const struct sh_pfc_function pinmux_functions[] = {
        SH_PFC_FUNCTION(eth),
        SH_PFC_FUNCTION(hscif0),
@@ -3283,6 +3446,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
        SH_PFC_FUNCTION(sdhi2),
        SH_PFC_FUNCTION(usb0),
        SH_PFC_FUNCTION(usb1),
+       SH_PFC_FUNCTION(vin0),
+       SH_PFC_FUNCTION(vin1),
 };
 
 static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -4232,6 +4397,6 @@ const struct sh_pfc_soc_info r8a7794_pinmux_info = {
 
        .cfg_regs = pinmux_config_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
new file mode 100644
index 0000000..7ddb2ad
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -0,0 +1,2816 @@
+/*
+ * R-Car Gen3 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2015  Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+
+#include "core.h"
+#include "sh_pfc.h"
+
+#define PORT_GP_3(bank, fn, sfx)                                       \
+       PORT_GP_1(bank, 0,  fn, sfx), PORT_GP_1(bank, 1,  fn, sfx),     \
+       PORT_GP_1(bank, 2,  fn, sfx), PORT_GP_1(bank, 3,  fn, sfx)
+
+#define PORT_GP_14(bank, fn, sfx)                                      \
+       PORT_GP_3(bank, fn, sfx),                                       \
+       PORT_GP_1(bank, 4,  fn, sfx), PORT_GP_1(bank, 5,  fn, sfx),     \
+       PORT_GP_1(bank, 6,  fn, sfx), PORT_GP_1(bank, 7,  fn, sfx),     \
+       PORT_GP_1(bank, 8,  fn, sfx), PORT_GP_1(bank, 9,  fn, sfx),     \
+       PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx),     \
+       PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx),     \
+       PORT_GP_1(bank, 14, fn, sfx)
+
+#define PORT_GP_15(bank, fn, sfx)                                      \
+       PORT_GP_14(bank, fn, sfx),   PORT_GP_1(bank, 15, fn, sfx)
+
+#define PORT_GP_17(bank, fn, sfx)                                      \
+       PORT_GP_15(bank, fn, sfx),                                      \
+       PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx)
+
+#define PORT_GP_25(bank, fn, sfx)                                      \
+       PORT_GP_17(bank, fn, sfx),                                      \
+       PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx),     \
+       PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx),     \
+       PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx),     \
+       PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx)
+
+#define PORT_GP_27(bank, fn, sfx)                                      \
+       PORT_GP_25(bank, fn, sfx),                                      \
+       PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx)
+
+#define CPU_ALL_PORT(fn, sfx)                                          \
+       PORT_GP_15(0, fn, sfx),                                         \
+       PORT_GP_27(1, fn, sfx),                                         \
+       PORT_GP_14(2, fn, sfx),                                         \
+       PORT_GP_15(3, fn, sfx),                                         \
+       PORT_GP_17(4, fn, sfx),                                         \
+       PORT_GP_25(5, fn, sfx),                                         \
+       PORT_GP_32(6, fn, sfx),                                         \
+       PORT_GP_3(7, fn, sfx)
+/*
+ * F_() : just information
+ * FM() : macro for FN_xxx / xxx_MARK
+ */
+
+/* GPSR0 */
+#define GPSR0_15       F_(D15,                 IP7_11_8)
+#define GPSR0_14       F_(D14,                 IP7_7_4)
+#define GPSR0_13       F_(D13,                 IP7_3_0)
+#define GPSR0_12       F_(D12,                 IP6_31_28)
+#define GPSR0_11       F_(D11,                 IP6_27_24)
+#define GPSR0_10       F_(D10,                 IP6_23_20)
+#define GPSR0_9                F_(D9,                  IP6_19_16)
+#define GPSR0_8                F_(D8,                  IP6_15_12)
+#define GPSR0_7                F_(D7,                  IP6_11_8)
+#define GPSR0_6                F_(D6,                  IP6_7_4)
+#define GPSR0_5                F_(D5,                  IP6_3_0)
+#define GPSR0_4                F_(D4,                  IP5_31_28)
+#define GPSR0_3                F_(D3,                  IP5_27_24)
+#define GPSR0_2                F_(D2,                  IP5_23_20)
+#define GPSR0_1                F_(D1,                  IP5_19_16)
+#define GPSR0_0                F_(D0,                  IP5_15_12)
+
+/* GPSR1 */
+#define GPSR1_27       F_(EX_WAIT0_A,          IP5_11_8)
+#define GPSR1_26       F_(WE1_N,               IP5_7_4)
+#define GPSR1_25       F_(WE0_N,               IP5_3_0)
+#define GPSR1_24       F_(RD_WR_N,             IP4_31_28)
+#define GPSR1_23       F_(RD_N,                IP4_27_24)
+#define GPSR1_22       F_(BS_N,                IP4_23_20)
+#define GPSR1_21       F_(CS1_N_A26,           IP4_19_16)
+#define GPSR1_20       F_(CS0_N,               IP4_15_12)
+#define GPSR1_19       F_(A19,                 IP4_11_8)
+#define GPSR1_18       F_(A18,                 IP4_7_4)
+#define GPSR1_17       F_(A17,                 IP4_3_0)
+#define GPSR1_16       F_(A16,                 IP3_31_28)
+#define GPSR1_15       F_(A15,                 IP3_27_24)
+#define GPSR1_14       F_(A14,                 IP3_23_20)
+#define GPSR1_13       F_(A13,                 IP3_19_16)
+#define GPSR1_12       F_(A12,                 IP3_15_12)
+#define GPSR1_11       F_(A11,                 IP3_11_8)
+#define GPSR1_10       F_(A10,                 IP3_7_4)
+#define GPSR1_9                F_(A9,                  IP3_3_0)
+#define GPSR1_8                F_(A8,                  IP2_31_28)
+#define GPSR1_7                F_(A7,                  IP2_27_24)
+#define GPSR1_6                F_(A6,                  IP2_23_20)
+#define GPSR1_5                F_(A5,                  IP2_19_16)
+#define GPSR1_4                F_(A4,                  IP2_15_12)
+#define GPSR1_3                F_(A3,                  IP2_11_8)
+#define GPSR1_2                F_(A2,                  IP2_7_4)
+#define GPSR1_1                F_(A1,                  IP2_3_0)
+#define GPSR1_0                F_(A0,                  IP1_31_28)
+
+/* GPSR2 */
+#define GPSR2_14       F_(AVB_AVTP_CAPTURE_A,  IP0_23_20)
+#define GPSR2_13       F_(AVB_AVTP_MATCH_A,    IP0_19_16)
+#define GPSR2_12       F_(AVB_LINK,            IP0_15_12)
+#define GPSR2_11       F_(AVB_PHY_INT,         IP0_11_8)
+#define GPSR2_10       F_(AVB_MAGIC,           IP0_7_4)
+#define GPSR2_9                F_(AVB_MDC,             IP0_3_0)
+#define GPSR2_8                F_(PWM2_A,              IP1_27_24)
+#define GPSR2_7                F_(PWM1_A,              IP1_23_20)
+#define GPSR2_6                F_(PWM0,                IP1_19_16)
+#define GPSR2_5                F_(IRQ5,                IP1_15_12)
+#define GPSR2_4                F_(IRQ4,                IP1_11_8)
+#define GPSR2_3                F_(IRQ3,                IP1_7_4)
+#define GPSR2_2                F_(IRQ2,                IP1_3_0)
+#define GPSR2_1                F_(IRQ1,                IP0_31_28)
+#define GPSR2_0                F_(IRQ0,                IP0_27_24)
+
+/* GPSR3 */
+#define GPSR3_15       F_(SD1_WP,              IP10_23_20)
+#define GPSR3_14       F_(SD1_CD,              IP10_19_16)
+#define GPSR3_13       F_(SD0_WP,              IP10_15_12)
+#define GPSR3_12       F_(SD0_CD,              IP10_11_8)
+#define GPSR3_11       F_(SD1_DAT3,            IP8_31_28)
+#define GPSR3_10       F_(SD1_DAT2,            IP8_27_24)
+#define GPSR3_9                F_(SD1_DAT1,            IP8_23_20)
+#define GPSR3_8                F_(SD1_DAT0,            IP8_19_16)
+#define GPSR3_7                F_(SD1_CMD,             IP8_15_12)
+#define GPSR3_6                F_(SD1_CLK,             IP8_11_8)
+#define GPSR3_5                F_(SD0_DAT3,            IP8_7_4)
+#define GPSR3_4                F_(SD0_DAT2,            IP8_3_0)
+#define GPSR3_3                F_(SD0_DAT1,            IP7_31_28)
+#define GPSR3_2                F_(SD0_DAT0,            IP7_27_24)
+#define GPSR3_1                F_(SD0_CMD,             IP7_23_20)
+#define GPSR3_0                F_(SD0_CLK,             IP7_19_16)
+
+/* GPSR4 */
+#define GPSR4_17       FM(SD3_DS)
+#define GPSR4_16       F_(SD3_DAT7,            IP10_7_4)
+#define GPSR4_15       F_(SD3_DAT6,            IP10_3_0)
+#define GPSR4_14       F_(SD3_DAT5,            IP9_31_28)
+#define GPSR4_13       F_(SD3_DAT4,            IP9_27_24)
+#define GPSR4_12       FM(SD3_DAT3)
+#define GPSR4_11       FM(SD3_DAT2)
+#define GPSR4_10       FM(SD3_DAT1)
+#define GPSR4_9                FM(SD3_DAT0)
+#define GPSR4_8                FM(SD3_CMD)
+#define GPSR4_7                FM(SD3_CLK)
+#define GPSR4_6                F_(SD2_DS,              IP9_23_20)
+#define GPSR4_5                F_(SD2_DAT3,            IP9_19_16)
+#define GPSR4_4                F_(SD2_DAT2,            IP9_15_12)
+#define GPSR4_3                F_(SD2_DAT1,            IP9_11_8)
+#define GPSR4_2                F_(SD2_DAT0,            IP9_7_4)
+#define GPSR4_1                FM(SD2_CMD)
+#define GPSR4_0                F_(SD2_CLK,             IP9_3_0)
+
+/* GPSR5 */
+#define GPSR5_25       F_(MLB_DAT,             IP13_19_16)
+#define GPSR5_24       F_(MLB_SIG,             IP13_15_12)
+#define GPSR5_23       F_(MLB_CLK,             IP13_11_8)
+#define GPSR5_22       FM(MSIOF0_RXD)
+#define GPSR5_21       F_(MSIOF0_SS2,          IP13_7_4)
+#define GPSR5_20       FM(MSIOF0_TXD)
+#define GPSR5_19       F_(MSIOF0_SS1,          IP13_3_0)
+#define GPSR5_18       F_(MSIOF0_SYNC,         IP12_31_28)
+#define GPSR5_17       FM(MSIOF0_SCK)
+#define GPSR5_16       F_(HRTS0_N,             IP12_27_24)
+#define GPSR5_15       F_(HCTS0_N,             IP12_23_20)
+#define GPSR5_14       F_(HTX0,                IP12_19_16)
+#define GPSR5_13       F_(HRX0,                IP12_15_12)
+#define GPSR5_12       F_(HSCK0,               IP12_11_8)
+#define GPSR5_11       F_(RX2_A,               IP12_7_4)
+#define GPSR5_10       F_(TX2_A,               IP12_3_0)
+#define GPSR5_9                F_(SCK2,                IP11_31_28)
+#define GPSR5_8                F_(RTS1_N_TANS,         IP11_27_24)
+#define GPSR5_7                F_(CTS1_N,              IP11_23_20)
+#define GPSR5_6                F_(TX1_A,               IP11_19_16)
+#define GPSR5_5                F_(RX1_A,               IP11_15_12)
+#define GPSR5_4                F_(RTS0_N_TANS,         IP11_11_8)
+#define GPSR5_3                F_(CTS0_N,              IP11_7_4)
+#define GPSR5_2                F_(TX0,                 IP11_3_0)
+#define GPSR5_1                F_(RX0,                 IP10_31_28)
+#define GPSR5_0                F_(SCK0,                IP10_27_24)
+
+/* GPSR6 */
+#define GPSR6_31       F_(USB31_OVC,           IP17_7_4)
+#define GPSR6_30       F_(USB31_PWEN,          IP17_3_0)
+#define GPSR6_29       F_(USB30_OVC,           IP16_31_28)
+#define GPSR6_28       F_(USB30_PWEN,          IP16_27_24)
+#define GPSR6_27       F_(USB1_OVC,            IP16_23_20)
+#define GPSR6_26       F_(USB1_PWEN,           IP16_19_16)
+#define GPSR6_25       F_(USB0_OVC,            IP16_15_12)
+#define GPSR6_24       F_(USB0_PWEN,           IP16_11_8)
+#define GPSR6_23       F_(AUDIO_CLKB_B,        IP16_7_4)
+#define GPSR6_22       F_(AUDIO_CLKA_A,        IP16_3_0)
+#define GPSR6_21       F_(SSI_SDATA9_A,        IP15_31_28)
+#define GPSR6_20       F_(SSI_SDATA8,          IP15_27_24)
+#define GPSR6_19       F_(SSI_SDATA7,          IP15_23_20)
+#define GPSR6_18       F_(SSI_WS78,            IP15_19_16)
+#define GPSR6_17       F_(SSI_SCK78,           IP15_15_12)
+#define GPSR6_16       F_(SSI_SDATA6,          IP15_11_8)
+#define GPSR6_15       F_(SSI_WS6,             IP15_7_4)
+#define GPSR6_14       F_(SSI_SCK6,            IP15_3_0)
+#define GPSR6_13       FM(SSI_SDATA5)
+#define GPSR6_12       FM(SSI_WS5)
+#define GPSR6_11       FM(SSI_SCK5)
+#define GPSR6_10       F_(SSI_SDATA4,          IP14_31_28)
+#define GPSR6_9                F_(SSI_WS4,             IP14_27_24)
+#define GPSR6_8                F_(SSI_SCK4,            IP14_23_20)
+#define GPSR6_7                F_(SSI_SDATA3,          IP14_19_16)
+#define GPSR6_6                F_(SSI_WS34,            IP14_15_12)
+#define GPSR6_5                F_(SSI_SCK34,           IP14_11_8)
+#define GPSR6_4                F_(SSI_SDATA2_A,        IP14_7_4)
+#define GPSR6_3                F_(SSI_SDATA1_A,        IP14_3_0)
+#define GPSR6_2                F_(SSI_SDATA0,          IP13_31_28)
+#define GPSR6_1                F_(SSI_WS0129,          IP13_27_24)
+#define GPSR6_0                F_(SSI_SCK0129,         IP13_23_20)
+
+/* GPSR7 */
+#define GPSR7_3                FM(HDMI1_CEC)
+#define GPSR7_2                FM(HDMI0_CEC)
+#define GPSR7_1                FM(AVS2)
+#define GPSR7_0                FM(AVS1)
+
+
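+/*
+ * Note on the tables above and below (explanatory comment, not from the
+ * original submission): in the GPSRn tables, FM(name) marks a pin whose
+ * function is not routed through an IPSR field, while F_(name, IPn_x_y)
+ * records which IPSR bitfield multiplexes that pin.  Each IPn_x_y row below
+ * lists the candidate functions in select-value order (columns 0, 1, 2, ...);
+ * F_(0, 0) denotes a reserved/unused value.
+ */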
+/* IPSRx */            /* 0 */                 /* 1 */         /* 2 */                 /* 3 */                         /* 4 */         /* 5 */         /* 6 */                 /* 7 */         /* 8 */                 /* 9 */         /* A */         /* B */         /* C - F */
+#define IP0_3_0                FM(AVB_MDC)             F_(0, 0)        FM(MSIOF2_SS2_C)        F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_7_4                FM(AVB_MAGIC)           F_(0, 0)        FM(MSIOF2_SS1_C)        FM(SCK4_A)                      F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_11_8       FM(AVB_PHY_INT)         F_(0, 0)        FM(MSIOF2_SYNC_C)       FM(RX4_A)                       F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_15_12      FM(AVB_LINK)            F_(0, 0)        FM(MSIOF2_SCK_C)        FM(TX4_A)                       F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_19_16      FM(AVB_AVTP_MATCH_A)    F_(0, 0)        FM(MSIOF2_RXD_C)        FM(CTS4_N_A)                    F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_23_20      FM(AVB_AVTP_CAPTURE_A)  F_(0, 0)        FM(MSIOF2_TXD_C)        FM(RTS4_N_TANS_A)               F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_27_24      FM(IRQ0)                FM(QPOLB)       F_(0, 0)                FM(DU_CDE)                      FM(VI4_DATA0_B) FM(CAN0_TX_B)   FM(CANFD0_TX_B)         F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_31_28      FM(IRQ1)                FM(QPOLA)       F_(0, 0)                FM(DU_DISP)                     FM(VI4_DATA1_B) FM(CAN0_RX_B)   FM(CANFD0_RX_B)         F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_3_0                FM(IRQ2)                FM(QCPV_QDE)    F_(0, 0)                FM(DU_EXODDF_DU_ODDF_DISP_CDE)  FM(VI4_DATA2_B) F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                FM(PWM3_B)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_7_4                FM(IRQ3)                FM(QSTVB_QVE)   FM(A25)                 FM(DU_DOTCLKOUT1)               FM(VI4_DATA3_B) F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                FM(PWM4_B)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_11_8       FM(IRQ4)                FM(QSTH_QHS)    FM(A24)                 FM(DU_EXHSYNC_DU_HSYNC)         FM(VI4_DATA4_B) F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                FM(PWM5_B)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_15_12      FM(IRQ5)                FM(QSTB_QHE)    FM(A23)                 FM(DU_EXVSYNC_DU_VSYNC)         FM(VI4_DATA5_B) F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                FM(PWM6_B)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_19_16      FM(PWM0)                FM(AVB_AVTP_PPS)FM(A22)                 F_(0, 0)                        FM(VI4_DATA6_B) F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                FM(IECLK_B)     F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_23_20      FM(PWM1_A)              F_(0, 0)        FM(A21)                 FM(HRX3_D)                      FM(VI4_DATA7_B) F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                FM(IERX_B)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_27_24      FM(PWM2_A)              F_(0, 0)        FM(A20)                 FM(HTX3_D)                      F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                FM(IETX_B)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_31_28      FM(A0)                  FM(LCDOUT16)    FM(MSIOF3_SYNC_B)       F_(0, 0)                        FM(VI4_DATA8)   F_(0, 0)        FM(DU_DB0)              F_(0, 0)        F_(0, 0)                FM(PWM3_A)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_3_0                FM(A1)                  FM(LCDOUT17)    FM(MSIOF3_TXD_B)        F_(0, 0)                        FM(VI4_DATA9)   F_(0, 0)        FM(DU_DB1)              F_(0, 0)        F_(0, 0)                FM(PWM4_A)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_7_4                FM(A2)                  FM(LCDOUT18)    FM(MSIOF3_SCK_B)        F_(0, 0)                        FM(VI4_DATA10)  F_(0, 0)        FM(DU_DB2)              F_(0, 0)        F_(0, 0)                FM(PWM5_A)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_11_8       FM(A3)                  FM(LCDOUT19)    FM(MSIOF3_RXD_B)        F_(0, 0)                        FM(VI4_DATA11)  F_(0, 0)        FM(DU_DB3)              F_(0, 0)        F_(0, 0)                FM(PWM6_A)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */            /* 0 */                 /* 1 */         /* 2 */                 /* 3 */                         /* 4 */         /* 5 */         /* 6 */                 /* 7 */         /* 8 */                 /* 9 */         /* A */         /* B */         /* C - F */
+#define IP2_15_12      FM(A4)                  FM(LCDOUT20)    FM(MSIOF3_SS1_B)        F_(0, 0)                        FM(VI4_DATA12)  FM(VI5_DATA12)  FM(DU_DB4)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_19_16      FM(A5)                  FM(LCDOUT21)    FM(MSIOF3_SS2_B)        FM(SCK4_B)                      FM(VI4_DATA13)  FM(VI5_DATA13)  FM(DU_DB5)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_23_20      FM(A6)                  FM(LCDOUT22)    FM(MSIOF2_SS1_A)        FM(RX4_B)                       FM(VI4_DATA14)  FM(VI5_DATA14)  FM(DU_DB6)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_27_24      FM(A7)                  FM(LCDOUT23)    FM(MSIOF2_SS2_A)        FM(TX4_B)                       FM(VI4_DATA15)  FM(VI5_DATA15)  FM(DU_DB7)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_31_28      FM(A8)                  FM(RX3_B)       FM(MSIOF2_SYNC_A)       FM(HRX4_B)                      F_(0, 0)        F_(0, 0)        F_(0, 0)                FM(SDA6_A)      FM(AVB_AVTP_MATCH_B)    FM(PWM1_B)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_3_0                FM(A9)                  F_(0, 0)        FM(MSIOF2_SCK_A)        FM(CTS4_N_B)                    F_(0, 0)        FM(VI5_VSYNC_N) F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_7_4                FM(A10)                 F_(0, 0)        FM(MSIOF2_RXD_A)        FM(RTS4_N_TANS_B)               F_(0, 0)        FM(VI5_HSYNC_N) F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_11_8       FM(A11)                 FM(TX3_B)       FM(MSIOF2_TXD_A)        FM(HTX4_B)                      FM(HSCK4)       FM(VI5_FIELD)   F_(0, 0)                FM(SCL6_A)      FM(AVB_AVTP_CAPTURE_B)  FM(PWM2_B)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_15_12      FM(A12)                 FM(LCDOUT12)    FM(MSIOF3_SCK_C)        F_(0, 0)                        FM(HRX4_A)      FM(VI5_DATA8)   FM(DU_DG4)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_19_16      FM(A13)                 FM(LCDOUT13)    FM(MSIOF3_SYNC_C)       F_(0, 0)                        FM(HTX4_A)      FM(VI5_DATA9)   FM(DU_DG5)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_23_20      FM(A14)                 FM(LCDOUT14)    FM(MSIOF3_RXD_C)        F_(0, 0)                        FM(HCTS4_N)     FM(VI5_DATA10)  FM(DU_DG6)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_27_24      FM(A15)                 FM(LCDOUT15)    FM(MSIOF3_TXD_C)        F_(0, 0)                        FM(HRTS4_N)     FM(VI5_DATA11)  FM(DU_DG7)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_31_28      FM(A16)                 FM(LCDOUT8)     F_(0, 0)                F_(0, 0)                        FM(VI4_FIELD)   F_(0, 0)        FM(DU_DG0)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_3_0                FM(A17)                 FM(LCDOUT9)     F_(0, 0)                F_(0, 0)                        FM(VI4_VSYNC_N) F_(0, 0)        FM(DU_DG1)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_7_4                FM(A18)                 FM(LCDOUT10)    F_(0, 0)                F_(0, 0)                        FM(VI4_HSYNC_N) F_(0, 0)        FM(DU_DG2)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_11_8       FM(A19)                 FM(LCDOUT11)    F_(0, 0)                F_(0, 0)                        FM(VI4_CLKENB)  F_(0, 0)        FM(DU_DG3)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_15_12      FM(CS0_N)               F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        FM(VI5_CLKENB)  F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_19_16      FM(CS1_N_A26)           F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        FM(VI5_CLK)     F_(0, 0)                FM(EX_WAIT0_B)  F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_23_20      FM(BS_N)                FM(QSTVA_QVS)   FM(MSIOF3_SCK_D)        FM(SCK3)                        FM(HSCK3)       F_(0, 0)        F_(0, 0)                F_(0, 0)        FM(CAN1_TX)             FM(CANFD1_TX)   FM(IETX_A)      F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_27_24      FM(RD_N)                F_(0, 0)        FM(MSIOF3_SYNC_D)       FM(RX3_A)                       FM(HRX3_A)      F_(0, 0)        F_(0, 0)                F_(0, 0)        FM(CAN0_TX_A)           FM(CANFD0_TX_A) F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_31_28      FM(RD_WR_N)             F_(0, 0)        FM(MSIOF3_RXD_D)        FM(TX3_A)                       FM(HTX3_A)      F_(0, 0)        F_(0, 0)                F_(0, 0)        FM(CAN0_RX_A)           FM(CANFD0_RX_A) F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_3_0                FM(WE0_N)               F_(0, 0)        FM(MSIOF3_TXD_D)        FM(CTS3_N)                      FM(HCTS3_N)     F_(0, 0)        F_(0, 0)                FM(SCL6_B)      FM(CAN_CLK)             F_(0, 0)        FM(IECLK_A)     F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_7_4                FM(WE1_N)               F_(0, 0)        FM(MSIOF3_SS1_D)        FM(RTS3_N_TANS)                 FM(HRTS3_N)     F_(0, 0)        F_(0, 0)                FM(SDA6_B)      FM(CAN1_RX)             FM(CANFD1_RX)   FM(IERX_A)      F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_11_8       FM(EX_WAIT0_A)          FM(QCLK)        F_(0, 0)                F_(0, 0)                        FM(VI4_CLK)     F_(0, 0)        FM(DU_DOTCLKOUT0)       F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_15_12      FM(D0)                  FM(MSIOF2_SS1_B)FM(MSIOF3_SCK_A)        F_(0, 0)                        FM(VI4_DATA16)  FM(VI5_DATA0)   F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_19_16      FM(D1)                  FM(MSIOF2_SS2_B)FM(MSIOF3_SYNC_A)       F_(0, 0)                        FM(VI4_DATA17)  FM(VI5_DATA1)   F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_23_20      FM(D2)                  F_(0, 0)        FM(MSIOF3_RXD_A)        F_(0, 0)                        FM(VI4_DATA18)  FM(VI5_DATA2)   F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_27_24      FM(D3)                  F_(0, 0)        FM(MSIOF3_TXD_A)        F_(0, 0)                        FM(VI4_DATA19)  FM(VI5_DATA3)   F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_31_28      FM(D4)                  FM(MSIOF2_SCK_B)F_(0, 0)                F_(0, 0)                        FM(VI4_DATA20)  FM(VI5_DATA4)   F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_3_0                FM(D5)                  FM(MSIOF2_SYNC_B)F_(0, 0)               F_(0, 0)                        FM(VI4_DATA21)  FM(VI5_DATA5)   F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_7_4                FM(D6)                  FM(MSIOF2_RXD_B)F_(0, 0)                F_(0, 0)                        FM(VI4_DATA22)  FM(VI5_DATA6)   F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_11_8       FM(D7)                  FM(MSIOF2_TXD_B)F_(0, 0)                F_(0, 0)                        FM(VI4_DATA23)  FM(VI5_DATA7)   F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_15_12      FM(D8)                  FM(LCDOUT0)     FM(MSIOF2_SCK_D)        FM(SCK4_C)                      FM(VI4_DATA0_A) F_(0, 0)        FM(DU_DR0)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_19_16      FM(D9)                  FM(LCDOUT1)     FM(MSIOF2_SYNC_D)       F_(0, 0)                        FM(VI4_DATA1_A) F_(0, 0)        FM(DU_DR1)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_23_20      FM(D10)                 FM(LCDOUT2)     FM(MSIOF2_RXD_D)        FM(HRX3_B)                      FM(VI4_DATA2_A) FM(CTS4_N_C)    FM(DU_DR2)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_27_24      FM(D11)                 FM(LCDOUT3)     FM(MSIOF2_TXD_D)        FM(HTX3_B)                      FM(VI4_DATA3_A) FM(RTS4_N_TANS_C)FM(DU_DR3)             F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_31_28      FM(D12)                 FM(LCDOUT4)     FM(MSIOF2_SS1_D)        FM(RX4_C)                       FM(VI4_DATA4_A) F_(0, 0)        FM(DU_DR4)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_3_0                FM(D13)                 FM(LCDOUT5)     FM(MSIOF2_SS2_D)        FM(TX4_C)                       FM(VI4_DATA5_A) F_(0, 0)        FM(DU_DR5)              F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_7_4                FM(D14)                 FM(LCDOUT6)     FM(MSIOF3_SS1_A)        FM(HRX3_C)                      FM(VI4_DATA6_A) F_(0, 0)        FM(DU_DR6)              FM(SCL6_C)      F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_11_8       FM(D15)                 FM(LCDOUT7)     FM(MSIOF3_SS2_A)        FM(HTX3_C)                      FM(VI4_DATA7_A) F_(0, 0)        FM(DU_DR7)              FM(SDA6_C)      F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_15_12      FM(FSCLKST)             F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_19_16      FM(SD0_CLK)             F_(0, 0)        FM(MSIOF1_SCK_E)        F_(0, 0)                        F_(0, 0)        F_(0, 0)        FM(STP_OPWM_0_B)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */            /* 0 */                 /* 1 */         /* 2 */                 /* 3 */                         /* 4 */         /* 5 */         /* 6 */                 /* 7 */         /* 8 */                 /* 9 */         /* A */         /* B */         /* C - F */
+#define IP7_23_20      FM(SD0_CMD)             F_(0, 0)        FM(MSIOF1_SYNC_E)       F_(0, 0)                        F_(0, 0)        F_(0, 0)        FM(STP_IVCXO27_0_B)     F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_27_24      FM(SD0_DAT0)            F_(0, 0)        FM(MSIOF1_RXD_E)        F_(0, 0)                        F_(0, 0)        FM(TS_SCK0_B)   FM(STP_ISCLK_0_B)       F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_31_28      FM(SD0_DAT1)            F_(0, 0)        FM(MSIOF1_TXD_E)        F_(0, 0)                        F_(0, 0)        FM(TS_SPSYNC0_B)FM(STP_ISSYNC_0_B)      F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_3_0                FM(SD0_DAT2)            F_(0, 0)        FM(MSIOF1_SS1_E)        F_(0, 0)                        F_(0, 0)        FM(TS_SDAT0_B)  FM(STP_ISD_0_B)         F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_7_4                FM(SD0_DAT3)            F_(0, 0)        FM(MSIOF1_SS2_E)        F_(0, 0)                        F_(0, 0)        FM(TS_SDEN0_B)  FM(STP_ISEN_0_B)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_11_8       FM(SD1_CLK)             F_(0, 0)        FM(MSIOF1_SCK_G)        F_(0, 0)                        F_(0, 0)        FM(SIM0_CLK_A)  F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_15_12      FM(SD1_CMD)             F_(0, 0)        FM(MSIOF1_SYNC_G)       F_(0, 0)                        F_(0, 0)        FM(SIM0_D_A)    FM(STP_IVCXO27_1_B)     F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_19_16      FM(SD1_DAT0)            FM(SD2_DAT4)    FM(MSIOF1_RXD_G)        F_(0, 0)                        F_(0, 0)        FM(TS_SCK1_B)   FM(STP_ISCLK_1_B)       F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_23_20      FM(SD1_DAT1)            FM(SD2_DAT5)    FM(MSIOF1_TXD_G)        F_(0, 0)                        F_(0, 0)        FM(TS_SPSYNC1_B)FM(STP_ISSYNC_1_B)      F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_27_24      FM(SD1_DAT2)            FM(SD2_DAT6)    FM(MSIOF1_SS1_G)        F_(0, 0)                        F_(0, 0)        FM(TS_SDAT1_B)  FM(STP_ISD_1_B)         F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_31_28      FM(SD1_DAT3)            FM(SD2_DAT7)    FM(MSIOF1_SS2_G)        F_(0, 0)                        F_(0, 0)        FM(TS_SDEN1_B)  FM(STP_ISEN_1_B)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_3_0                FM(SD2_CLK)             F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_7_4                FM(SD2_DAT0)            F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_11_8       FM(SD2_DAT1)            F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_15_12      FM(SD2_DAT2)            F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_19_16      FM(SD2_DAT3)            F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_23_20      FM(SD2_DS)              F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        FM(SATA_DEVSLP_B)       F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_27_24      FM(SD3_DAT4)            FM(SD2_CD_A)    F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_31_28      FM(SD3_DAT5)            FM(SD2_WP_A)    F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_3_0       FM(SD3_DAT6)            FM(SD3_CD)      F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_7_4       FM(SD3_DAT7)            FM(SD3_WP)      F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_11_8      FM(SD0_CD)              F_(0, 0)        F_(0, 0)                F_(0, 0)                        FM(SCL2_B)      FM(SIM0_RST_A)  F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_15_12     FM(SD0_WP)              F_(0, 0)        F_(0, 0)                F_(0, 0)                        FM(SDA2_B)      F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_19_16     FM(SD1_CD)              F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        FM(SIM0_CLK_B)  F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_23_20     FM(SD1_WP)              F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        FM(SIM0_D_B)    F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_27_24     FM(SCK0)                FM(HSCK1_B)     FM(MSIOF1_SS2_B)        FM(AUDIO_CLKC_B)                FM(SDA2_A)      FM(SIM0_RST_B)  FM(STP_OPWM_0_C)        FM(RIF0_CLK_B)  F_(0, 0)                FM(ADICHS2)     F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_31_28     FM(RX0)                 FM(HRX1_B)      F_(0, 0)                F_(0, 0)                        F_(0, 0)        FM(TS_SCK0_C)   FM(STP_ISCLK_0_C)       FM(RIF0_D0_B)   F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_3_0       FM(TX0)                 FM(HTX1_B)      F_(0, 0)                F_(0, 0)                        F_(0, 0)        FM(TS_SPSYNC0_C)FM(STP_ISSYNC_0_C)      FM(RIF0_D1_B)   F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_7_4       FM(CTS0_N)              FM(HCTS1_N_B)   FM(MSIOF1_SYNC_B)       F_(0, 0)                        F_(0, 0)        FM(TS_SPSYNC1_C)FM(STP_ISSYNC_1_C)      FM(RIF1_SYNC_B) FM(AUDIO_CLKOUT_C)      FM(ADICS_SAMP)  F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_11_8      FM(RTS0_N_TANS)         FM(HRTS1_N_B)   FM(MSIOF1_SS1_B)        FM(AUDIO_CLKA_B)                FM(SCL2_A)      F_(0, 0)        FM(STP_IVCXO27_1_C)     FM(RIF0_SYNC_B) F_(0, 0)                FM(ADICHS1)     F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_15_12     FM(RX1_A)               FM(HRX1_A)      F_(0, 0)                F_(0, 0)                        F_(0, 0)        FM(TS_SDAT0_C)  FM(STP_ISD_0_C)         FM(RIF1_CLK_C)  F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_19_16     FM(TX1_A)               FM(HTX1_A)      F_(0, 0)                F_(0, 0)                        F_(0, 0)        FM(TS_SDEN0_C)  FM(STP_ISEN_0_C)        FM(RIF1_D0_C)   F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_23_20     FM(CTS1_N)              FM(HCTS1_N_A)   FM(MSIOF1_RXD_B)        F_(0, 0)                        F_(0, 0)        FM(TS_SDEN1_C)  FM(STP_ISEN_1_C)        FM(RIF1_D0_B)   F_(0, 0)                FM(ADIDATA)     F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_27_24     FM(RTS1_N_TANS)         FM(HRTS1_N_A)   FM(MSIOF1_TXD_B)        F_(0, 0)                        F_(0, 0)        FM(TS_SDAT1_C)  FM(STP_ISD_1_C)         FM(RIF1_D1_B)   F_(0, 0)                FM(ADICHS0)     F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_31_28     FM(SCK2)                FM(SCIF_CLK_B)  FM(MSIOF1_SCK_B)        F_(0, 0)                        F_(0, 0)        FM(TS_SCK1_C)   FM(STP_ISCLK_1_C)       FM(RIF1_CLK_B)  F_(0, 0)                FM(ADICLK)      F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_3_0       FM(TX2_A)               F_(0, 0)        F_(0, 0)                FM(SD2_CD_B)                    FM(SCL1_A)      F_(0, 0)        FM(FMCLK_A)             FM(RIF1_D1_C)   F_(0, 0)                FM(FSO_CFE_0_B) F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_7_4       FM(RX2_A)               F_(0, 0)        F_(0, 0)                FM(SD2_WP_B)                    FM(SDA1_A)      F_(0, 0)        FM(FMIN_A)              FM(RIF1_SYNC_C) F_(0, 0)                FM(FSO_CFE_1_B) F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_11_8      FM(HSCK0)               F_(0, 0)        FM(MSIOF1_SCK_D)        FM(AUDIO_CLKB_A)                FM(SSI_SDATA1_B)FM(TS_SCK0_D)   FM(STP_ISCLK_0_D)       FM(RIF0_CLK_C)  F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_15_12     FM(HRX0)                F_(0, 0)        FM(MSIOF1_RXD_D)        F_(0, 0)                        FM(SSI_SDATA2_B)FM(TS_SDEN0_D)  FM(STP_ISEN_0_D)        FM(RIF0_D0_C)   F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_19_16     FM(HTX0)                F_(0, 0)        FM(MSIOF1_TXD_D)        F_(0, 0)                        FM(SSI_SDATA9_B)FM(TS_SDAT0_D)  FM(STP_ISD_0_D)         FM(RIF0_D1_C)   F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_23_20     FM(HCTS0_N)             FM(RX2_B)       FM(MSIOF1_SYNC_D)       F_(0, 0)                        FM(SSI_SCK9_A)  FM(TS_SPSYNC0_D)FM(STP_ISSYNC_0_D)      FM(RIF0_SYNC_C) FM(AUDIO_CLKOUT1_A)     F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_27_24     FM(HRTS0_N)             FM(TX2_B)       FM(MSIOF1_SS1_D)        F_(0, 0)                        FM(SSI_WS9_A)   F_(0, 0)        FM(STP_IVCXO27_0_D)     FM(BPFCLK_A)    FM(AUDIO_CLKOUT2_A)     F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */            /* 0 */                 /* 1 */         /* 2 */                 /* 3 */                         /* 4 */         /* 5 */         /* 6 */                 /* 7 */         /* 8 */                 /* 9 */         /* A */         /* B */         /* C - F */
+#define IP12_31_28     FM(MSIOF0_SYNC)         F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        FM(AUDIO_CLKOUT_A)      F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_3_0       FM(MSIOF0_SS1)          FM(RX5)         F_(0, 0)                FM(AUDIO_CLKA_C)                FM(SSI_SCK2_A)  F_(0, 0)        FM(STP_IVCXO27_0_C)     F_(0, 0)        FM(AUDIO_CLKOUT3_A)     F_(0, 0)        FM(TCLK1_B)     F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_7_4       FM(MSIOF0_SS2)          FM(TX5)         FM(MSIOF1_SS2_D)        FM(AUDIO_CLKC_A)                FM(SSI_WS2_A)   F_(0, 0)        FM(STP_OPWM_0_D)        F_(0, 0)        FM(AUDIO_CLKOUT_D)      F_(0, 0)        FM(SPEEDIN_B)   F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_11_8      FM(MLB_CLK)             F_(0, 0)        FM(MSIOF1_SCK_F)        F_(0, 0)                        FM(SCL1_B)      F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_15_12     FM(MLB_SIG)             FM(RX1_B)       FM(MSIOF1_SYNC_F)       F_(0, 0)                        FM(SDA1_B)      F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_19_16     FM(MLB_DAT)             FM(TX1_B)       FM(MSIOF1_RXD_F)        F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_23_20     FM(SSI_SCK0129)         F_(0, 0)        FM(MSIOF1_TXD_F)        F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_27_24     FM(SSI_WS0129)          F_(0, 0)        FM(MSIOF1_SS1_F)        F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_31_28     FM(SSI_SDATA0)          F_(0, 0)        FM(MSIOF1_SS2_F)        F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_3_0       FM(SSI_SDATA1_A)        F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_7_4       FM(SSI_SDATA2_A)        F_(0, 0)        F_(0, 0)                F_(0, 0)                        FM(SSI_SCK1_B)  F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_11_8      FM(SSI_SCK34)           F_(0, 0)        FM(MSIOF1_SS1_A)        F_(0, 0)                        F_(0, 0)        F_(0, 0)        FM(STP_OPWM_0_A)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_15_12     FM(SSI_WS34)            FM(HCTS2_N_A)   FM(MSIOF1_SS2_A)        F_(0, 0)                        F_(0, 0)        F_(0, 0)        FM(STP_IVCXO27_0_A)     F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_19_16     FM(SSI_SDATA3)          FM(HRTS2_N_A)   FM(MSIOF1_TXD_A)        F_(0, 0)                        F_(0, 0)        FM(TS_SCK0_A)   FM(STP_ISCLK_0_A)       FM(RIF0_D1_A)   FM(RIF2_D0_A)           F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_23_20     FM(SSI_SCK4)            FM(HRX2_A)      FM(MSIOF1_SCK_A)        F_(0, 0)                        F_(0, 0)        FM(TS_SDAT0_A)  FM(STP_ISD_0_A)         FM(RIF0_CLK_A)  FM(RIF2_CLK_A)          F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_27_24     FM(SSI_WS4)             FM(HTX2_A)      FM(MSIOF1_SYNC_A)       F_(0, 0)                        F_(0, 0)        FM(TS_SDEN0_A)  FM(STP_ISEN_0_A)        FM(RIF0_SYNC_A) FM(RIF2_SYNC_A)         F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_31_28     FM(SSI_SDATA4)          FM(HSCK2_A)     FM(MSIOF1_RXD_A)        F_(0, 0)                        F_(0, 0)        FM(TS_SPSYNC0_A)FM(STP_ISSYNC_0_A)      FM(RIF0_D0_A)   FM(RIF2_D1_A)           F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_3_0       FM(SSI_SCK6)            FM(USB2_PWEN)   F_(0, 0)                FM(SIM0_RST_D)                  F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_7_4       FM(SSI_WS6)             FM(USB2_OVC)    F_(0, 0)                FM(SIM0_D_D)                    F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_11_8      FM(SSI_SDATA6)          F_(0, 0)        F_(0, 0)                FM(SIM0_CLK_D)                  F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        FM(SATA_DEVSLP_A)       F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_15_12     FM(SSI_SCK78)           FM(HRX2_B)      FM(MSIOF1_SCK_C)        F_(0, 0)                        F_(0, 0)        FM(TS_SCK1_A)   FM(STP_ISCLK_1_A)       FM(RIF1_CLK_A)  FM(RIF3_CLK_A)          F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_19_16     FM(SSI_WS78)            FM(HTX2_B)      FM(MSIOF1_SYNC_C)       F_(0, 0)                        F_(0, 0)        FM(TS_SDAT1_A)  FM(STP_ISD_1_A)         FM(RIF1_SYNC_A) FM(RIF3_SYNC_A)         F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_23_20     FM(SSI_SDATA7)          FM(HCTS2_N_B)   FM(MSIOF1_RXD_C)        F_(0, 0)                        F_(0, 0)        FM(TS_SDEN1_A)  FM(STP_ISEN_1_A)        FM(RIF1_D0_A)   FM(RIF3_D0_A)           F_(0, 0)        FM(TCLK2_A)     F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_27_24     FM(SSI_SDATA8)          FM(HRTS2_N_B)   FM(MSIOF1_TXD_C)        F_(0, 0)                        F_(0, 0)        FM(TS_SPSYNC1_A)FM(STP_ISSYNC_1_A)      FM(RIF1_D1_A)   FM(RIF3_D1_A)           F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_31_28     FM(SSI_SDATA9_A)        FM(HSCK2_B)     FM(MSIOF1_SS1_C)        FM(HSCK1_A)                     FM(SSI_WS1_B)   FM(SCK1)        FM(STP_IVCXO27_1_A)     FM(SCK5)        F_(0, 0)                F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_3_0       FM(AUDIO_CLKA_A)        F_(0, 0)        F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)                F_(0, 0)        F_(0, 0)        FM(CC5_OSCOUT)  F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_7_4       FM(AUDIO_CLKB_B)        FM(SCIF_CLK_A)  F_(0, 0)                F_(0, 0)                        F_(0, 0)        F_(0, 0)        FM(STP_IVCXO27_1_D)     FM(REMOCON_A)   F_(0, 0)                F_(0, 0)        FM(TCLK1_A)     F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_11_8      FM(USB0_PWEN)           F_(0, 0)        F_(0, 0)                FM(SIM0_RST_C)                  F_(0, 0)        FM(TS_SCK1_D)   FM(STP_ISCLK_1_D)       FM(BPFCLK_B)    FM(RIF3_CLK_B)          F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_15_12     FM(USB0_OVC)            F_(0, 0)        F_(0, 0)                FM(SIM0_D_C)                    F_(0, 0)        FM(TS_SDAT1_D)  FM(STP_ISD_1_D)         F_(0, 0)        FM(RIF3_SYNC_B)         F_(0, 0)        F_(0, 0)        F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_19_16     FM(USB1_PWEN)           F_(0, 0)        F_(0, 0)                FM(SIM0_CLK_C)                  FM(SSI_SCK1_A)  FM(TS_SCK0_E)   FM(STP_ISCLK_0_E)       FM(FMCLK_B)     FM(RIF2_CLK_B)          F_(0, 0)        FM(SPEEDIN_A)   F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_23_20     FM(USB1_OVC)            F_(0, 0)        FM(MSIOF1_SS2_C)        F_(0, 0)                        FM(SSI_WS1_A)   FM(TS_SDAT0_E)  FM(STP_ISD_0_E)         FM(FMIN_B)      FM(RIF2_SYNC_B)         F_(0, 0)        FM(REMOCON_B)   F_(0, 0)        F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_27_24     FM(USB30_PWEN)          F_(0, 0)        F_(0, 0)                FM(AUDIO_CLKOUT_B)              FM(SSI_SCK2_B)  FM(TS_SDEN1_D)  FM(STP_ISEN_1_D)        FM(STP_OPWM_0_E)FM(RIF3_D0_B)           F_(0, 0)        FM(TCLK2_B)     FM(TPU0TO0)     F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_31_28     FM(USB30_OVC)           F_(0, 0)        F_(0, 0)                FM(AUDIO_CLKOUT1_B)             FM(SSI_WS2_B)   FM(TS_SPSYNC1_D)FM(STP_ISSYNC_1_D)      FM(STP_IVCXO27_0_E)FM(RIF3_D1_B)        F_(0, 0)        FM(FSO_TOE_B)   FM(TPU0TO1)     F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_3_0       FM(USB31_PWEN)          F_(0, 0)        F_(0, 0)                FM(AUDIO_CLKOUT2_B)             FM(SSI_SCK9_B)  FM(TS_SDEN0_E)  FM(STP_ISEN_0_E)        F_(0, 0)        FM(RIF2_D0_B)           F_(0, 0)        F_(0, 0)        FM(TPU0TO2)     F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_7_4       FM(USB31_OVC)           F_(0, 0)        F_(0, 0)                FM(AUDIO_CLKOUT3_B)             FM(SSI_WS9_B)   FM(TS_SPSYNC0_E)FM(STP_ISSYNC_0_E)      F_(0, 0)        FM(RIF2_D1_B)           F_(0, 0)        F_(0, 0)        FM(TPU0TO3)     F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+#define PINMUX_GPSR    \
+\
+                                                                                               GPSR6_31 \
+                                                                                               GPSR6_30 \
+                                                                                               GPSR6_29 \
+                                                                                               GPSR6_28 \
+               GPSR1_27                                                                        GPSR6_27 \
+               GPSR1_26                                                                        GPSR6_26 \
+               GPSR1_25                                                        GPSR5_25        GPSR6_25 \
+               GPSR1_24                                                        GPSR5_24        GPSR6_24 \
+               GPSR1_23                                                        GPSR5_23        GPSR6_23 \
+               GPSR1_22                                                        GPSR5_22        GPSR6_22 \
+               GPSR1_21                                                        GPSR5_21        GPSR6_21 \
+               GPSR1_20                                                        GPSR5_20        GPSR6_20 \
+               GPSR1_19                                                        GPSR5_19        GPSR6_19 \
+               GPSR1_18                                                        GPSR5_18        GPSR6_18 \
+               GPSR1_17                                        GPSR4_17        GPSR5_17        GPSR6_17 \
+               GPSR1_16                                        GPSR4_16        GPSR5_16        GPSR6_16 \
+GPSR0_15       GPSR1_15                        GPSR3_15        GPSR4_15        GPSR5_15        GPSR6_15 \
+GPSR0_14       GPSR1_14        GPSR2_14        GPSR3_14        GPSR4_14        GPSR5_14        GPSR6_14 \
+GPSR0_13       GPSR1_13        GPSR2_13        GPSR3_13        GPSR4_13        GPSR5_13        GPSR6_13 \
+GPSR0_12       GPSR1_12        GPSR2_12        GPSR3_12        GPSR4_12        GPSR5_12        GPSR6_12 \
+GPSR0_11       GPSR1_11        GPSR2_11        GPSR3_11        GPSR4_11        GPSR5_11        GPSR6_11 \
+GPSR0_10       GPSR1_10        GPSR2_10        GPSR3_10        GPSR4_10        GPSR5_10        GPSR6_10 \
+GPSR0_9                GPSR1_9         GPSR2_9         GPSR3_9         GPSR4_9         GPSR5_9         GPSR6_9 \
+GPSR0_8                GPSR1_8         GPSR2_8         GPSR3_8         GPSR4_8         GPSR5_8         GPSR6_8 \
+GPSR0_7                GPSR1_7         GPSR2_7         GPSR3_7         GPSR4_7         GPSR5_7         GPSR6_7 \
+GPSR0_6                GPSR1_6         GPSR2_6         GPSR3_6         GPSR4_6         GPSR5_6         GPSR6_6 \
+GPSR0_5                GPSR1_5         GPSR2_5         GPSR3_5         GPSR4_5         GPSR5_5         GPSR6_5 \
+GPSR0_4                GPSR1_4         GPSR2_4         GPSR3_4         GPSR4_4         GPSR5_4         GPSR6_4 \
+GPSR0_3                GPSR1_3         GPSR2_3         GPSR3_3         GPSR4_3         GPSR5_3         GPSR6_3         GPSR7_3 \
+GPSR0_2                GPSR1_2         GPSR2_2         GPSR3_2         GPSR4_2         GPSR5_2         GPSR6_2         GPSR7_2 \
+GPSR0_1                GPSR1_1         GPSR2_1         GPSR3_1         GPSR4_1         GPSR5_1         GPSR6_1         GPSR7_1 \
+GPSR0_0                GPSR1_0         GPSR2_0         GPSR3_0         GPSR4_0         GPSR5_0         GPSR6_0         GPSR7_0
+
+#define PINMUX_IPSR                            \
+\
+FM(IP0_3_0)    IP0_3_0         FM(IP1_3_0)     IP1_3_0         FM(IP2_3_0)     IP2_3_0         FM(IP3_3_0)     IP3_3_0 \
+FM(IP0_7_4)    IP0_7_4         FM(IP1_7_4)     IP1_7_4         FM(IP2_7_4)     IP2_7_4         FM(IP3_7_4)     IP3_7_4 \
+FM(IP0_11_8)   IP0_11_8        FM(IP1_11_8)    IP1_11_8        FM(IP2_11_8)    IP2_11_8        FM(IP3_11_8)    IP3_11_8 \
+FM(IP0_15_12)  IP0_15_12       FM(IP1_15_12)   IP1_15_12       FM(IP2_15_12)   IP2_15_12       FM(IP3_15_12)   IP3_15_12 \
+FM(IP0_19_16)  IP0_19_16       FM(IP1_19_16)   IP1_19_16       FM(IP2_19_16)   IP2_19_16       FM(IP3_19_16)   IP3_19_16 \
+FM(IP0_23_20)  IP0_23_20       FM(IP1_23_20)   IP1_23_20       FM(IP2_23_20)   IP2_23_20       FM(IP3_23_20)   IP3_23_20 \
+FM(IP0_27_24)  IP0_27_24       FM(IP1_27_24)   IP1_27_24       FM(IP2_27_24)   IP2_27_24       FM(IP3_27_24)   IP3_27_24 \
+FM(IP0_31_28)  IP0_31_28       FM(IP1_31_28)   IP1_31_28       FM(IP2_31_28)   IP2_31_28       FM(IP3_31_28)   IP3_31_28 \
+\
+FM(IP4_3_0)    IP4_3_0         FM(IP5_3_0)     IP5_3_0         FM(IP6_3_0)     IP6_3_0         FM(IP7_3_0)     IP7_3_0 \
+FM(IP4_7_4)    IP4_7_4         FM(IP5_7_4)     IP5_7_4         FM(IP6_7_4)     IP6_7_4         FM(IP7_7_4)     IP7_7_4 \
+FM(IP4_11_8)   IP4_11_8        FM(IP5_11_8)    IP5_11_8        FM(IP6_11_8)    IP6_11_8        FM(IP7_11_8)    IP7_11_8 \
+FM(IP4_15_12)  IP4_15_12       FM(IP5_15_12)   IP5_15_12       FM(IP6_15_12)   IP6_15_12       FM(IP7_15_12)   IP7_15_12 \
+FM(IP4_19_16)  IP4_19_16       FM(IP5_19_16)   IP5_19_16       FM(IP6_19_16)   IP6_19_16       FM(IP7_19_16)   IP7_19_16 \
+FM(IP4_23_20)  IP4_23_20       FM(IP5_23_20)   IP5_23_20       FM(IP6_23_20)   IP6_23_20       FM(IP7_23_20)   IP7_23_20 \
+FM(IP4_27_24)  IP4_27_24       FM(IP5_27_24)   IP5_27_24       FM(IP6_27_24)   IP6_27_24       FM(IP7_27_24)   IP7_27_24 \
+FM(IP4_31_28)  IP4_31_28       FM(IP5_31_28)   IP5_31_28       FM(IP6_31_28)   IP6_31_28       FM(IP7_31_28)   IP7_31_28 \
+\
+FM(IP8_3_0)    IP8_3_0         FM(IP9_3_0)     IP9_3_0         FM(IP10_3_0)    IP10_3_0        FM(IP11_3_0)    IP11_3_0 \
+FM(IP8_7_4)    IP8_7_4         FM(IP9_7_4)     IP9_7_4         FM(IP10_7_4)    IP10_7_4        FM(IP11_7_4)    IP11_7_4 \
+FM(IP8_11_8)   IP8_11_8        FM(IP9_11_8)    IP9_11_8        FM(IP10_11_8)   IP10_11_8       FM(IP11_11_8)   IP11_11_8 \
+FM(IP8_15_12)  IP8_15_12       FM(IP9_15_12)   IP9_15_12       FM(IP10_15_12)  IP10_15_12      FM(IP11_15_12)  IP11_15_12 \
+FM(IP8_19_16)  IP8_19_16       FM(IP9_19_16)   IP9_19_16       FM(IP10_19_16)  IP10_19_16      FM(IP11_19_16)  IP11_19_16 \
+FM(IP8_23_20)  IP8_23_20       FM(IP9_23_20)   IP9_23_20       FM(IP10_23_20)  IP10_23_20      FM(IP11_23_20)  IP11_23_20 \
+FM(IP8_27_24)  IP8_27_24       FM(IP9_27_24)   IP9_27_24       FM(IP10_27_24)  IP10_27_24      FM(IP11_27_24)  IP11_27_24 \
+FM(IP8_31_28)  IP8_31_28       FM(IP9_31_28)   IP9_31_28       FM(IP10_31_28)  IP10_31_28      FM(IP11_31_28)  IP11_31_28 \
+\
+FM(IP12_3_0)   IP12_3_0        FM(IP13_3_0)    IP13_3_0        FM(IP14_3_0)    IP14_3_0        FM(IP15_3_0)    IP15_3_0 \
+FM(IP12_7_4)   IP12_7_4        FM(IP13_7_4)    IP13_7_4        FM(IP14_7_4)    IP14_7_4        FM(IP15_7_4)    IP15_7_4 \
+FM(IP12_11_8)  IP12_11_8       FM(IP13_11_8)   IP13_11_8       FM(IP14_11_8)   IP14_11_8       FM(IP15_11_8)   IP15_11_8 \
+FM(IP12_15_12) IP12_15_12      FM(IP13_15_12)  IP13_15_12      FM(IP14_15_12)  IP14_15_12      FM(IP15_15_12)  IP15_15_12 \
+FM(IP12_19_16) IP12_19_16      FM(IP13_19_16)  IP13_19_16      FM(IP14_19_16)  IP14_19_16      FM(IP15_19_16)  IP15_19_16 \
+FM(IP12_23_20) IP12_23_20      FM(IP13_23_20)  IP13_23_20      FM(IP14_23_20)  IP14_23_20      FM(IP15_23_20)  IP15_23_20 \
+FM(IP12_27_24) IP12_27_24      FM(IP13_27_24)  IP13_27_24      FM(IP14_27_24)  IP14_27_24      FM(IP15_27_24)  IP15_27_24 \
+FM(IP12_31_28) IP12_31_28      FM(IP13_31_28)  IP13_31_28      FM(IP14_31_28)  IP14_31_28      FM(IP15_31_28)  IP15_31_28 \
+\
+FM(IP16_3_0)   IP16_3_0        FM(IP17_3_0)    IP17_3_0 \
+FM(IP16_7_4)   IP16_7_4        FM(IP17_7_4)    IP17_7_4 \
+FM(IP16_11_8)  IP16_11_8 \
+FM(IP16_15_12) IP16_15_12 \
+FM(IP16_19_16) IP16_19_16 \
+FM(IP16_23_20) IP16_23_20 \
+FM(IP16_27_24) IP16_27_24 \
+FM(IP16_31_28) IP16_31_28
+
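+/*
+ * Explanatory note (not part of the original submission): the MOD_SELn
+ * tables describe module-select bitfields.  Each MOD_SELn_* entry lists, in
+ * register-value order, the SEL_* choices that steer a module to one of its
+ * alternative pin groups (the _A/_B/_C... suffixed signals in the IPSR
+ * tables above); F_(0, 0) again marks a reserved value.
+ */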
+/* MOD_SEL0 */                 /* 0 */                 /* 1 */                 /* 2 */                 /* 3 */                 /* 4 */                 /* 5 */                 /* 6 */                 /* 7 */
+#define MOD_SEL0_30_29         FM(SEL_MSIOF3_0)        FM(SEL_MSIOF3_1)        FM(SEL_MSIOF3_2)        FM(SEL_MSIOF3_3)
+#define MOD_SEL0_28_27         FM(SEL_MSIOF2_0)        FM(SEL_MSIOF2_1)        FM(SEL_MSIOF2_2)        FM(SEL_MSIOF2_3)
+#define MOD_SEL0_26_25_24      FM(SEL_MSIOF1_0)        FM(SEL_MSIOF1_1)        FM(SEL_MSIOF1_2)        FM(SEL_MSIOF1_3)        FM(SEL_MSIOF1_4)        FM(SEL_MSIOF1_5)        FM(SEL_MSIOF1_6)        F_(0, 0)
+#define MOD_SEL0_23            FM(SEL_LBSC_0)          FM(SEL_LBSC_1)
+#define MOD_SEL0_22            FM(SEL_IEBUS_0)         FM(SEL_IEBUS_1)
+#define MOD_SEL0_21_20         FM(SEL_I2C6_0)          FM(SEL_I2C6_1)          FM(SEL_I2C6_2)          F_(0, 0)
+#define MOD_SEL0_19            FM(SEL_I2C2_0)          FM(SEL_I2C2_1)
+#define MOD_SEL0_18            FM(SEL_I2C1_0)          FM(SEL_I2C1_1)
+#define MOD_SEL0_17            FM(SEL_HSCIF4_0)        FM(SEL_HSCIF4_1)
+#define MOD_SEL0_16_15         FM(SEL_HSCIF3_0)        FM(SEL_HSCIF3_1)        FM(SEL_HSCIF3_2)        FM(SEL_HSCIF3_3)
+#define MOD_SEL0_14            FM(SEL_HSCIF2_0)        FM(SEL_HSCIF2_1)
+#define MOD_SEL0_13            FM(SEL_HSCIF1_0)        FM(SEL_HSCIF1_1)
+#define MOD_SEL0_12            FM(SEL_FSO_0)           FM(SEL_FSO_1)
+#define MOD_SEL0_11            FM(SEL_FM_0)            FM(SEL_FM_1)
+#define MOD_SEL0_10            FM(SEL_ETHERAVB_0)      FM(SEL_ETHERAVB_1)
+#define MOD_SEL0_9             FM(SEL_DRIF3_0)         FM(SEL_DRIF3_1)
+#define MOD_SEL0_8             FM(SEL_DRIF2_0)         FM(SEL_DRIF2_1)
+#define MOD_SEL0_7_6           FM(SEL_DRIF1_0)         FM(SEL_DRIF1_1)         FM(SEL_DRIF1_2)         F_(0, 0)
+#define MOD_SEL0_5_4           FM(SEL_DRIF0_0)         FM(SEL_DRIF0_1)         FM(SEL_DRIF0_2)         F_(0, 0)
+#define MOD_SEL0_3             FM(SEL_CANFD0_0)        FM(SEL_CANFD0_1)
+#define MOD_SEL0_2_1           FM(SEL_ADG_0)           FM(SEL_ADG_1)           FM(SEL_ADG_2)           FM(SEL_ADG_3)
+
+/* MOD_SEL1 */                 /* 0 */                 /* 1 */                 /* 2 */                 /* 3 */                 /* 4 */                 /* 5 */                 /* 6 */                 /* 7 */
+#define MOD_SEL1_31_30         FM(SEL_TSIF1_0)         FM(SEL_TSIF1_1)         FM(SEL_TSIF1_2)         FM(SEL_TSIF1_3)
+#define MOD_SEL1_29_28_27      FM(SEL_TSIF0_0)         FM(SEL_TSIF0_1)         FM(SEL_TSIF0_2)         FM(SEL_TSIF0_3)         FM(SEL_TSIF0_4)         F_(0, 0)                F_(0, 0)                F_(0, 0)
+#define MOD_SEL1_26            FM(SEL_TIMER_TMU_0)     FM(SEL_TIMER_TMU_1)
+#define MOD_SEL1_25_24         FM(SEL_SSP1_1_0)        FM(SEL_SSP1_1_1)        FM(SEL_SSP1_1_2)        FM(SEL_SSP1_1_3)
+#define MOD_SEL1_23_22_21      FM(SEL_SSP1_0_0)        FM(SEL_SSP1_0_1)        FM(SEL_SSP1_0_2)        FM(SEL_SSP1_0_3)        FM(SEL_SSP1_0_4)        F_(0, 0)                F_(0, 0)                F_(0, 0)
+#define MOD_SEL1_20            FM(SEL_SSI_0)           FM(SEL_SSI_1)
+#define MOD_SEL1_19            FM(SEL_SPEED_PULSE_0)   FM(SEL_SPEED_PULSE_1)
+#define MOD_SEL1_18_17         FM(SEL_SIMCARD_0)       FM(SEL_SIMCARD_1)       FM(SEL_SIMCARD_2)       FM(SEL_SIMCARD_3)
+#define MOD_SEL1_16            FM(SEL_SDHI2_0)         FM(SEL_SDHI2_1)
+#define MOD_SEL1_15_14         FM(SEL_SCIF4_0)         FM(SEL_SCIF4_1)         FM(SEL_SCIF4_2)         F_(0, 0)
+#define MOD_SEL1_13            FM(SEL_SCIF3_0)         FM(SEL_SCIF3_1)
+#define MOD_SEL1_12            FM(SEL_SCIF2_0)         FM(SEL_SCIF2_1)
+#define MOD_SEL1_11            FM(SEL_SCIF1_0)         FM(SEL_SCIF1_1)
+#define MOD_SEL1_10            FM(SEL_SCIF_0)          FM(SEL_SCIF_1)
+#define MOD_SEL1_9             FM(SEL_REMOCON_0)       FM(SEL_REMOCON_1)
+#define MOD_SEL1_6             FM(SEL_RCAN0_0)         FM(SEL_RCAN0_1)
+#define MOD_SEL1_5             FM(SEL_PWM6_0)          FM(SEL_PWM6_1)
+#define MOD_SEL1_4             FM(SEL_PWM5_0)          FM(SEL_PWM5_1)
+#define MOD_SEL1_3             FM(SEL_PWM4_0)          FM(SEL_PWM4_1)
+#define MOD_SEL1_2             FM(SEL_PWM3_0)          FM(SEL_PWM3_1)
+#define MOD_SEL1_1             FM(SEL_PWM2_0)          FM(SEL_PWM2_1)
+#define MOD_SEL1_0             FM(SEL_PWM1_0)          FM(SEL_PWM1_1)
+
+/* MOD_SEL2 */                 /* 0 */                 /* 1 */                 /* 2 */                 /* 3 */
+#define MOD_SEL2_31            FM(I2C_SEL_5_0)         FM(I2C_SEL_5_1)
+#define MOD_SEL2_30            FM(I2C_SEL_3_0)         FM(I2C_SEL_3_1)
+#define MOD_SEL2_29            FM(I2C_SEL_0_0)         FM(I2C_SEL_0_1)
+#define MOD_SEL2_2_1           FM(SEL_VSP_0)           FM(SEL_VSP_1)           FM(SEL_VSP_2)           FM(SEL_VSP_3)
+#define MOD_SEL2_0             FM(SEL_VIN4_0)          FM(SEL_VIN4_1)
+
+#define PINMUX_MOD_SELS\
+\
+                       MOD_SEL1_31_30          MOD_SEL2_31 \
+MOD_SEL0_30_29                                 MOD_SEL2_30 \
+                       MOD_SEL1_29_28_27       MOD_SEL2_29 \
+MOD_SEL0_28_27 \
+\
+MOD_SEL0_26_25_24      MOD_SEL1_26 \
+                       MOD_SEL1_25_24 \
+\
+MOD_SEL0_23            MOD_SEL1_23_22_21 \
+MOD_SEL0_22 \
+MOD_SEL0_21_20 \
+                       MOD_SEL1_20 \
+MOD_SEL0_19            MOD_SEL1_19 \
+MOD_SEL0_18            MOD_SEL1_18_17 \
+MOD_SEL0_17 \
+MOD_SEL0_16_15         MOD_SEL1_16 \
+                       MOD_SEL1_15_14 \
+MOD_SEL0_14 \
+MOD_SEL0_13            MOD_SEL1_13 \
+MOD_SEL0_12            MOD_SEL1_12 \
+MOD_SEL0_11            MOD_SEL1_11 \
+MOD_SEL0_10            MOD_SEL1_10 \
+MOD_SEL0_9             MOD_SEL1_9 \
+MOD_SEL0_8 \
+MOD_SEL0_7_6 \
+                       MOD_SEL1_6 \
+MOD_SEL0_5_4           MOD_SEL1_5 \
+                       MOD_SEL1_4 \
+MOD_SEL0_3             MOD_SEL1_3 \
+MOD_SEL0_2_1           MOD_SEL1_2              MOD_SEL2_2_1 \
+                       MOD_SEL1_1 \
+                       MOD_SEL1_0              MOD_SEL2_0
+
+
+enum {
+       PINMUX_RESERVED = 0,
+
+       PINMUX_DATA_BEGIN,
+       GP_ALL(DATA),
+       PINMUX_DATA_END,
+
+#define F_(x, y)
+#define FM(x)  FN_##x,
+       PINMUX_FUNCTION_BEGIN,
+       GP_ALL(FN),
+       PINMUX_GPSR
+       PINMUX_IPSR
+       PINMUX_MOD_SELS
+       PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+#define F_(x, y)
+#define FM(x)  x##_MARK,
+       PINMUX_MARK_BEGIN,
+       PINMUX_GPSR
+       PINMUX_IPSR
+       PINMUX_MOD_SELS
+       PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
+static const u16 pinmux_data[] = {
+       PINMUX_DATA_GP_ALL(),
+
+       /* IPSR0 */
+       PINMUX_IPSR_DATA(IP0_3_0,       AVB_MDC),
+       PINMUX_IPSR_MSEL(IP0_3_0,       MSIOF2_SS2_C,           SEL_MSIOF2_2),
+
+       PINMUX_IPSR_DATA(IP0_7_4,       AVB_MAGIC),
+       PINMUX_IPSR_MSEL(IP0_7_4,       MSIOF2_SS1_C,           SEL_MSIOF2_2),
+       PINMUX_IPSR_MSEL(IP0_7_4,       SCK4_A,                 SEL_SCIF4_0),
+
+       PINMUX_IPSR_DATA(IP0_11_8,      AVB_PHY_INT),
+       PINMUX_IPSR_MSEL(IP0_11_8,      MSIOF2_SYNC_C,          SEL_MSIOF2_2),
+       PINMUX_IPSR_MSEL(IP0_11_8,      RX4_A,                  SEL_SCIF4_0),
+
+       PINMUX_IPSR_DATA(IP0_15_12,     AVB_LINK),
+       PINMUX_IPSR_MSEL(IP0_15_12,     MSIOF2_SCK_C,           SEL_MSIOF2_2),
+       PINMUX_IPSR_MSEL(IP0_15_12,     TX4_A,                  SEL_SCIF4_0),
+
+       PINMUX_IPSR_MSEL(IP0_19_16,     AVB_AVTP_MATCH_A,       SEL_ETHERAVB_0),
+       PINMUX_IPSR_MSEL(IP0_19_16,     MSIOF2_RXD_C,           SEL_MSIOF2_2),
+       PINMUX_IPSR_MSEL(IP0_19_16,     CTS4_N_A,               SEL_SCIF4_0),
+
+       PINMUX_IPSR_MSEL(IP0_23_20,     AVB_AVTP_CAPTURE_A,     SEL_ETHERAVB_0),
+       PINMUX_IPSR_MSEL(IP0_23_20,     MSIOF2_TXD_C,           SEL_MSIOF2_2),
+       PINMUX_IPSR_MSEL(IP0_23_20,     RTS4_N_TANS_A,          SEL_SCIF4_0),
+
+       PINMUX_IPSR_DATA(IP0_27_24,     IRQ0),
+       PINMUX_IPSR_DATA(IP0_27_24,     QPOLB),
+       PINMUX_IPSR_DATA(IP0_27_24,     DU_CDE),
+       PINMUX_IPSR_MSEL(IP0_27_24,     VI4_DATA0_B,            SEL_VIN4_1),
+       PINMUX_IPSR_MSEL(IP0_27_24,     CAN0_TX_B,              SEL_RCAN0_1),
+       PINMUX_IPSR_MSEL(IP0_27_24,     CANFD0_TX_B,            SEL_CANFD0_1),
+
+       PINMUX_IPSR_DATA(IP0_31_28,     IRQ1),
+       PINMUX_IPSR_DATA(IP0_31_28,     QPOLA),
+       PINMUX_IPSR_DATA(IP0_31_28,     DU_DISP),
+       PINMUX_IPSR_MSEL(IP0_31_28,     VI4_DATA1_B,            SEL_VIN4_1),
+       PINMUX_IPSR_MSEL(IP0_31_28,     CAN0_RX_B,              SEL_RCAN0_1),
+       PINMUX_IPSR_MSEL(IP0_31_28,     CANFD0_RX_B,            SEL_CANFD0_1),
+
+       /* IPSR1 */
+       PINMUX_IPSR_DATA(IP1_3_0,       IRQ2),
+       PINMUX_IPSR_DATA(IP1_3_0,       QCPV_QDE),
+       PINMUX_IPSR_DATA(IP1_3_0,       DU_EXODDF_DU_ODDF_DISP_CDE),
+       PINMUX_IPSR_MSEL(IP1_3_0,       VI4_DATA2_B,            SEL_VIN4_1),
+       PINMUX_IPSR_MSEL(IP1_3_0,       PWM3_B,                 SEL_PWM3_1),
+
+       PINMUX_IPSR_DATA(IP1_7_4,       IRQ3),
+       PINMUX_IPSR_DATA(IP1_7_4,       QSTVB_QVE),
+       PINMUX_IPSR_DATA(IP1_7_4,       A25),
+       PINMUX_IPSR_DATA(IP1_7_4,       DU_DOTCLKOUT1),
+       PINMUX_IPSR_MSEL(IP1_7_4,       VI4_DATA3_B,            SEL_VIN4_1),
+       PINMUX_IPSR_MSEL(IP1_7_4,       PWM4_B,                 SEL_PWM4_1),
+
+       PINMUX_IPSR_DATA(IP1_11_8,      IRQ4),
+       PINMUX_IPSR_DATA(IP1_11_8,      QSTH_QHS),
+       PINMUX_IPSR_DATA(IP1_11_8,      A24),
+       PINMUX_IPSR_DATA(IP1_11_8,      DU_EXHSYNC_DU_HSYNC),
+       PINMUX_IPSR_MSEL(IP1_11_8,      VI4_DATA4_B,            SEL_VIN4_1),
+       PINMUX_IPSR_MSEL(IP1_11_8,      PWM5_B,                 SEL_PWM5_1),
+
+       PINMUX_IPSR_DATA(IP1_15_12,     IRQ5),
+       PINMUX_IPSR_DATA(IP1_15_12,     QSTB_QHE),
+       PINMUX_IPSR_DATA(IP1_15_12,     A23),
+       PINMUX_IPSR_DATA(IP1_15_12,     DU_EXVSYNC_DU_VSYNC),
+       PINMUX_IPSR_MSEL(IP1_15_12,     VI4_DATA5_B,            SEL_VIN4_1),
+       PINMUX_IPSR_MSEL(IP1_15_12,     PWM6_B,                 SEL_PWM6_1),
+
+       PINMUX_IPSR_DATA(IP1_19_16,     PWM0),
+       PINMUX_IPSR_DATA(IP1_19_16,     AVB_AVTP_PPS),
+       PINMUX_IPSR_DATA(IP1_19_16,     A22),
+       PINMUX_IPSR_MSEL(IP1_19_16,     VI4_DATA6_B,            SEL_VIN4_1),
+       PINMUX_IPSR_MSEL(IP1_19_16,     IECLK_B,                SEL_IEBUS_1),
+
+       PINMUX_IPSR_MSEL(IP1_23_20,     PWM1_A,                 SEL_PWM1_0),
+       PINMUX_IPSR_DATA(IP1_23_20,     A21),
+       PINMUX_IPSR_MSEL(IP1_23_20,     HRX3_D,                 SEL_HSCIF3_3),
+       PINMUX_IPSR_MSEL(IP1_23_20,     VI4_DATA7_B,            SEL_VIN4_1),
+       PINMUX_IPSR_MSEL(IP1_23_20,     IERX_B,                 SEL_IEBUS_1),
+
+       PINMUX_IPSR_MSEL(IP1_27_24,     PWM2_A,                 SEL_PWM2_0),
+       PINMUX_IPSR_DATA(IP1_27_24,     A20),
+       PINMUX_IPSR_MSEL(IP1_27_24,     HTX3_D,                 SEL_HSCIF3_3),
+       PINMUX_IPSR_MSEL(IP1_27_24,     IETX_B,                 SEL_IEBUS_1),
+
+       PINMUX_IPSR_DATA(IP1_31_28,     A0),
+       PINMUX_IPSR_DATA(IP1_31_28,     LCDOUT16),
+       PINMUX_IPSR_MSEL(IP1_31_28,     MSIOF3_SYNC_B,          SEL_MSIOF3_1),
+       PINMUX_IPSR_DATA(IP1_31_28,     VI4_DATA8),
+       PINMUX_IPSR_DATA(IP1_31_28,     DU_DB0),
+       PINMUX_IPSR_MSEL(IP1_31_28,     PWM3_A,                 SEL_PWM3_0),
+
+       /* IPSR2 */
+       PINMUX_IPSR_DATA(IP2_3_0,       A1),
+       PINMUX_IPSR_DATA(IP2_3_0,       LCDOUT17),
+       PINMUX_IPSR_MSEL(IP2_3_0,       MSIOF3_TXD_B,           SEL_MSIOF3_1),
+       PINMUX_IPSR_DATA(IP2_3_0,       VI4_DATA9),
+       PINMUX_IPSR_DATA(IP2_3_0,       DU_DB1),
+       PINMUX_IPSR_MSEL(IP2_3_0,       PWM4_A,                 SEL_PWM4_0),
+
+       PINMUX_IPSR_DATA(IP2_7_4,       A2),
+       PINMUX_IPSR_DATA(IP2_7_4,       LCDOUT18),
+       PINMUX_IPSR_MSEL(IP2_7_4,       MSIOF3_SCK_B,           SEL_MSIOF3_1),
+       PINMUX_IPSR_DATA(IP2_7_4,       VI4_DATA10),
+       PINMUX_IPSR_DATA(IP2_7_4,       DU_DB2),
+       PINMUX_IPSR_MSEL(IP2_7_4,       PWM5_A,                 SEL_PWM5_0),
+
+       PINMUX_IPSR_DATA(IP2_11_8,      A3),
+       PINMUX_IPSR_DATA(IP2_11_8,      LCDOUT19),
+       PINMUX_IPSR_MSEL(IP2_11_8,      MSIOF3_RXD_B,           SEL_MSIOF3_1),
+       PINMUX_IPSR_DATA(IP2_11_8,      VI4_DATA11),
+       PINMUX_IPSR_DATA(IP2_11_8,      DU_DB3),
+       PINMUX_IPSR_MSEL(IP2_11_8,      PWM6_A,                 SEL_PWM6_0),
+
+       PINMUX_IPSR_DATA(IP2_15_12,     A4),
+       PINMUX_IPSR_DATA(IP2_15_12,     LCDOUT20),
+       PINMUX_IPSR_MSEL(IP2_15_12,     MSIOF3_SS1_B,           SEL_MSIOF3_1),
+       PINMUX_IPSR_DATA(IP2_15_12,     VI4_DATA12),
+       PINMUX_IPSR_DATA(IP2_15_12,     VI5_DATA12),
+       PINMUX_IPSR_DATA(IP2_15_12,     DU_DB4),
+
+       PINMUX_IPSR_DATA(IP2_19_16,     A5),
+       PINMUX_IPSR_DATA(IP2_19_16,     LCDOUT21),
+       PINMUX_IPSR_MSEL(IP2_19_16,     MSIOF3_SS2_B,           SEL_MSIOF3_1),
+       PINMUX_IPSR_MSEL(IP2_19_16,     SCK4_B,                 SEL_SCIF4_1),
+       PINMUX_IPSR_DATA(IP2_19_16,     VI4_DATA13),
+       PINMUX_IPSR_DATA(IP2_19_16,     VI5_DATA13),
+       PINMUX_IPSR_DATA(IP2_19_16,     DU_DB5),
+
+       PINMUX_IPSR_DATA(IP2_23_20,     A6),
+       PINMUX_IPSR_DATA(IP2_23_20,     LCDOUT22),
+       PINMUX_IPSR_MSEL(IP2_23_20,     MSIOF2_SS1_A,           SEL_MSIOF2_0),
+       PINMUX_IPSR_MSEL(IP2_23_20,     RX4_B,                  SEL_SCIF4_1),
+       PINMUX_IPSR_DATA(IP2_23_20,     VI4_DATA14),
+       PINMUX_IPSR_DATA(IP2_23_20,     VI5_DATA14),
+       PINMUX_IPSR_DATA(IP2_23_20,     DU_DB6),
+
+       PINMUX_IPSR_DATA(IP2_27_24,     A7),
+       PINMUX_IPSR_DATA(IP2_27_24,     LCDOUT23),
+       PINMUX_IPSR_MSEL(IP2_27_24,     MSIOF2_SS2_A,           SEL_MSIOF2_0),
+       PINMUX_IPSR_MSEL(IP2_27_24,     TX4_B,                  SEL_SCIF4_1),
+       PINMUX_IPSR_DATA(IP2_27_24,     VI4_DATA15),
+       PINMUX_IPSR_DATA(IP2_27_24,     VI5_DATA15),
+       PINMUX_IPSR_DATA(IP2_27_24,     DU_DB7),
+
+       PINMUX_IPSR_DATA(IP2_31_28,     A8),
+       PINMUX_IPSR_MSEL(IP2_31_28,     RX3_B,                  SEL_SCIF3_1),
+       PINMUX_IPSR_MSEL(IP2_31_28,     MSIOF2_SYNC_A,          SEL_MSIOF2_0),
+       PINMUX_IPSR_MSEL(IP2_31_28,     HRX4_B,                 SEL_HSCIF4_1),
+       PINMUX_IPSR_MSEL(IP2_31_28,     SDA6_A,                 SEL_I2C6_0),
+       PINMUX_IPSR_MSEL(IP2_31_28,     AVB_AVTP_MATCH_B,       SEL_ETHERAVB_1),
+       PINMUX_IPSR_MSEL(IP2_31_28,     PWM1_B,                 SEL_PWM1_1),
+
+       /* IPSR3 */
+       PINMUX_IPSR_DATA(IP3_3_0,       A9),
+       PINMUX_IPSR_MSEL(IP3_3_0,       MSIOF2_SCK_A,           SEL_MSIOF2_0),
+       PINMUX_IPSR_MSEL(IP3_3_0,       CTS4_N_B,               SEL_SCIF4_1),
+       PINMUX_IPSR_DATA(IP3_3_0,       VI5_VSYNC_N),
+
+       PINMUX_IPSR_DATA(IP3_7_4,       A10),
+       PINMUX_IPSR_MSEL(IP3_7_4,       MSIOF2_RXD_A,           SEL_MSIOF2_0),
+       PINMUX_IPSR_MSEL(IP3_7_4,       RTS4_N_TANS_B,          SEL_SCIF4_1),
+       PINMUX_IPSR_DATA(IP3_7_4,       VI5_HSYNC_N),
+
+       PINMUX_IPSR_DATA(IP3_11_8,      A11),
+       PINMUX_IPSR_MSEL(IP3_11_8,      TX3_B,                  SEL_SCIF3_1),
+       PINMUX_IPSR_MSEL(IP3_11_8,      MSIOF2_TXD_A,           SEL_MSIOF2_0),
+       PINMUX_IPSR_MSEL(IP3_11_8,      HTX4_B,                 SEL_HSCIF4_1),
+       PINMUX_IPSR_DATA(IP3_11_8,      HSCK4),
+       PINMUX_IPSR_DATA(IP3_11_8,      VI5_FIELD),
+       PINMUX_IPSR_MSEL(IP3_11_8,      SCL6_A,                 SEL_I2C6_0),
+       PINMUX_IPSR_MSEL(IP3_11_8,      AVB_AVTP_CAPTURE_B,     SEL_ETHERAVB_1),
+       PINMUX_IPSR_MSEL(IP3_11_8,      PWM2_B,                 SEL_PWM2_1),
+
+       PINMUX_IPSR_DATA(IP3_15_12,     A12),
+       PINMUX_IPSR_DATA(IP3_15_12,     LCDOUT12),
+       PINMUX_IPSR_MSEL(IP3_15_12,     MSIOF3_SCK_C,           SEL_MSIOF3_2),
+       PINMUX_IPSR_MSEL(IP3_15_12,     HRX4_A,                 SEL_HSCIF4_0),
+       PINMUX_IPSR_DATA(IP3_15_12,     VI5_DATA8),
+       PINMUX_IPSR_DATA(IP3_15_12,     DU_DG4),
+
+       PINMUX_IPSR_DATA(IP3_19_16,     A13),
+       PINMUX_IPSR_DATA(IP3_19_16,     LCDOUT13),
+       PINMUX_IPSR_MSEL(IP3_19_16,     MSIOF3_SYNC_C,          SEL_MSIOF3_2),
+       PINMUX_IPSR_MSEL(IP3_19_16,     HTX4_A,                 SEL_HSCIF4_0),
+       PINMUX_IPSR_DATA(IP3_19_16,     VI5_DATA9),
+       PINMUX_IPSR_DATA(IP3_19_16,     DU_DG5),
+
+       PINMUX_IPSR_DATA(IP3_23_20,     A14),
+       PINMUX_IPSR_DATA(IP3_23_20,     LCDOUT14),
+       PINMUX_IPSR_MSEL(IP3_23_20,     MSIOF3_RXD_C,           SEL_MSIOF3_2),
+       PINMUX_IPSR_DATA(IP3_23_20,     HCTS4_N),
+       PINMUX_IPSR_DATA(IP3_23_20,     VI5_DATA10),
+       PINMUX_IPSR_DATA(IP3_23_20,     DU_DG6),
+
+       PINMUX_IPSR_DATA(IP3_27_24,     A15),
+       PINMUX_IPSR_DATA(IP3_27_24,     LCDOUT15),
+       PINMUX_IPSR_MSEL(IP3_27_24,     MSIOF3_TXD_C,           SEL_MSIOF3_2),
+       PINMUX_IPSR_DATA(IP3_27_24,     HRTS4_N),
+       PINMUX_IPSR_DATA(IP3_27_24,     VI5_DATA11),
+       PINMUX_IPSR_DATA(IP3_27_24,     DU_DG7),
+
+       PINMUX_IPSR_DATA(IP3_31_28,     A16),
+       PINMUX_IPSR_DATA(IP3_31_28,     LCDOUT8),
+       PINMUX_IPSR_DATA(IP3_31_28,     VI4_FIELD),
+       PINMUX_IPSR_DATA(IP3_31_28,     DU_DG0),
+
+       /* IPSR4 */
+       PINMUX_IPSR_DATA(IP4_3_0,       A17),
+       PINMUX_IPSR_DATA(IP4_3_0,       LCDOUT9),
+       PINMUX_IPSR_DATA(IP4_3_0,       VI4_VSYNC_N),
+       PINMUX_IPSR_DATA(IP4_3_0,       DU_DG1),
+
+       PINMUX_IPSR_DATA(IP4_7_4,       A18),
+       PINMUX_IPSR_DATA(IP4_7_4,       LCDOUT10),
+       PINMUX_IPSR_DATA(IP4_7_4,       VI4_HSYNC_N),
+       PINMUX_IPSR_DATA(IP4_7_4,       DU_DG2),
+
+       PINMUX_IPSR_DATA(IP4_11_8,      A19),
+       PINMUX_IPSR_DATA(IP4_11_8,      LCDOUT11),
+       PINMUX_IPSR_DATA(IP4_11_8,      VI4_CLKENB),
+       PINMUX_IPSR_DATA(IP4_11_8,      DU_DG3),
+
+       PINMUX_IPSR_DATA(IP4_15_12,     CS0_N),
+       PINMUX_IPSR_DATA(IP4_15_12,     VI5_CLKENB),
+
+       PINMUX_IPSR_DATA(IP4_19_16,     CS1_N_A26),
+       PINMUX_IPSR_DATA(IP4_19_16,     VI5_CLK),
+       PINMUX_IPSR_MSEL(IP4_19_16,     EX_WAIT0_B,             SEL_LBSC_1),
+
+       PINMUX_IPSR_DATA(IP4_23_20,     BS_N),
+       PINMUX_IPSR_DATA(IP4_23_20,     QSTVA_QVS),
+       PINMUX_IPSR_MSEL(IP4_23_20,     MSIOF3_SCK_D,           SEL_MSIOF3_3),
+       PINMUX_IPSR_DATA(IP4_23_20,     SCK3),
+       PINMUX_IPSR_DATA(IP4_23_20,     HSCK3),
+       PINMUX_IPSR_DATA(IP4_23_20,     CAN1_TX),
+       PINMUX_IPSR_DATA(IP4_23_20,     CANFD1_TX),
+       PINMUX_IPSR_MSEL(IP4_23_20,     IETX_A,                 SEL_IEBUS_0),
+
+       PINMUX_IPSR_DATA(IP4_27_24,     RD_N),
+       PINMUX_IPSR_MSEL(IP4_27_24,     MSIOF3_SYNC_D,          SEL_MSIOF3_3),
+       PINMUX_IPSR_MSEL(IP4_27_24,     RX3_A,                  SEL_SCIF3_0),
+       PINMUX_IPSR_MSEL(IP4_27_24,     HRX3_A,                 SEL_HSCIF3_0),
+       PINMUX_IPSR_MSEL(IP4_27_24,     CAN0_TX_A,              SEL_RCAN0_0),
+       PINMUX_IPSR_MSEL(IP4_27_24,     CANFD0_TX_A,            SEL_CANFD0_0),
+
+       PINMUX_IPSR_DATA(IP4_31_28,     RD_WR_N),
+       PINMUX_IPSR_MSEL(IP4_31_28,     MSIOF3_RXD_D,           SEL_MSIOF3_3),
+       PINMUX_IPSR_MSEL(IP4_31_28,     TX3_A,                  SEL_SCIF3_0),
+       PINMUX_IPSR_MSEL(IP4_31_28,     HTX3_A,                 SEL_HSCIF3_0),
+       PINMUX_IPSR_MSEL(IP4_31_28,     CAN0_RX_A,              SEL_RCAN0_0),
+       PINMUX_IPSR_MSEL(IP4_31_28,     CANFD0_RX_A,            SEL_CANFD0_0),
+
+       /* IPSR5 */
+       PINMUX_IPSR_DATA(IP5_3_0,       WE0_N),
+       PINMUX_IPSR_MSEL(IP5_3_0,       MSIOF3_TXD_D,           SEL_MSIOF3_3),
+       PINMUX_IPSR_DATA(IP5_3_0,       CTS3_N),
+       PINMUX_IPSR_DATA(IP5_3_0,       HCTS3_N),
+       PINMUX_IPSR_MSEL(IP5_3_0,       SCL6_B,                 SEL_I2C6_1),
+       PINMUX_IPSR_DATA(IP5_3_0,       CAN_CLK),
+       PINMUX_IPSR_MSEL(IP5_3_0,       IECLK_A,                SEL_IEBUS_0),
+
+       PINMUX_IPSR_DATA(IP5_7_4,       WE1_N),
+       PINMUX_IPSR_MSEL(IP5_7_4,       MSIOF3_SS1_D,           SEL_MSIOF3_3),
+       PINMUX_IPSR_DATA(IP5_7_4,       RTS3_N_TANS),
+       PINMUX_IPSR_DATA(IP5_7_4,       HRTS3_N),
+       PINMUX_IPSR_MSEL(IP5_7_4,       SDA6_B,                 SEL_I2C6_1),
+       PINMUX_IPSR_DATA(IP5_7_4,       CAN1_RX),
+       PINMUX_IPSR_DATA(IP5_7_4,       CANFD1_RX),
+       PINMUX_IPSR_MSEL(IP5_7_4,       IERX_A,                 SEL_IEBUS_0),
+
+       PINMUX_IPSR_MSEL(IP5_11_8,      EX_WAIT0_A,             SEL_LBSC_0),
+       PINMUX_IPSR_DATA(IP5_11_8,      QCLK),
+       PINMUX_IPSR_DATA(IP5_11_8,      VI4_CLK),
+       PINMUX_IPSR_DATA(IP5_11_8,      DU_DOTCLKOUT0),
+
+       PINMUX_IPSR_DATA(IP5_15_12,     D0),
+       PINMUX_IPSR_MSEL(IP5_15_12,     MSIOF2_SS1_B,           SEL_MSIOF2_1),
+       PINMUX_IPSR_MSEL(IP5_15_12,     MSIOF3_SCK_A,           SEL_MSIOF3_0),
+       PINMUX_IPSR_DATA(IP5_15_12,     VI4_DATA16),
+       PINMUX_IPSR_DATA(IP5_15_12,     VI5_DATA0),
+
+       PINMUX_IPSR_DATA(IP5_19_16,     D1),
+       PINMUX_IPSR_MSEL(IP5_19_16,     MSIOF2_SS2_B,           SEL_MSIOF2_1),
+       PINMUX_IPSR_MSEL(IP5_19_16,     MSIOF3_SYNC_A,          SEL_MSIOF3_0),
+       PINMUX_IPSR_DATA(IP5_19_16,     VI4_DATA17),
+       PINMUX_IPSR_DATA(IP5_19_16,     VI5_DATA1),
+
+       PINMUX_IPSR_DATA(IP5_23_20,     D2),
+       PINMUX_IPSR_MSEL(IP5_23_20,     MSIOF3_RXD_A,           SEL_MSIOF3_0),
+       PINMUX_IPSR_DATA(IP5_23_20,     VI4_DATA18),
+       PINMUX_IPSR_DATA(IP5_23_20,     VI5_DATA2),
+
+       PINMUX_IPSR_DATA(IP5_27_24,     D3),
+       PINMUX_IPSR_MSEL(IP5_27_24,     MSIOF3_TXD_A,           SEL_MSIOF3_0),
+       PINMUX_IPSR_DATA(IP5_27_24,     VI4_DATA19),
+       PINMUX_IPSR_DATA(IP5_27_24,     VI5_DATA3),
+
+       PINMUX_IPSR_DATA(IP5_31_28,     D4),
+       PINMUX_IPSR_MSEL(IP5_31_28,     MSIOF2_SCK_B,           SEL_MSIOF2_1),
+       PINMUX_IPSR_DATA(IP5_31_28,     VI4_DATA20),
+       PINMUX_IPSR_DATA(IP5_31_28,     VI5_DATA4),
+
+       /* IPSR6 */
+       PINMUX_IPSR_DATA(IP6_3_0,       D5),
+       PINMUX_IPSR_MSEL(IP6_3_0,       MSIOF2_SYNC_B,          SEL_MSIOF2_1),
+       PINMUX_IPSR_DATA(IP6_3_0,       VI4_DATA21),
+       PINMUX_IPSR_DATA(IP6_3_0,       VI5_DATA5),
+
+       PINMUX_IPSR_DATA(IP6_7_4,       D6),
+       PINMUX_IPSR_MSEL(IP6_7_4,       MSIOF2_RXD_B,           SEL_MSIOF2_1),
+       PINMUX_IPSR_DATA(IP6_7_4,       VI4_DATA22),
+       PINMUX_IPSR_DATA(IP6_7_4,       VI5_DATA6),
+
+       PINMUX_IPSR_DATA(IP6_11_8,      D7),
+       PINMUX_IPSR_MSEL(IP6_11_8,      MSIOF2_TXD_B,           SEL_MSIOF2_1),
+       PINMUX_IPSR_DATA(IP6_11_8,      VI4_DATA23),
+       PINMUX_IPSR_DATA(IP6_11_8,      VI5_DATA7),
+
+       PINMUX_IPSR_DATA(IP6_15_12,     D8),
+       PINMUX_IPSR_DATA(IP6_15_12,     LCDOUT0),
+       PINMUX_IPSR_MSEL(IP6_15_12,     MSIOF2_SCK_D,           SEL_MSIOF2_3),
+       PINMUX_IPSR_MSEL(IP6_15_12,     SCK4_C,                 SEL_SCIF4_2),
+       PINMUX_IPSR_MSEL(IP6_15_12,     VI4_DATA0_A,            SEL_VIN4_0),
+       PINMUX_IPSR_DATA(IP6_15_12,     DU_DR0),
+
+       PINMUX_IPSR_DATA(IP6_19_16,     D9),
+       PINMUX_IPSR_DATA(IP6_19_16,     LCDOUT1),
+       PINMUX_IPSR_MSEL(IP6_19_16,     MSIOF2_SYNC_D,          SEL_MSIOF2_3),
+       PINMUX_IPSR_MSEL(IP6_19_16,     VI4_DATA1_A,            SEL_VIN4_0),
+       PINMUX_IPSR_DATA(IP6_19_16,     DU_DR1),
+
+       PINMUX_IPSR_DATA(IP6_23_20,     D10),
+       PINMUX_IPSR_DATA(IP6_23_20,     LCDOUT2),
+       PINMUX_IPSR_MSEL(IP6_23_20,     MSIOF2_RXD_D,           SEL_MSIOF2_3),
+       PINMUX_IPSR_MSEL(IP6_23_20,     HRX3_B,                 SEL_HSCIF3_1),
+       PINMUX_IPSR_MSEL(IP6_23_20,     VI4_DATA2_A,            SEL_VIN4_0),
+       PINMUX_IPSR_MSEL(IP6_23_20,     CTS4_N_C,               SEL_SCIF4_2),
+       PINMUX_IPSR_DATA(IP6_23_20,     DU_DR2),
+
+       PINMUX_IPSR_DATA(IP6_27_24,     D11),
+       PINMUX_IPSR_DATA(IP6_27_24,     LCDOUT3),
+       PINMUX_IPSR_MSEL(IP6_27_24,     MSIOF2_TXD_D,           SEL_MSIOF2_3),
+       PINMUX_IPSR_MSEL(IP6_27_24,     HTX3_B,                 SEL_HSCIF3_1),
+       PINMUX_IPSR_MSEL(IP6_27_24,     VI4_DATA3_A,            SEL_VIN4_0),
+       PINMUX_IPSR_MSEL(IP6_27_24,     RTS4_N_TANS_C,          SEL_SCIF4_2),
+       PINMUX_IPSR_DATA(IP6_27_24,     DU_DR3),
+
+       PINMUX_IPSR_DATA(IP6_31_28,     D12),
+       PINMUX_IPSR_DATA(IP6_31_28,     LCDOUT4),
+       PINMUX_IPSR_MSEL(IP6_31_28,     MSIOF2_SS1_D,           SEL_MSIOF2_3),
+       PINMUX_IPSR_MSEL(IP6_31_28,     RX4_C,                  SEL_SCIF4_2),
+       PINMUX_IPSR_MSEL(IP6_31_28,     VI4_DATA4_A,            SEL_VIN4_0),
+       PINMUX_IPSR_DATA(IP6_31_28,     DU_DR4),
+
+       /* IPSR7 */
+       PINMUX_IPSR_DATA(IP7_3_0,       D13),
+       PINMUX_IPSR_DATA(IP7_3_0,       LCDOUT5),
+       PINMUX_IPSR_MSEL(IP7_3_0,       MSIOF2_SS2_D,           SEL_MSIOF2_3),
+       PINMUX_IPSR_MSEL(IP7_3_0,       TX4_C,                  SEL_SCIF4_2),
+       PINMUX_IPSR_MSEL(IP7_3_0,       VI4_DATA5_A,            SEL_VIN4_0),
+       PINMUX_IPSR_DATA(IP7_3_0,       DU_DR5),
+
+       PINMUX_IPSR_DATA(IP7_7_4,       D14),
+       PINMUX_IPSR_DATA(IP7_7_4,       LCDOUT6),
+       PINMUX_IPSR_MSEL(IP7_7_4,       MSIOF3_SS1_A,           SEL_MSIOF3_0),
+       PINMUX_IPSR_MSEL(IP7_7_4,       HRX3_C,                 SEL_HSCIF3_2),
+       PINMUX_IPSR_MSEL(IP7_7_4,       VI4_DATA6_A,            SEL_VIN4_0),
+       PINMUX_IPSR_DATA(IP7_7_4,       DU_DR6),
+       PINMUX_IPSR_MSEL(IP7_7_4,       SCL6_C,                 SEL_I2C6_2),
+
+       PINMUX_IPSR_DATA(IP7_11_8,      D15),
+       PINMUX_IPSR_DATA(IP7_11_8,      LCDOUT7),
+       PINMUX_IPSR_MSEL(IP7_11_8,      MSIOF3_SS2_A,           SEL_MSIOF3_0),
+       PINMUX_IPSR_MSEL(IP7_11_8,      HTX3_C,                 SEL_HSCIF3_2),
+       PINMUX_IPSR_MSEL(IP7_11_8,      VI4_DATA7_A,            SEL_VIN4_0),
+       PINMUX_IPSR_DATA(IP7_11_8,      DU_DR7),
+       PINMUX_IPSR_MSEL(IP7_11_8,      SDA6_C,                 SEL_I2C6_2),
+
+       PINMUX_IPSR_DATA(IP7_15_12,     FSCLKST),
+
+       PINMUX_IPSR_DATA(IP7_19_16,     SD0_CLK),
+       PINMUX_IPSR_MSEL(IP7_19_16,     MSIOF1_SCK_E,           SEL_MSIOF1_4),
+       PINMUX_IPSR_MSEL(IP7_19_16,     STP_OPWM_0_B,           SEL_SSP1_0_1),
+
+       PINMUX_IPSR_DATA(IP7_23_20,     SD0_CMD),
+       PINMUX_IPSR_MSEL(IP7_23_20,     MSIOF1_SYNC_E,          SEL_MSIOF1_4),
+       PINMUX_IPSR_MSEL(IP7_23_20,     STP_IVCXO27_0_B,        SEL_SSP1_0_1),
+
+       PINMUX_IPSR_DATA(IP7_27_24,     SD0_DAT0),
+       PINMUX_IPSR_MSEL(IP7_27_24,     MSIOF1_RXD_E,           SEL_MSIOF1_4),
+       PINMUX_IPSR_MSEL(IP7_27_24,     TS_SCK0_B,              SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP7_27_24,     STP_ISCLK_0_B,          SEL_SSP1_0_1),
+
+       PINMUX_IPSR_DATA(IP7_31_28,     SD0_DAT1),
+       PINMUX_IPSR_MSEL(IP7_31_28,     MSIOF1_TXD_E,           SEL_MSIOF1_4),
+       PINMUX_IPSR_MSEL(IP7_31_28,     TS_SPSYNC0_B,           SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP7_31_28,     STP_ISSYNC_0_B,         SEL_SSP1_0_1),
+
+       /* IPSR8 */
+       PINMUX_IPSR_DATA(IP8_3_0,       SD0_DAT2),
+       PINMUX_IPSR_MSEL(IP8_3_0,       MSIOF1_SS1_E,           SEL_MSIOF1_4),
+       PINMUX_IPSR_MSEL(IP8_3_0,       TS_SDAT0_B,             SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP8_3_0,       STP_ISD_0_B,            SEL_SSP1_0_1),
+
+       PINMUX_IPSR_DATA(IP8_7_4,       SD0_DAT3),
+       PINMUX_IPSR_MSEL(IP8_7_4,       MSIOF1_SS2_E,           SEL_MSIOF1_4),
+       PINMUX_IPSR_MSEL(IP8_7_4,       TS_SDEN0_B,             SEL_TSIF0_1),
+       PINMUX_IPSR_MSEL(IP8_7_4,       STP_ISEN_0_B,           SEL_SSP1_0_1),
+
+       PINMUX_IPSR_DATA(IP8_11_8,      SD1_CLK),
+       PINMUX_IPSR_MSEL(IP8_11_8,      MSIOF1_SCK_G,           SEL_MSIOF1_6),
+       PINMUX_IPSR_MSEL(IP8_11_8,      SIM0_CLK_A,             SEL_SIMCARD_0),
+
+       PINMUX_IPSR_DATA(IP8_15_12,     SD1_CMD),
+       PINMUX_IPSR_MSEL(IP8_15_12,     MSIOF1_SYNC_G,          SEL_MSIOF1_6),
+       PINMUX_IPSR_MSEL(IP8_15_12,     SIM0_D_A,               SEL_SIMCARD_0),
+       PINMUX_IPSR_MSEL(IP8_15_12,     STP_IVCXO27_1_B,        SEL_SSP1_1_1),
+
+       PINMUX_IPSR_DATA(IP8_19_16,     SD1_DAT0),
+       PINMUX_IPSR_DATA(IP8_19_16,     SD2_DAT4),
+       PINMUX_IPSR_MSEL(IP8_19_16,     MSIOF1_RXD_G,           SEL_MSIOF1_6),
+       PINMUX_IPSR_MSEL(IP8_19_16,     TS_SCK1_B,              SEL_TSIF1_1),
+       PINMUX_IPSR_MSEL(IP8_19_16,     STP_ISCLK_1_B,          SEL_SSP1_1_1),
+
+       PINMUX_IPSR_DATA(IP8_23_20,     SD1_DAT1),
+       PINMUX_IPSR_DATA(IP8_23_20,     SD2_DAT5),
+       PINMUX_IPSR_MSEL(IP8_23_20,     MSIOF1_TXD_G,           SEL_MSIOF1_6),
+       PINMUX_IPSR_MSEL(IP8_23_20,     TS_SPSYNC1_B,           SEL_TSIF1_1),
+       PINMUX_IPSR_MSEL(IP8_23_20,     STP_ISSYNC_1_B,         SEL_SSP1_1_1),
+
+       PINMUX_IPSR_DATA(IP8_27_24,     SD1_DAT2),
+       PINMUX_IPSR_DATA(IP8_27_24,     SD2_DAT6),
+       PINMUX_IPSR_MSEL(IP8_27_24,     MSIOF1_SS1_G,           SEL_MSIOF1_6),
+       PINMUX_IPSR_MSEL(IP8_27_24,     TS_SDAT1_B,             SEL_TSIF1_1),
+       PINMUX_IPSR_MSEL(IP8_27_24,     STP_ISD_1_B,            SEL_SSP1_1_1),
+
+       PINMUX_IPSR_DATA(IP8_31_28,     SD1_DAT3),
+       PINMUX_IPSR_DATA(IP8_31_28,     SD2_DAT7),
+       PINMUX_IPSR_MSEL(IP8_31_28,     MSIOF1_SS2_G,           SEL_MSIOF1_6),
+       PINMUX_IPSR_MSEL(IP8_31_28,     TS_SDEN1_B,             SEL_TSIF1_1),
+       PINMUX_IPSR_MSEL(IP8_31_28,     STP_ISEN_1_B,           SEL_SSP1_1_1),
+
+       /* IPSR9 */
+       PINMUX_IPSR_DATA(IP9_3_0,       SD2_CLK),
+
+       PINMUX_IPSR_DATA(IP9_7_4,       SD2_DAT0),
+
+       PINMUX_IPSR_DATA(IP9_11_8,      SD2_DAT1),
+
+       PINMUX_IPSR_DATA(IP9_15_12,     SD2_DAT2),
+
+       PINMUX_IPSR_DATA(IP9_19_16,     SD2_DAT3),
+
+       PINMUX_IPSR_DATA(IP9_23_20,     SD2_DS),
+       PINMUX_IPSR_MSEL(IP9_23_20,     SATA_DEVSLP_B,          SEL_SCIF_1),
+
+       PINMUX_IPSR_DATA(IP9_27_24,     SD3_DAT4),
+       PINMUX_IPSR_MSEL(IP9_27_24,     SD2_CD_A,               SEL_SDHI2_0),
+
+       PINMUX_IPSR_DATA(IP9_31_28,     SD3_DAT5),
+       PINMUX_IPSR_MSEL(IP9_31_28,     SD2_WP_A,               SEL_SDHI2_0),
+
+       /* IPSR10 */
+       PINMUX_IPSR_DATA(IP10_3_0,      SD3_DAT6),
+       PINMUX_IPSR_DATA(IP10_3_0,      SD3_CD),
+
+       PINMUX_IPSR_DATA(IP10_7_4,      SD3_DAT7),
+       PINMUX_IPSR_DATA(IP10_7_4,      SD3_WP),
+
+       PINMUX_IPSR_DATA(IP10_11_8,     SD0_CD),
+       PINMUX_IPSR_MSEL(IP10_11_8,     SCL2_B,                 SEL_I2C2_1),
+       PINMUX_IPSR_MSEL(IP10_11_8,     SIM0_RST_A,             SEL_SIMCARD_0),
+
+       PINMUX_IPSR_DATA(IP10_15_12,    SD0_WP),
+       PINMUX_IPSR_MSEL(IP10_15_12,    SDA2_B,                 SEL_I2C2_1),
+
+       PINMUX_IPSR_DATA(IP10_19_16,    SD1_CD),
+       PINMUX_IPSR_MSEL(IP10_19_16,    SIM0_CLK_B,             SEL_SIMCARD_1),
+
+       PINMUX_IPSR_DATA(IP10_23_20,    SD1_WP),
+       PINMUX_IPSR_MSEL(IP10_23_20,    SIM0_D_B,               SEL_SIMCARD_1),
+
+       PINMUX_IPSR_DATA(IP10_27_24,    SCK0),
+       PINMUX_IPSR_MSEL(IP10_27_24,    HSCK1_B,                SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP10_27_24,    MSIOF1_SS2_B,           SEL_MSIOF1_1),
+       PINMUX_IPSR_MSEL(IP10_27_24,    AUDIO_CLKC_B,           SEL_ADG_1),
+       PINMUX_IPSR_MSEL(IP10_27_24,    SDA2_A,                 SEL_I2C2_0),
+       PINMUX_IPSR_MSEL(IP10_27_24,    SIM0_RST_B,             SEL_SIMCARD_1),
+       PINMUX_IPSR_MSEL(IP10_27_24,    STP_OPWM_0_C,           SEL_SSP1_0_2),
+       PINMUX_IPSR_MSEL(IP10_27_24,    RIF0_CLK_B,             SEL_DRIF0_1),
+       PINMUX_IPSR_DATA(IP10_27_24,    ADICHS2),
+
+       PINMUX_IPSR_DATA(IP10_31_28,    RX0),
+       PINMUX_IPSR_MSEL(IP10_31_28,    HRX1_B,                 SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP10_31_28,    TS_SCK0_C,              SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP10_31_28,    STP_ISCLK_0_C,          SEL_SSP1_0_2),
+       PINMUX_IPSR_MSEL(IP10_31_28,    RIF0_D0_B,              SEL_DRIF0_1),
+
+       /* IPSR11 */
+       PINMUX_IPSR_DATA(IP11_3_0,      TX0),
+       PINMUX_IPSR_MSEL(IP11_3_0,      HTX1_B,                 SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP11_3_0,      TS_SPSYNC0_C,           SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP11_3_0,      STP_ISSYNC_0_C,         SEL_SSP1_0_2),
+       PINMUX_IPSR_MSEL(IP11_3_0,      RIF0_D1_B,              SEL_DRIF0_1),
+
+       PINMUX_IPSR_DATA(IP11_7_4,      CTS0_N),
+       PINMUX_IPSR_MSEL(IP11_7_4,      HCTS1_N_B,              SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP11_7_4,      MSIOF1_SYNC_B,          SEL_MSIOF1_1),
+       PINMUX_IPSR_MSEL(IP11_7_4,      TS_SPSYNC1_C,           SEL_TSIF1_2),
+       PINMUX_IPSR_MSEL(IP11_7_4,      STP_ISSYNC_1_C,         SEL_SSP1_1_2),
+       PINMUX_IPSR_MSEL(IP11_7_4,      RIF1_SYNC_B,            SEL_DRIF1_1),
+       PINMUX_IPSR_MSEL(IP11_7_4,      AUDIO_CLKOUT_C,         SEL_ADG_2),
+       PINMUX_IPSR_DATA(IP11_7_4,      ADICS_SAMP),
+
+       PINMUX_IPSR_DATA(IP11_11_8,     RTS0_N_TANS),
+       PINMUX_IPSR_MSEL(IP11_11_8,     HRTS1_N_B,              SEL_HSCIF1_1),
+       PINMUX_IPSR_MSEL(IP11_11_8,     MSIOF1_SS1_B,           SEL_MSIOF1_1),
+       PINMUX_IPSR_MSEL(IP11_11_8,     AUDIO_CLKA_B,           SEL_ADG_1),
+       PINMUX_IPSR_MSEL(IP11_11_8,     SCL2_A,                 SEL_I2C2_0),
+       PINMUX_IPSR_MSEL(IP11_11_8,     STP_IVCXO27_1_C,        SEL_SSP1_1_2),
+       PINMUX_IPSR_MSEL(IP11_11_8,     RIF0_SYNC_B,            SEL_DRIF0_1),
+       PINMUX_IPSR_DATA(IP11_11_8,     ADICHS1),
+
+       PINMUX_IPSR_MSEL(IP11_15_12,    RX1_A,                  SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP11_15_12,    HRX1_A,                 SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP11_15_12,    TS_SDAT0_C,             SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP11_15_12,    STP_ISD_0_C,            SEL_SSP1_0_2),
+       PINMUX_IPSR_MSEL(IP11_15_12,    RIF1_CLK_C,             SEL_DRIF1_2),
+
+       PINMUX_IPSR_MSEL(IP11_19_16,    TX1_A,                  SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP11_19_16,    HTX1_A,                 SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP11_19_16,    TS_SDEN0_C,             SEL_TSIF0_2),
+       PINMUX_IPSR_MSEL(IP11_19_16,    STP_ISEN_0_C,           SEL_SSP1_0_2),
+       PINMUX_IPSR_MSEL(IP11_19_16,    RIF1_D0_C,              SEL_DRIF1_2),
+
+       PINMUX_IPSR_DATA(IP11_23_20,    CTS1_N),
+       PINMUX_IPSR_MSEL(IP11_23_20,    HCTS1_N_A,              SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP11_23_20,    MSIOF1_RXD_B,           SEL_MSIOF1_1),
+       PINMUX_IPSR_MSEL(IP11_23_20,    TS_SDEN1_C,             SEL_TSIF1_2),
+       PINMUX_IPSR_MSEL(IP11_23_20,    STP_ISEN_1_C,           SEL_SSP1_1_2),
+       PINMUX_IPSR_MSEL(IP11_23_20,    RIF1_D0_B,              SEL_DRIF1_1),
+       PINMUX_IPSR_DATA(IP11_23_20,    ADIDATA),
+
+       PINMUX_IPSR_DATA(IP11_27_24,    RTS1_N_TANS),
+       PINMUX_IPSR_MSEL(IP11_27_24,    HRTS1_N_A,              SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP11_27_24,    MSIOF1_TXD_B,           SEL_MSIOF1_1),
+       PINMUX_IPSR_MSEL(IP11_27_24,    TS_SDAT1_C,             SEL_TSIF1_2),
+       PINMUX_IPSR_MSEL(IP11_27_24,    STP_ISD_1_C,            SEL_SSP1_1_2),
+       PINMUX_IPSR_MSEL(IP11_27_24,    RIF1_D1_B,              SEL_DRIF1_1),
+       PINMUX_IPSR_DATA(IP11_27_24,    ADICHS0),
+
+       PINMUX_IPSR_DATA(IP11_31_28,    SCK2),
+       PINMUX_IPSR_MSEL(IP11_31_28,    SCIF_CLK_B,             SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP11_31_28,    MSIOF1_SCK_B,           SEL_MSIOF1_1),
+       PINMUX_IPSR_MSEL(IP11_31_28,    TS_SCK1_C,              SEL_TSIF1_2),
+       PINMUX_IPSR_MSEL(IP11_31_28,    STP_ISCLK_1_C,          SEL_SSP1_1_2),
+       PINMUX_IPSR_MSEL(IP11_31_28,    RIF1_CLK_B,             SEL_DRIF1_1),
+       PINMUX_IPSR_DATA(IP11_31_28,    ADICLK),
+
+       /* IPSR12 */
+       PINMUX_IPSR_MSEL(IP12_3_0,      TX2_A,                  SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP12_3_0,      SD2_CD_B,               SEL_SDHI2_1),
+       PINMUX_IPSR_MSEL(IP12_3_0,      SCL1_A,                 SEL_I2C1_0),
+       PINMUX_IPSR_MSEL(IP12_3_0,      FMCLK_A,                SEL_FM_0),
+       PINMUX_IPSR_MSEL(IP12_3_0,      RIF1_D1_C,              SEL_DRIF1_2),
+       PINMUX_IPSR_MSEL(IP12_3_0,      FSO_CFE_0_B,            SEL_FSO_1),
+
+       PINMUX_IPSR_MSEL(IP12_7_4,      RX2_A,                  SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP12_7_4,      SD2_WP_B,               SEL_SDHI2_1),
+       PINMUX_IPSR_MSEL(IP12_7_4,      SDA1_A,                 SEL_I2C1_0),
+       PINMUX_IPSR_MSEL(IP12_7_4,      FMIN_A,                 SEL_FM_0),
+       PINMUX_IPSR_MSEL(IP12_7_4,      RIF1_SYNC_C,            SEL_DRIF1_2),
+       PINMUX_IPSR_MSEL(IP12_7_4,      FSO_CFE_1_B,            SEL_FSO_1),
+
+       PINMUX_IPSR_DATA(IP12_11_8,     HSCK0),
+       PINMUX_IPSR_MSEL(IP12_11_8,     MSIOF1_SCK_D,           SEL_MSIOF1_3),
+       PINMUX_IPSR_MSEL(IP12_11_8,     AUDIO_CLKB_A,           SEL_ADG_0),
+       PINMUX_IPSR_MSEL(IP12_11_8,     SSI_SDATA1_B,           SEL_SSI_1),
+       PINMUX_IPSR_MSEL(IP12_11_8,     TS_SCK0_D,              SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP12_11_8,     STP_ISCLK_0_D,          SEL_SSP1_0_3),
+       PINMUX_IPSR_MSEL(IP12_11_8,     RIF0_CLK_C,             SEL_DRIF0_2),
+
+       PINMUX_IPSR_DATA(IP12_15_12,    HRX0),
+       PINMUX_IPSR_MSEL(IP12_15_12,    MSIOF1_RXD_D,           SEL_MSIOF1_3),
+       PINMUX_IPSR_MSEL(IP12_15_12,    SSI_SDATA2_B,           SEL_SSI_1),
+       PINMUX_IPSR_MSEL(IP12_15_12,    TS_SDEN0_D,             SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP12_15_12,    STP_ISEN_0_D,           SEL_SSP1_0_3),
+       PINMUX_IPSR_MSEL(IP12_15_12,    RIF0_D0_C,              SEL_DRIF0_2),
+
+       PINMUX_IPSR_DATA(IP12_19_16,    HTX0),
+       PINMUX_IPSR_MSEL(IP12_19_16,    MSIOF1_TXD_D,           SEL_MSIOF1_3),
+       PINMUX_IPSR_MSEL(IP12_19_16,    SSI_SDATA9_B,           SEL_SSI_1),
+       PINMUX_IPSR_MSEL(IP12_19_16,    TS_SDAT0_D,             SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP12_19_16,    STP_ISD_0_D,            SEL_SSP1_0_3),
+       PINMUX_IPSR_MSEL(IP12_19_16,    RIF0_D1_C,              SEL_DRIF0_2),
+
+       PINMUX_IPSR_DATA(IP12_23_20,    HCTS0_N),
+       PINMUX_IPSR_MSEL(IP12_23_20,    RX2_B,                  SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP12_23_20,    MSIOF1_SYNC_D,          SEL_MSIOF1_3),
+       PINMUX_IPSR_MSEL(IP12_23_20,    SSI_SCK9_A,             SEL_SSI_0),
+       PINMUX_IPSR_MSEL(IP12_23_20,    TS_SPSYNC0_D,           SEL_TSIF0_3),
+       PINMUX_IPSR_MSEL(IP12_23_20,    STP_ISSYNC_0_D,         SEL_SSP1_0_3),
+       PINMUX_IPSR_MSEL(IP12_23_20,    RIF0_SYNC_C,            SEL_DRIF0_2),
+       PINMUX_IPSR_MSEL(IP12_23_20,    AUDIO_CLKOUT1_A,        SEL_ADG_0),
+
+       PINMUX_IPSR_DATA(IP12_27_24,    HRTS0_N),
+       PINMUX_IPSR_MSEL(IP12_27_24,    TX2_B,                  SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP12_27_24,    MSIOF1_SS1_D,           SEL_MSIOF1_3),
+       PINMUX_IPSR_MSEL(IP12_27_24,    SSI_WS9_A,              SEL_SSI_0),
+       PINMUX_IPSR_MSEL(IP12_27_24,    STP_IVCXO27_0_D,        SEL_SSP1_0_3),
+       PINMUX_IPSR_MSEL(IP12_27_24,    BPFCLK_A,               SEL_FM_0),
+       PINMUX_IPSR_MSEL(IP12_27_24,    AUDIO_CLKOUT2_A,        SEL_ADG_0),
+
+       PINMUX_IPSR_DATA(IP12_31_28,    MSIOF0_SYNC),
+       PINMUX_IPSR_MSEL(IP12_31_28,    AUDIO_CLKOUT_A,         SEL_ADG_0),
+
+       /* IPSR13 */
+       PINMUX_IPSR_DATA(IP13_3_0,      MSIOF0_SS1),
+       PINMUX_IPSR_DATA(IP13_3_0,      RX5),
+       PINMUX_IPSR_MSEL(IP13_3_0,      AUDIO_CLKA_C,           SEL_ADG_2),
+       PINMUX_IPSR_MSEL(IP13_3_0,      SSI_SCK2_A,             SEL_SSI_0),
+       PINMUX_IPSR_MSEL(IP13_3_0,      STP_IVCXO27_0_C,        SEL_SSP1_0_2),
+       PINMUX_IPSR_MSEL(IP13_3_0,      AUDIO_CLKOUT3_A,        SEL_ADG_0),
+       PINMUX_IPSR_MSEL(IP13_3_0,      TCLK1_B,                SEL_TIMER_TMU_1),
+
+       PINMUX_IPSR_DATA(IP13_7_4,      MSIOF0_SS2),
+       PINMUX_IPSR_DATA(IP13_7_4,      TX5),
+       PINMUX_IPSR_MSEL(IP13_7_4,      MSIOF1_SS2_D,           SEL_MSIOF1_3),
+       PINMUX_IPSR_MSEL(IP13_7_4,      AUDIO_CLKC_A,           SEL_ADG_0),
+       PINMUX_IPSR_MSEL(IP13_7_4,      SSI_WS2_A,              SEL_SSI_0),
+       PINMUX_IPSR_MSEL(IP13_7_4,      STP_OPWM_0_D,           SEL_SSP1_0_3),
+       PINMUX_IPSR_MSEL(IP13_7_4,      AUDIO_CLKOUT_D,         SEL_ADG_3),
+       PINMUX_IPSR_MSEL(IP13_7_4,      SPEEDIN_B,              SEL_SPEED_PULSE_1),
+
+       PINMUX_IPSR_DATA(IP13_11_8,     MLB_CLK),
+       PINMUX_IPSR_MSEL(IP13_11_8,     MSIOF1_SCK_F,           SEL_MSIOF1_5),
+       PINMUX_IPSR_MSEL(IP13_11_8,     SCL1_B,                 SEL_I2C1_1),
+
+       PINMUX_IPSR_DATA(IP13_15_12,    MLB_SIG),
+       PINMUX_IPSR_MSEL(IP13_15_12,    RX1_B,                  SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP13_15_12,    MSIOF1_SYNC_F,          SEL_MSIOF1_5),
+       PINMUX_IPSR_MSEL(IP13_15_12,    SDA1_B,                 SEL_I2C1_1),
+
+       PINMUX_IPSR_DATA(IP13_19_16,    MLB_DAT),
+       PINMUX_IPSR_MSEL(IP13_19_16,    TX1_B,                  SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP13_19_16,    MSIOF1_RXD_F,           SEL_MSIOF1_5),
+
+       PINMUX_IPSR_DATA(IP13_23_20,    SSI_SCK0129),
+       PINMUX_IPSR_MSEL(IP13_23_20,    MSIOF1_TXD_F,           SEL_MSIOF1_5),
+
+       PINMUX_IPSR_DATA(IP13_27_24,    SSI_WS0129),
+       PINMUX_IPSR_MSEL(IP13_27_24,    MSIOF1_SS1_F,           SEL_MSIOF1_5),
+
+       PINMUX_IPSR_DATA(IP13_31_28,    SSI_SDATA0),
+       PINMUX_IPSR_MSEL(IP13_31_28,    MSIOF1_SS2_F,           SEL_MSIOF1_5),
+
+       /* IPSR14 */
+       PINMUX_IPSR_MSEL(IP14_3_0,      SSI_SDATA1_A,           SEL_SSI_0),
+
+       PINMUX_IPSR_MSEL(IP14_7_4,      SSI_SDATA2_A,           SEL_SSI_0),
+       PINMUX_IPSR_MSEL(IP14_7_4,      SSI_SCK1_B,             SEL_SSI_1),
+
+       PINMUX_IPSR_DATA(IP14_11_8,     SSI_SCK34),
+       PINMUX_IPSR_MSEL(IP14_11_8,     MSIOF1_SS1_A,           SEL_MSIOF1_0),
+       PINMUX_IPSR_MSEL(IP14_11_8,     STP_OPWM_0_A,           SEL_SSP1_0_0),
+
+       PINMUX_IPSR_DATA(IP14_15_12,    SSI_WS34),
+       PINMUX_IPSR_MSEL(IP14_15_12,    HCTS2_N_A,              SEL_HSCIF2_0),
+       PINMUX_IPSR_MSEL(IP14_15_12,    MSIOF1_SS2_A,           SEL_MSIOF1_0),
+       PINMUX_IPSR_MSEL(IP14_15_12,    STP_IVCXO27_0_A,        SEL_SSP1_0_0),
+
+       PINMUX_IPSR_DATA(IP14_19_16,    SSI_SDATA3),
+       PINMUX_IPSR_MSEL(IP14_19_16,    HRTS2_N_A,              SEL_HSCIF2_0),
+       PINMUX_IPSR_MSEL(IP14_19_16,    MSIOF1_TXD_A,           SEL_MSIOF1_0),
+       PINMUX_IPSR_MSEL(IP14_19_16,    TS_SCK0_A,              SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP14_19_16,    STP_ISCLK_0_A,          SEL_SSP1_0_0),
+       PINMUX_IPSR_MSEL(IP14_19_16,    RIF0_D1_A,              SEL_DRIF0_0),
+       PINMUX_IPSR_MSEL(IP14_19_16,    RIF2_D0_A,              SEL_DRIF2_0),
+
+       PINMUX_IPSR_DATA(IP14_23_20,    SSI_SCK4),
+       PINMUX_IPSR_MSEL(IP14_23_20,    HRX2_A,                 SEL_HSCIF2_0),
+       PINMUX_IPSR_MSEL(IP14_23_20,    MSIOF1_SCK_A,           SEL_MSIOF1_0),
+       PINMUX_IPSR_MSEL(IP14_23_20,    TS_SDAT0_A,             SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP14_23_20,    STP_ISD_0_A,            SEL_SSP1_0_0),
+       PINMUX_IPSR_MSEL(IP14_23_20,    RIF0_CLK_A,             SEL_DRIF0_0),
+       PINMUX_IPSR_MSEL(IP14_23_20,    RIF2_CLK_A,             SEL_DRIF2_0),
+
+       PINMUX_IPSR_DATA(IP14_27_24,    SSI_WS4),
+       PINMUX_IPSR_MSEL(IP14_27_24,    HTX2_A,                 SEL_HSCIF2_0),
+       PINMUX_IPSR_MSEL(IP14_27_24,    MSIOF1_SYNC_A,          SEL_MSIOF1_0),
+       PINMUX_IPSR_MSEL(IP14_27_24,    TS_SDEN0_A,             SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP14_27_24,    STP_ISEN_0_A,           SEL_SSP1_0_0),
+       PINMUX_IPSR_MSEL(IP14_27_24,    RIF0_SYNC_A,            SEL_DRIF0_0),
+       PINMUX_IPSR_MSEL(IP14_27_24,    RIF2_SYNC_A,            SEL_DRIF2_0),
+
+       PINMUX_IPSR_DATA(IP14_31_28,    SSI_SDATA4),
+       PINMUX_IPSR_MSEL(IP14_31_28,    HSCK2_A,                SEL_HSCIF2_0),
+       PINMUX_IPSR_MSEL(IP14_31_28,    MSIOF1_RXD_A,           SEL_MSIOF1_0),
+       PINMUX_IPSR_MSEL(IP14_31_28,    TS_SPSYNC0_A,           SEL_TSIF0_0),
+       PINMUX_IPSR_MSEL(IP14_31_28,    STP_ISSYNC_0_A,         SEL_SSP1_0_0),
+       PINMUX_IPSR_MSEL(IP14_31_28,    RIF0_D0_A,              SEL_DRIF0_0),
+       PINMUX_IPSR_MSEL(IP14_31_28,    RIF2_D1_A,              SEL_DRIF2_0),
+
+       /* IPSR15 */
+       PINMUX_IPSR_DATA(IP15_3_0,      SSI_SCK6),
+       PINMUX_IPSR_DATA(IP15_3_0,      USB2_PWEN),
+       PINMUX_IPSR_MSEL(IP15_3_0,      SIM0_RST_D,             SEL_SIMCARD_3),
+
+       PINMUX_IPSR_DATA(IP15_7_4,      SSI_WS6),
+       PINMUX_IPSR_DATA(IP15_7_4,      USB2_OVC),
+       PINMUX_IPSR_MSEL(IP15_7_4,      SIM0_D_D,               SEL_SIMCARD_3),
+
+       PINMUX_IPSR_DATA(IP15_11_8,     SSI_SDATA6),
+       PINMUX_IPSR_MSEL(IP15_11_8,     SIM0_CLK_D,             SEL_SIMCARD_3),
+       PINMUX_IPSR_MSEL(IP15_11_8,     SATA_DEVSLP_A,          SEL_SCIF_0),
+
+       PINMUX_IPSR_DATA(IP15_15_12,    SSI_SCK78),
+       PINMUX_IPSR_MSEL(IP15_15_12,    HRX2_B,                 SEL_HSCIF2_1),
+       PINMUX_IPSR_MSEL(IP15_15_12,    MSIOF1_SCK_C,           SEL_MSIOF1_2),
+       PINMUX_IPSR_MSEL(IP15_15_12,    TS_SCK1_A,              SEL_TSIF1_0),
+       PINMUX_IPSR_MSEL(IP15_15_12,    STP_ISCLK_1_A,          SEL_SSP1_1_0),
+       PINMUX_IPSR_MSEL(IP15_15_12,    RIF1_CLK_A,             SEL_DRIF1_0),
+       PINMUX_IPSR_MSEL(IP15_15_12,    RIF3_CLK_A,             SEL_DRIF3_0),
+
+       PINMUX_IPSR_DATA(IP15_19_16,    SSI_WS78),
+       PINMUX_IPSR_MSEL(IP15_19_16,    HTX2_B,                 SEL_HSCIF2_1),
+       PINMUX_IPSR_MSEL(IP15_19_16,    MSIOF1_SYNC_C,          SEL_MSIOF1_2),
+       PINMUX_IPSR_MSEL(IP15_19_16,    TS_SDAT1_A,             SEL_TSIF1_0),
+       PINMUX_IPSR_MSEL(IP15_19_16,    STP_ISD_1_A,            SEL_SSP1_1_0),
+       PINMUX_IPSR_MSEL(IP15_19_16,    RIF1_SYNC_A,            SEL_DRIF1_0),
+       PINMUX_IPSR_MSEL(IP15_19_16,    RIF3_SYNC_A,            SEL_DRIF3_0),
+
+       PINMUX_IPSR_DATA(IP15_23_20,    SSI_SDATA7),
+       PINMUX_IPSR_MSEL(IP15_23_20,    HCTS2_N_B,              SEL_HSCIF2_1),
+       PINMUX_IPSR_MSEL(IP15_23_20,    MSIOF1_RXD_C,           SEL_MSIOF1_2),
+       PINMUX_IPSR_MSEL(IP15_23_20,    TS_SDEN1_A,             SEL_TSIF1_0),
+       PINMUX_IPSR_MSEL(IP15_23_20,    STP_ISEN_1_A,           SEL_SSP1_1_0),
+       PINMUX_IPSR_MSEL(IP15_23_20,    RIF1_D0_A,              SEL_DRIF1_0),
+       PINMUX_IPSR_MSEL(IP15_23_20,    RIF3_D0_A,              SEL_DRIF3_0),
+       PINMUX_IPSR_MSEL(IP15_23_20,    TCLK2_A,                SEL_TIMER_TMU_0),
+
+       PINMUX_IPSR_DATA(IP15_27_24,    SSI_SDATA8),
+       PINMUX_IPSR_MSEL(IP15_27_24,    HRTS2_N_B,              SEL_HSCIF2_1),
+       PINMUX_IPSR_MSEL(IP15_27_24,    MSIOF1_TXD_C,           SEL_MSIOF1_2),
+       PINMUX_IPSR_MSEL(IP15_27_24,    TS_SPSYNC1_A,           SEL_TSIF1_0),
+       PINMUX_IPSR_MSEL(IP15_27_24,    STP_ISSYNC_1_A,         SEL_SSP1_1_0),
+       PINMUX_IPSR_MSEL(IP15_27_24,    RIF1_D1_A,              SEL_DRIF1_0),
+       PINMUX_IPSR_MSEL(IP15_27_24,    RIF3_D1_A,              SEL_DRIF3_0),
+
+       PINMUX_IPSR_MSEL(IP15_31_28,    SSI_SDATA9_A,           SEL_SSI_0),
+       PINMUX_IPSR_MSEL(IP15_31_28,    HSCK2_B,                SEL_HSCIF2_1),
+       PINMUX_IPSR_MSEL(IP15_31_28,    MSIOF1_SS1_C,           SEL_MSIOF1_2),
+       PINMUX_IPSR_MSEL(IP15_31_28,    HSCK1_A,                SEL_HSCIF1_0),
+       PINMUX_IPSR_MSEL(IP15_31_28,    SSI_WS1_B,              SEL_SSI_1),
+       PINMUX_IPSR_DATA(IP15_31_28,    SCK1),
+       PINMUX_IPSR_MSEL(IP15_31_28,    STP_IVCXO27_1_A,        SEL_SSP1_1_0),
+       PINMUX_IPSR_DATA(IP15_31_28,    SCK5),
+
+       /* IPSR16 */
+       PINMUX_IPSR_MSEL(IP16_3_0,      AUDIO_CLKA_A,           SEL_ADG_0),
+       PINMUX_IPSR_DATA(IP16_3_0,      CC5_OSCOUT),
+
+       PINMUX_IPSR_MSEL(IP16_7_4,      AUDIO_CLKB_B,           SEL_ADG_1),
+       PINMUX_IPSR_MSEL(IP16_7_4,      SCIF_CLK_A,             SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP16_7_4,      STP_IVCXO27_1_D,        SEL_SSP1_1_3),
+       PINMUX_IPSR_MSEL(IP16_7_4,      REMOCON_A,              SEL_REMOCON_0),
+       PINMUX_IPSR_MSEL(IP16_7_4,      TCLK1_A,                SEL_TIMER_TMU_0),
+
+       PINMUX_IPSR_DATA(IP16_11_8,     USB0_PWEN),
+       PINMUX_IPSR_MSEL(IP16_11_8,     SIM0_RST_C,             SEL_SIMCARD_2),
+       PINMUX_IPSR_MSEL(IP16_11_8,     TS_SCK1_D,              SEL_TSIF1_3),
+       PINMUX_IPSR_MSEL(IP16_11_8,     STP_ISCLK_1_D,          SEL_SSP1_1_3),
+       PINMUX_IPSR_MSEL(IP16_11_8,     BPFCLK_B,               SEL_FM_1),
+       PINMUX_IPSR_MSEL(IP16_11_8,     RIF3_CLK_B,             SEL_DRIF3_1),
+
+       PINMUX_IPSR_DATA(IP16_15_12,    USB0_OVC),
+       PINMUX_IPSR_MSEL(IP16_15_12,    SIM0_D_C,               SEL_SIMCARD_2),
+       PINMUX_IPSR_MSEL(IP16_15_12,    TS_SDAT1_D,             SEL_TSIF1_3),
+       PINMUX_IPSR_MSEL(IP16_15_12,    STP_ISD_1_D,            SEL_SSP1_1_3),
+       PINMUX_IPSR_MSEL(IP16_15_12,    RIF3_SYNC_B,            SEL_DRIF3_1),
+
+       PINMUX_IPSR_DATA(IP16_19_16,    USB1_PWEN),
+       PINMUX_IPSR_MSEL(IP16_19_16,    SIM0_CLK_C,             SEL_SIMCARD_2),
+       PINMUX_IPSR_MSEL(IP16_19_16,    SSI_SCK1_A,             SEL_SSI_0),
+       PINMUX_IPSR_MSEL(IP16_19_16,    TS_SCK0_E,              SEL_TSIF0_4),
+       PINMUX_IPSR_MSEL(IP16_19_16,    STP_ISCLK_0_E,          SEL_SSP1_0_4),
+       PINMUX_IPSR_MSEL(IP16_19_16,    FMCLK_B,                SEL_FM_1),
+       PINMUX_IPSR_MSEL(IP16_19_16,    RIF2_CLK_B,             SEL_DRIF2_1),
+       PINMUX_IPSR_MSEL(IP16_19_16,    SPEEDIN_A,              SEL_SPEED_PULSE_0),
+
+       PINMUX_IPSR_DATA(IP16_23_20,    USB1_OVC),
+       PINMUX_IPSR_MSEL(IP16_23_20,    MSIOF1_SS2_C,           SEL_MSIOF1_2),
+       PINMUX_IPSR_MSEL(IP16_23_20,    SSI_WS1_A,              SEL_SSI_0),
+       PINMUX_IPSR_MSEL(IP16_23_20,    TS_SDAT0_E,             SEL_TSIF0_4),
+       PINMUX_IPSR_MSEL(IP16_23_20,    STP_ISD_0_E,            SEL_SSP1_0_4),
+       PINMUX_IPSR_MSEL(IP16_23_20,    FMIN_B,                 SEL_FM_1),
+       PINMUX_IPSR_MSEL(IP16_23_20,    RIF2_SYNC_B,            SEL_DRIF2_1),
+       PINMUX_IPSR_MSEL(IP16_23_20,    REMOCON_B,              SEL_REMOCON_1),
+
+       PINMUX_IPSR_DATA(IP16_27_24,    USB30_PWEN),
+       PINMUX_IPSR_MSEL(IP16_27_24,    AUDIO_CLKOUT_B,         SEL_ADG_1),
+       PINMUX_IPSR_MSEL(IP16_27_24,    SSI_SCK2_B,             SEL_SSI_1),
+       PINMUX_IPSR_MSEL(IP16_27_24,    TS_SDEN1_D,             SEL_TSIF1_3),
+       PINMUX_IPSR_MSEL(IP16_27_24,    STP_ISEN_1_D,           SEL_SSP1_1_3),
+       PINMUX_IPSR_MSEL(IP16_27_24,    STP_OPWM_0_E,           SEL_SSP1_0_4),
+       PINMUX_IPSR_MSEL(IP16_27_24,    RIF3_D0_B,              SEL_DRIF3_1),
+       PINMUX_IPSR_MSEL(IP16_27_24,    TCLK2_B,                SEL_TIMER_TMU_1),
+       PINMUX_IPSR_DATA(IP16_27_24,    TPU0TO0),
+
+       PINMUX_IPSR_DATA(IP16_31_28,    USB30_OVC),
+       PINMUX_IPSR_MSEL(IP16_31_28,    AUDIO_CLKOUT1_B,        SEL_ADG_1),
+       PINMUX_IPSR_MSEL(IP16_31_28,    SSI_WS2_B,              SEL_SSI_1),
+       PINMUX_IPSR_MSEL(IP16_31_28,    TS_SPSYNC1_D,           SEL_TSIF1_3),
+       PINMUX_IPSR_MSEL(IP16_31_28,    STP_ISSYNC_1_D,         SEL_SSP1_1_3),
+       PINMUX_IPSR_MSEL(IP16_31_28,    STP_IVCXO27_0_E,        SEL_SSP1_0_4),
+       PINMUX_IPSR_MSEL(IP16_31_28,    RIF3_D1_B,              SEL_DRIF3_1),
+       PINMUX_IPSR_MSEL(IP16_31_28,    FSO_TOE_B,              SEL_FSO_1),
+       PINMUX_IPSR_DATA(IP16_31_28,    TPU0TO1),
+
+       /* IPSR17 */
+       PINMUX_IPSR_DATA(IP17_3_0,      USB31_PWEN),
+       PINMUX_IPSR_MSEL(IP17_3_0,      AUDIO_CLKOUT2_B,        SEL_ADG_1),
+       PINMUX_IPSR_MSEL(IP17_3_0,      SSI_SCK9_B,             SEL_SSI_1),
+       PINMUX_IPSR_MSEL(IP17_3_0,      TS_SDEN0_E,             SEL_TSIF0_4),
+       PINMUX_IPSR_MSEL(IP17_3_0,      STP_ISEN_0_E,           SEL_SSP1_0_4),
+       PINMUX_IPSR_MSEL(IP17_3_0,      RIF2_D0_B,              SEL_DRIF2_1),
+       PINMUX_IPSR_DATA(IP17_3_0,      TPU0TO2),
+
+       PINMUX_IPSR_DATA(IP17_7_4,      USB31_OVC),
+       PINMUX_IPSR_MSEL(IP17_7_4,      AUDIO_CLKOUT3_B,        SEL_ADG_1),
+       PINMUX_IPSR_MSEL(IP17_7_4,      SSI_WS9_B,              SEL_SSI_1),
+       PINMUX_IPSR_MSEL(IP17_7_4,      TS_SPSYNC0_E,           SEL_TSIF0_4),
+       PINMUX_IPSR_MSEL(IP17_7_4,      STP_ISSYNC_0_E,         SEL_SSP1_0_4),
+       PINMUX_IPSR_MSEL(IP17_7_4,      RIF2_D1_B,              SEL_DRIF2_1),
+       PINMUX_IPSR_DATA(IP17_7_4,      TPU0TO3),
+
+       /* I2C */
+       PINMUX_IPSR_NOGP(0,             I2C_SEL_0_1),
+       PINMUX_IPSR_NOGP(0,             I2C_SEL_3_1),
+       PINMUX_IPSR_NOGP(0,             I2C_SEL_5_1),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+       PINMUX_GPIO_GP_ALL(),
+};
+
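+/*
+ * Pin groups: each <group>_pins[] array lists the physical pins as
+ * RCAR_GP_PIN(bank, offset), and the matching <group>_mux[] array lists the
+ * corresponding function marks in the same order.
+ */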
+/* - AUDIO CLOCK ------------------------------------------------------------ */
+static const unsigned int audio_clk_a_a_pins[] = {
+       /* CLK A */
+       RCAR_GP_PIN(6, 22),
+};
+static const unsigned int audio_clk_a_a_mux[] = {
+       AUDIO_CLKA_A_MARK,
+};
+static const unsigned int audio_clk_a_b_pins[] = {
+       /* CLK A */
+       RCAR_GP_PIN(5, 4),
+};
+static const unsigned int audio_clk_a_b_mux[] = {
+       AUDIO_CLKA_B_MARK,
+};
+static const unsigned int audio_clk_a_c_pins[] = {
+       /* CLK A */
+       RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clk_a_c_mux[] = {
+       AUDIO_CLKA_C_MARK,
+};
+static const unsigned int audio_clk_b_a_pins[] = {
+       /* CLK B */
+       RCAR_GP_PIN(5, 12),
+};
+static const unsigned int audio_clk_b_a_mux[] = {
+       AUDIO_CLKB_A_MARK,
+};
+static const unsigned int audio_clk_b_b_pins[] = {
+       /* CLK B */
+       RCAR_GP_PIN(6, 23),
+};
+static const unsigned int audio_clk_b_b_mux[] = {
+       AUDIO_CLKB_B_MARK,
+};
+static const unsigned int audio_clk_c_a_pins[] = {
+       /* CLK C */
+       RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clk_c_a_mux[] = {
+       AUDIO_CLKC_A_MARK,
+};
+static const unsigned int audio_clk_c_b_pins[] = {
+       /* CLK C */
+       RCAR_GP_PIN(5, 0),
+};
+static const unsigned int audio_clk_c_b_mux[] = {
+       AUDIO_CLKC_B_MARK,
+};
+static const unsigned int audio_clkout_a_pins[] = {
+       /* CLKOUT */
+       RCAR_GP_PIN(5, 18),
+};
+static const unsigned int audio_clkout_a_mux[] = {
+       AUDIO_CLKOUT_A_MARK,
+};
+static const unsigned int audio_clkout_b_pins[] = {
+       /* CLKOUT */
+       RCAR_GP_PIN(6, 28),
+};
+static const unsigned int audio_clkout_b_mux[] = {
+       AUDIO_CLKOUT_B_MARK,
+};
+static const unsigned int audio_clkout_c_pins[] = {
+       /* CLKOUT */
+       RCAR_GP_PIN(5, 3),
+};
+static const unsigned int audio_clkout_c_mux[] = {
+       AUDIO_CLKOUT_C_MARK,
+};
+static const unsigned int audio_clkout_d_pins[] = {
+       /* CLKOUT */
+       RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clkout_d_mux[] = {
+       AUDIO_CLKOUT_D_MARK,
+};
+static const unsigned int audio_clkout1_a_pins[] = {
+       /* CLKOUT1 */
+       RCAR_GP_PIN(5, 15),
+};
+static const unsigned int audio_clkout1_a_mux[] = {
+       AUDIO_CLKOUT1_A_MARK,
+};
+static const unsigned int audio_clkout1_b_pins[] = {
+       /* CLKOUT1 */
+       RCAR_GP_PIN(6, 29),
+};
+static const unsigned int audio_clkout1_b_mux[] = {
+       AUDIO_CLKOUT1_B_MARK,
+};
+static const unsigned int audio_clkout2_a_pins[] = {
+       /* CLKOUT2 */
+       RCAR_GP_PIN(5, 16),
+};
+static const unsigned int audio_clkout2_a_mux[] = {
+       AUDIO_CLKOUT2_A_MARK,
+};
+static const unsigned int audio_clkout2_b_pins[] = {
+       /* CLKOUT2 */
+       RCAR_GP_PIN(6, 30),
+};
+static const unsigned int audio_clkout2_b_mux[] = {
+       AUDIO_CLKOUT2_B_MARK,
+};
+
+static const unsigned int audio_clkout3_a_pins[] = {
+       /* CLKOUT3 */
+       RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clkout3_a_mux[] = {
+       AUDIO_CLKOUT3_A_MARK,
+};
+static const unsigned int audio_clkout3_b_pins[] = {
+       /* CLKOUT3 */
+       RCAR_GP_PIN(6, 31),
+};
+static const unsigned int audio_clkout3_b_mux[] = {
+       AUDIO_CLKOUT3_B_MARK,
+};
+
+/* - EtherAVB --------------------------------------------------------------- */
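+/*
+ * The function and mark enums are both generated from the same GPSR, IPSR
+ * and MOD_SEL tables: FM() is first redefined to emit FN_<name> identifiers,
+ * then redefined to emit <name>_MARK values, while F_() drops the reserved
+ * entries in both passes.
+ */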
+static const unsigned int avb_link_pins[] = {
+       /* AVB_LINK */
+       RCAR_GP_PIN(2, 12),
+};
+static const unsigned int avb_link_mux[] = {
+       AVB_LINK_MARK,
+};
+static const unsigned int avb_magic_pins[] = {
+       /* AVB_MAGIC */
+       RCAR_GP_PIN(2, 10),
+};
+static const unsigned int avb_magic_mux[] = {
+       AVB_MAGIC_MARK,
+};
+static const unsigned int avb_phy_int_pins[] = {
+       /* AVB_PHY_INT */
+       RCAR_GP_PIN(2, 11),
+};
+static const unsigned int avb_phy_int_mux[] = {
+       AVB_PHY_INT_MARK,
+};
+static const unsigned int avb_mdc_pins[] = {
+       /* AVB_MDC */
+       RCAR_GP_PIN(2, 9),
+};
+static const unsigned int avb_mdc_mux[] = {
+       AVB_MDC_MARK,
+};
+static const unsigned int avb_avtp_pps_pins[] = {
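+/*
+ * PINMUX_IPSR_DATA() describes a function selected by the IPSR field alone,
+ * whereas PINMUX_IPSR_MSEL() additionally requires the named MOD_SEL setting
+ * (e.g. SEL_MSIOF2_2) for the alternative function to be routed.
+ */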
+       /* AVB_AVTP_PPS */
+       RCAR_GP_PIN(2, 6),
+};
+static const unsigned int avb_avtp_pps_mux[] = {
+       AVB_AVTP_PPS_MARK,
+};
+static const unsigned int avb_avtp_match_a_pins[] = {
+       /* AVB_AVTP_MATCH_A */
+       RCAR_GP_PIN(2, 13),
+};
+static const unsigned int avb_avtp_match_a_mux[] = {
+       AVB_AVTP_MATCH_A_MARK,
+};
+static const unsigned int avb_avtp_capture_a_pins[] = {
+       /* AVB_AVTP_CAPTURE_A */
+       RCAR_GP_PIN(2, 14),
+};
+static const unsigned int avb_avtp_capture_a_mux[] = {
+       AVB_AVTP_CAPTURE_A_MARK,
+};
+static const unsigned int avb_avtp_match_b_pins[] = {
+       /* AVB_AVTP_MATCH_B */
+       RCAR_GP_PIN(1, 8),
+};
+static const unsigned int avb_avtp_match_b_mux[] = {
+       AVB_AVTP_MATCH_B_MARK,
+};
+static const unsigned int avb_avtp_capture_b_pins[] = {
+       /* AVB_AVTP_CAPTURE_B */
+       RCAR_GP_PIN(1, 11),
+};
+static const unsigned int avb_avtp_capture_b_mux[] = {
+       AVB_AVTP_CAPTURE_B_MARK,
+};
+
+/* - I2C -------------------------------------------------------------------- */
+static const unsigned int i2c1_a_pins[] = {
+       /* SDA, SCL */
+       RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int i2c1_a_mux[] = {
+       SDA1_A_MARK, SCL1_A_MARK,
+};
+static const unsigned int i2c1_b_pins[] = {
+       /* SDA, SCL */
+       RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
+};
+static const unsigned int i2c1_b_mux[] = {
+       SDA1_B_MARK, SCL1_B_MARK,
+};
+static const unsigned int i2c2_a_pins[] = {
+       /* SDA, SCL */
+       RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int i2c2_a_mux[] = {
+       SDA2_A_MARK, SCL2_A_MARK,
+};
+static const unsigned int i2c2_b_pins[] = {
+       /* SDA, SCL */
+       RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
+};
+static const unsigned int i2c2_b_mux[] = {
+       SDA2_B_MARK, SCL2_B_MARK,
+};
+static const unsigned int i2c6_a_pins[] = {
+       /* SDA, SCL */
+       RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int i2c6_a_mux[] = {
+       SDA6_A_MARK, SCL6_A_MARK,
+};
+static const unsigned int i2c6_b_pins[] = {
+       /* SDA, SCL */
+       RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int i2c6_b_mux[] = {
+       SDA6_B_MARK, SCL6_B_MARK,
+};
+static const unsigned int i2c6_c_pins[] = {
+       /* SDA, SCL */
+       RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int i2c6_c_mux[] = {
+       SDA6_C_MARK, SCL6_C_MARK,
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+       /* RX, TX */
+       RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int scif0_data_mux[] = {
+       RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(5, 0),
+};
+static const unsigned int scif0_clk_mux[] = {
+       SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+       /* RTS, CTS */
+       RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+       RTS0_N_TANS_MARK, CTS0_N_MARK,
+};
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_a_pins[] = {
+       /* RX, TX */
+       RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int scif1_data_a_mux[] = {
+       RX1_A_MARK, TX1_A_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(6, 21),
+};
+static const unsigned int scif1_clk_mux[] = {
+       SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+       /* RTS, CTS */
+       RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
+};
+static const unsigned int scif1_ctrl_mux[] = {
+       RTS1_N_TANS_MARK, CTS1_N_MARK,
+};
+
+static const unsigned int scif1_data_b_pins[] = {
+       /* RX, TX */
+       RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 25),
+};
+static const unsigned int scif1_data_b_mux[] = {
+       RX1_B_MARK, TX1_B_MARK,
+};
+/* - SCIF2 ------------------------------------------------------------------ */
+static const unsigned int scif2_data_a_pins[] = {
+       /* RX, TX */
+       RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int scif2_data_a_mux[] = {
+       RX2_A_MARK, TX2_A_MARK,
+};
+static const unsigned int scif2_clk_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(5, 9),
+};
+static const unsigned int scif2_clk_mux[] = {
+       SCK2_MARK,
+};
+static const unsigned int scif2_data_b_pins[] = {
+       /* RX, TX */
+       RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int scif2_data_b_mux[] = {
+       RX2_B_MARK, TX2_B_MARK,
+};
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+       /* RX, TX */
+       RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int scif3_data_a_mux[] = {
+       RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(1, 22),
+};
+static const unsigned int scif3_clk_mux[] = {
+       SCK3_MARK,
+};
+static const unsigned int scif3_ctrl_pins[] = {
+       /* RTS, CTS */
+       RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int scif3_ctrl_mux[] = {
+       RTS3_N_TANS_MARK, CTS3_N_MARK,
+};
+static const unsigned int scif3_data_b_pins[] = {
+       /* RX, TX */
+       RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int scif3_data_b_mux[] = {
+       RX3_B_MARK, TX3_B_MARK,
+};
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_a_pins[] = {
+       /* RX, TX */
+       RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 12),
+};
+static const unsigned int scif4_data_a_mux[] = {
+       RX4_A_MARK, TX4_A_MARK,
+};
+static const unsigned int scif4_clk_a_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(2, 10),
+};
+static const unsigned int scif4_clk_a_mux[] = {
+       SCK4_A_MARK,
+};
+static const unsigned int scif4_ctrl_a_pins[] = {
+       /* RTS, CTS */
+       RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
+};
+static const unsigned int scif4_ctrl_a_mux[] = {
+       RTS4_N_TANS_A_MARK, CTS4_N_A_MARK,
+};
+static const unsigned int scif4_data_b_pins[] = {
+       /* RX, TX */
+       RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int scif4_data_b_mux[] = {
+       RX4_B_MARK, TX4_B_MARK,
+};
+static const unsigned int scif4_clk_b_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(1, 5),
+};
+static const unsigned int scif4_clk_b_mux[] = {
+       SCK4_B_MARK,
+};
+static const unsigned int scif4_ctrl_b_pins[] = {
+       /* RTS, CTS */
+       RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int scif4_ctrl_b_mux[] = {
+       RTS4_N_TANS_B_MARK, CTS4_N_B_MARK,
+};
+static const unsigned int scif4_data_c_pins[] = {
+       /* RX, TX */
+       RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+};
+static const unsigned int scif4_data_c_mux[] = {
+       RX4_C_MARK, TX4_C_MARK,
+};
+static const unsigned int scif4_clk_c_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(0, 8),
+};
+static const unsigned int scif4_clk_c_mux[] = {
+       SCK4_C_MARK,
+};
+static const unsigned int scif4_ctrl_c_pins[] = {
+       /* RTS, CTS */
+       RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+};
+static const unsigned int scif4_ctrl_c_mux[] = {
+       RTS4_N_TANS_C_MARK, CTS4_N_C_MARK,
+};
+/* - SCIF5 ------------------------------------------------------------------ */
+static const unsigned int scif5_data_pins[] = {
+       /* RX, TX */
+       RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int scif5_data_mux[] = {
+       RX5_MARK, TX5_MARK,
+};
+static const unsigned int scif5_clk_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(6, 21),
+};
+static const unsigned int scif5_clk_mux[] = {
+       SCK5_MARK,
+};
+
+/* - SSI -------------------------------------------------------------------- */
+static const unsigned int ssi0_data_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(6, 2),
+};
+static const unsigned int ssi0_data_mux[] = {
+       SSI_SDATA0_MARK,
+};
+static const unsigned int ssi01239_ctrl_pins[] = {
+       /* SCK, WS */
+       RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
+};
+static const unsigned int ssi01239_ctrl_mux[] = {
+       SSI_SCK0129_MARK, SSI_WS0129_MARK,
+};
+static const unsigned int ssi1_data_a_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(6, 3),
+};
+static const unsigned int ssi1_data_a_mux[] = {
+       SSI_SDATA1_A_MARK,
+};
+static const unsigned int ssi1_data_b_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(5, 12),
+};
+static const unsigned int ssi1_data_b_mux[] = {
+       SSI_SDATA1_B_MARK,
+};
+static const unsigned int ssi1_ctrl_a_pins[] = {
+       /* SCK, WS */
+       RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int ssi1_ctrl_a_mux[] = {
+       SSI_SCK1_A_MARK, SSI_WS1_A_MARK,
+};
+static const unsigned int ssi1_ctrl_b_pins[] = {
+       /* SCK, WS */
+       RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi1_ctrl_b_mux[] = {
+       SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
+};
+static const unsigned int ssi2_data_a_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(6, 4),
+};
+static const unsigned int ssi2_data_a_mux[] = {
+       SSI_SDATA2_A_MARK,
+};
+static const unsigned int ssi2_data_b_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(5, 13),
+};
+static const unsigned int ssi2_data_b_mux[] = {
+       SSI_SDATA2_B_MARK,
+};
+static const unsigned int ssi2_ctrl_a_pins[] = {
+       /* SCK, WS */
+       RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int ssi2_ctrl_a_mux[] = {
+       SSI_SCK2_A_MARK, SSI_WS2_A_MARK,
+};
+static const unsigned int ssi2_ctrl_b_pins[] = {
+       /* SCK, WS */
+       RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int ssi2_ctrl_b_mux[] = {
+       SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
+};
+static const unsigned int ssi3_data_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(6, 7),
+};
+static const unsigned int ssi3_data_mux[] = {
+       SSI_SDATA3_MARK,
+};
+static const unsigned int ssi34_ctrl_pins[] = {
+       /* SCK, WS */
+       RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int ssi34_ctrl_mux[] = {
+       SSI_SCK34_MARK, SSI_WS34_MARK,
+};
+static const unsigned int ssi4_data_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(6, 10),
+};
+static const unsigned int ssi4_data_mux[] = {
+       SSI_SDATA4_MARK,
+};
+static const unsigned int ssi4_ctrl_pins[] = {
+       /* SCK, WS */
+       RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int ssi4_ctrl_mux[] = {
+       SSI_SCK4_MARK, SSI_WS4_MARK,
+};
+static const unsigned int ssi5_data_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(6, 13),
+};
+static const unsigned int ssi5_data_mux[] = {
+       SSI_SDATA5_MARK,
+};
+static const unsigned int ssi5_ctrl_pins[] = {
+       /* SCK, WS */
+       RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
+};
+static const unsigned int ssi5_ctrl_mux[] = {
+       SSI_SCK5_MARK, SSI_WS5_MARK,
+};
+static const unsigned int ssi6_data_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(6, 16),
+};
+static const unsigned int ssi6_data_mux[] = {
+       SSI_SDATA6_MARK,
+};
+static const unsigned int ssi6_ctrl_pins[] = {
+       /* SCK, WS */
+       RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int ssi6_ctrl_mux[] = {
+       SSI_SCK6_MARK, SSI_WS6_MARK,
+};
+static const unsigned int ssi7_data_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(6, 19),
+};
+static const unsigned int ssi7_data_mux[] = {
+       SSI_SDATA7_MARK,
+};
+static const unsigned int ssi78_ctrl_pins[] = {
+       /* SCK, WS */
+       RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int ssi78_ctrl_mux[] = {
+       SSI_SCK78_MARK, SSI_WS78_MARK,
+};
+static const unsigned int ssi8_data_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(6, 20),
+};
+static const unsigned int ssi8_data_mux[] = {
+       SSI_SDATA8_MARK,
+};
+static const unsigned int ssi9_data_a_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi9_data_a_mux[] = {
+       SSI_SDATA9_A_MARK,
+};
+static const unsigned int ssi9_data_b_pins[] = {
+       /* SDATA */
+       RCAR_GP_PIN(5, 14),
+};
+static const unsigned int ssi9_data_b_mux[] = {
+       SSI_SDATA9_B_MARK,
+};
+static const unsigned int ssi9_ctrl_a_pins[] = {
+       /* SCK, WS */
+       RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int ssi9_ctrl_a_mux[] = {
+       SSI_SCK9_A_MARK, SSI_WS9_A_MARK,
+};
+static const unsigned int ssi9_ctrl_b_pins[] = {
+       /* SCK, WS */
+       RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int ssi9_ctrl_b_mux[] = {
+       SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
+};
+
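+/*
+ * Each SH_PFC_PIN_GROUP(x) entry below picks up the x_pins[] and x_mux[]
+ * arrays defined above, so every group name exposed to the pinctrl core
+ * maps to exactly one physical pin list and its matching function marks.
+ */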
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+       SH_PFC_PIN_GROUP(audio_clk_a_a),
+       SH_PFC_PIN_GROUP(audio_clk_a_b),
+       SH_PFC_PIN_GROUP(audio_clk_a_c),
+       SH_PFC_PIN_GROUP(audio_clk_b_a),
+       SH_PFC_PIN_GROUP(audio_clk_b_b),
+       SH_PFC_PIN_GROUP(audio_clk_c_a),
+       SH_PFC_PIN_GROUP(audio_clk_c_b),
+       SH_PFC_PIN_GROUP(audio_clkout_a),
+       SH_PFC_PIN_GROUP(audio_clkout_b),
+       SH_PFC_PIN_GROUP(audio_clkout_c),
+       SH_PFC_PIN_GROUP(audio_clkout_d),
+       SH_PFC_PIN_GROUP(audio_clkout1_a),
+       SH_PFC_PIN_GROUP(audio_clkout1_b),
+       SH_PFC_PIN_GROUP(audio_clkout2_a),
+       SH_PFC_PIN_GROUP(audio_clkout2_b),
+       SH_PFC_PIN_GROUP(audio_clkout3_a),
+       SH_PFC_PIN_GROUP(audio_clkout3_b),
+       SH_PFC_PIN_GROUP(avb_link),
+       SH_PFC_PIN_GROUP(avb_magic),
+       SH_PFC_PIN_GROUP(avb_phy_int),
+       SH_PFC_PIN_GROUP(avb_mdc),
+       SH_PFC_PIN_GROUP(avb_avtp_pps),
+       SH_PFC_PIN_GROUP(avb_avtp_match_a),
+       SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+       SH_PFC_PIN_GROUP(avb_avtp_match_b),
+       SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+       SH_PFC_PIN_GROUP(i2c1_a),
+       SH_PFC_PIN_GROUP(i2c1_b),
+       SH_PFC_PIN_GROUP(i2c2_a),
+       SH_PFC_PIN_GROUP(i2c2_b),
+       SH_PFC_PIN_GROUP(i2c6_a),
+       SH_PFC_PIN_GROUP(i2c6_b),
+       SH_PFC_PIN_GROUP(i2c6_c),
+       SH_PFC_PIN_GROUP(scif0_data),
+       SH_PFC_PIN_GROUP(scif0_clk),
+       SH_PFC_PIN_GROUP(scif0_ctrl),
+       SH_PFC_PIN_GROUP(scif1_data_a),
+       SH_PFC_PIN_GROUP(scif1_clk),
+       SH_PFC_PIN_GROUP(scif1_ctrl),
+       SH_PFC_PIN_GROUP(scif1_data_b),
+       SH_PFC_PIN_GROUP(scif2_data_a),
+       SH_PFC_PIN_GROUP(scif2_clk),
+       SH_PFC_PIN_GROUP(scif2_data_b),
+       SH_PFC_PIN_GROUP(scif3_data_a),
+       SH_PFC_PIN_GROUP(scif3_clk),
+       SH_PFC_PIN_GROUP(scif3_ctrl),
+       SH_PFC_PIN_GROUP(scif3_data_b),
+       SH_PFC_PIN_GROUP(scif4_data_a),
+       SH_PFC_PIN_GROUP(scif4_clk_a),
+       SH_PFC_PIN_GROUP(scif4_ctrl_a),
+       SH_PFC_PIN_GROUP(scif4_data_b),
+       SH_PFC_PIN_GROUP(scif4_clk_b),
+       SH_PFC_PIN_GROUP(scif4_ctrl_b),
+       SH_PFC_PIN_GROUP(scif4_data_c),
+       SH_PFC_PIN_GROUP(scif4_clk_c),
+       SH_PFC_PIN_GROUP(scif4_ctrl_c),
+       SH_PFC_PIN_GROUP(scif5_data),
+       SH_PFC_PIN_GROUP(scif5_clk),
+       SH_PFC_PIN_GROUP(ssi0_data),
+       SH_PFC_PIN_GROUP(ssi01239_ctrl),
+       SH_PFC_PIN_GROUP(ssi1_data_a),
+       SH_PFC_PIN_GROUP(ssi1_data_b),
+       SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+       SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+       SH_PFC_PIN_GROUP(ssi2_data_a),
+       SH_PFC_PIN_GROUP(ssi2_data_b),
+       SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+       SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+       SH_PFC_PIN_GROUP(ssi3_data),
+       SH_PFC_PIN_GROUP(ssi34_ctrl),
+       SH_PFC_PIN_GROUP(ssi4_data),
+       SH_PFC_PIN_GROUP(ssi4_ctrl),
+       SH_PFC_PIN_GROUP(ssi5_data),
+       SH_PFC_PIN_GROUP(ssi5_ctrl),
+       SH_PFC_PIN_GROUP(ssi6_data),
+       SH_PFC_PIN_GROUP(ssi6_ctrl),
+       SH_PFC_PIN_GROUP(ssi7_data),
+       SH_PFC_PIN_GROUP(ssi78_ctrl),
+       SH_PFC_PIN_GROUP(ssi8_data),
+       SH_PFC_PIN_GROUP(ssi9_data_a),
+       SH_PFC_PIN_GROUP(ssi9_data_b),
+       SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+       SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+};
+
+static const char * const audio_clk_groups[] = {
+       "audio_clk_a_a",
+       "audio_clk_a_b",
+       "audio_clk_a_c",
+       "audio_clk_b_a",
+       "audio_clk_b_b",
+       "audio_clk_c_a",
+       "audio_clk_c_b",
+       "audio_clkout_a",
+       "audio_clkout_b",
+       "audio_clkout_c",
+       "audio_clkout_d",
+       "audio_clkout1_a",
+       "audio_clkout1_b",
+       "audio_clkout2_a",
+       "audio_clkout2_b",
+       "audio_clkout3_a",
+       "audio_clkout3_b",
+};
+
+static const char * const avb_groups[] = {
+       "avb_link",
+       "avb_magic",
+       "avb_phy_int",
+       "avb_mdc",
+       "avb_avtp_pps",
+       "avb_avtp_match_a",
+       "avb_avtp_capture_a",
+       "avb_avtp_match_b",
+       "avb_avtp_capture_b",
+};
+
+static const char * const i2c1_groups[] = {
+       "i2c1_a",
+       "i2c1_b",
+};
+
+static const char * const i2c2_groups[] = {
+       "i2c2_a",
+       "i2c2_b",
+};
+
+static const char * const i2c6_groups[] = {
+       "i2c6_a",
+       "i2c6_b",
+       "i2c6_c",
+};
+
+static const char * const scif0_groups[] = {
+       "scif0_data",
+       "scif0_clk",
+       "scif0_ctrl",
+};
+
+static const char * const scif1_groups[] = {
+       "scif1_data_a",
+       "scif1_clk",
+       "scif1_ctrl",
+       "scif1_data_b",
+};
+
+static const char * const scif2_groups[] = {
+       "scif2_data_a",
+       "scif2_clk",
+       "scif2_data_b",
+};
+
+static const char * const scif3_groups[] = {
+       "scif3_data_a",
+       "scif3_clk",
+       "scif3_ctrl",
+       "scif3_data_b",
+};
+
+static const char * const scif4_groups[] = {
+       "scif4_data_a",
+       "scif4_clk_a",
+       "scif4_ctrl_a",
+       "scif4_data_b",
+       "scif4_clk_b",
+       "scif4_ctrl_b",
+       "scif4_data_c",
+       "scif4_clk_c",
+       "scif4_ctrl_c",
+};
+
+static const char * const scif5_groups[] = {
+       "scif5_data",
+       "scif5_clk",
+};
+
+static const char * const ssi_groups[] = {
+       "ssi0_data",
+       "ssi01239_ctrl",
+       "ssi1_data_a",
+       "ssi1_data_b",
+       "ssi1_ctrl_a",
+       "ssi1_ctrl_b",
+       "ssi2_data_a",
+       "ssi2_data_b",
+       "ssi2_ctrl_a",
+       "ssi2_ctrl_b",
+       "ssi3_data",
+       "ssi34_ctrl",
+       "ssi4_data",
+       "ssi4_ctrl",
+       "ssi5_data",
+       "ssi5_ctrl",
+       "ssi6_data",
+       "ssi6_ctrl",
+       "ssi7_data",
+       "ssi78_ctrl",
+       "ssi8_data",
+       "ssi9_data_a",
+       "ssi9_data_b",
+       "ssi9_ctrl_a",
+       "ssi9_ctrl_b",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+       SH_PFC_FUNCTION(audio_clk),
+       SH_PFC_FUNCTION(avb),
+       SH_PFC_FUNCTION(i2c1),
+       SH_PFC_FUNCTION(i2c2),
+       SH_PFC_FUNCTION(i2c6),
+       SH_PFC_FUNCTION(scif0),
+       SH_PFC_FUNCTION(scif1),
+       SH_PFC_FUNCTION(scif2),
+       SH_PFC_FUNCTION(scif3),
+       SH_PFC_FUNCTION(scif4),
+       SH_PFC_FUNCTION(scif5),
+       SH_PFC_FUNCTION(ssi),
+};
+
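+/*
+ * Configuration registers: each 1-bit GPSRn field switches a pin between
+ * GPIO and peripheral function, each 4-bit IPSRn field selects which
+ * peripheral function is routed to the pin, and the MOD_SELn bits pick
+ * between the alternative (_a/_b/_c) pin assignments of a module.
+ */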
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y)       FN_##y
+#define FM(x)          FN_##x
+       { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               GP_0_15_FN,     GPSR0_15,
+               GP_0_14_FN,     GPSR0_14,
+               GP_0_13_FN,     GPSR0_13,
+               GP_0_12_FN,     GPSR0_12,
+               GP_0_11_FN,     GPSR0_11,
+               GP_0_10_FN,     GPSR0_10,
+               GP_0_9_FN,      GPSR0_9,
+               GP_0_8_FN,      GPSR0_8,
+               GP_0_7_FN,      GPSR0_7,
+               GP_0_6_FN,      GPSR0_6,
+               GP_0_5_FN,      GPSR0_5,
+               GP_0_4_FN,      GPSR0_4,
+               GP_0_3_FN,      GPSR0_3,
+               GP_0_2_FN,      GPSR0_2,
+               GP_0_1_FN,      GPSR0_1,
+               GP_0_0_FN,      GPSR0_0, }
+       },
+       { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               GP_1_27_FN,     GPSR1_27,
+               GP_1_26_FN,     GPSR1_26,
+               GP_1_25_FN,     GPSR1_25,
+               GP_1_24_FN,     GPSR1_24,
+               GP_1_23_FN,     GPSR1_23,
+               GP_1_22_FN,     GPSR1_22,
+               GP_1_21_FN,     GPSR1_21,
+               GP_1_20_FN,     GPSR1_20,
+               GP_1_19_FN,     GPSR1_19,
+               GP_1_18_FN,     GPSR1_18,
+               GP_1_17_FN,     GPSR1_17,
+               GP_1_16_FN,     GPSR1_16,
+               GP_1_15_FN,     GPSR1_15,
+               GP_1_14_FN,     GPSR1_14,
+               GP_1_13_FN,     GPSR1_13,
+               GP_1_12_FN,     GPSR1_12,
+               GP_1_11_FN,     GPSR1_11,
+               GP_1_10_FN,     GPSR1_10,
+               GP_1_9_FN,      GPSR1_9,
+               GP_1_8_FN,      GPSR1_8,
+               GP_1_7_FN,      GPSR1_7,
+               GP_1_6_FN,      GPSR1_6,
+               GP_1_5_FN,      GPSR1_5,
+               GP_1_4_FN,      GPSR1_4,
+               GP_1_3_FN,      GPSR1_3,
+               GP_1_2_FN,      GPSR1_2,
+               GP_1_1_FN,      GPSR1_1,
+               GP_1_0_FN,      GPSR1_0, }
+       },
+       { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               GP_2_14_FN,     GPSR2_14,
+               GP_2_13_FN,     GPSR2_13,
+               GP_2_12_FN,     GPSR2_12,
+               GP_2_11_FN,     GPSR2_11,
+               GP_2_10_FN,     GPSR2_10,
+               GP_2_9_FN,      GPSR2_9,
+               GP_2_8_FN,      GPSR2_8,
+               GP_2_7_FN,      GPSR2_7,
+               GP_2_6_FN,      GPSR2_6,
+               GP_2_5_FN,      GPSR2_5,
+               GP_2_4_FN,      GPSR2_4,
+               GP_2_3_FN,      GPSR2_3,
+               GP_2_2_FN,      GPSR2_2,
+               GP_2_1_FN,      GPSR2_1,
+               GP_2_0_FN,      GPSR2_0, }
+       },
+       { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               GP_3_15_FN,     GPSR3_15,
+               GP_3_14_FN,     GPSR3_14,
+               GP_3_13_FN,     GPSR3_13,
+               GP_3_12_FN,     GPSR3_12,
+               GP_3_11_FN,     GPSR3_11,
+               GP_3_10_FN,     GPSR3_10,
+               GP_3_9_FN,      GPSR3_9,
+               GP_3_8_FN,      GPSR3_8,
+               GP_3_7_FN,      GPSR3_7,
+               GP_3_6_FN,      GPSR3_6,
+               GP_3_5_FN,      GPSR3_5,
+               GP_3_4_FN,      GPSR3_4,
+               GP_3_3_FN,      GPSR3_3,
+               GP_3_2_FN,      GPSR3_2,
+               GP_3_1_FN,      GPSR3_1,
+               GP_3_0_FN,      GPSR3_0, }
+       },
+       { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               GP_4_17_FN,     GPSR4_17,
+               GP_4_16_FN,     GPSR4_16,
+               GP_4_15_FN,     GPSR4_15,
+               GP_4_14_FN,     GPSR4_14,
+               GP_4_13_FN,     GPSR4_13,
+               GP_4_12_FN,     GPSR4_12,
+               GP_4_11_FN,     GPSR4_11,
+               GP_4_10_FN,     GPSR4_10,
+               GP_4_9_FN,      GPSR4_9,
+               GP_4_8_FN,      GPSR4_8,
+               GP_4_7_FN,      GPSR4_7,
+               GP_4_6_FN,      GPSR4_6,
+               GP_4_5_FN,      GPSR4_5,
+               GP_4_4_FN,      GPSR4_4,
+               GP_4_3_FN,      GPSR4_3,
+               GP_4_2_FN,      GPSR4_2,
+               GP_4_1_FN,      GPSR4_1,
+               GP_4_0_FN,      GPSR4_0, }
+       },
+       { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               GP_5_25_FN,     GPSR5_25,
+               GP_5_24_FN,     GPSR5_24,
+               GP_5_23_FN,     GPSR5_23,
+               GP_5_22_FN,     GPSR5_22,
+               GP_5_21_FN,     GPSR5_21,
+               GP_5_20_FN,     GPSR5_20,
+               GP_5_19_FN,     GPSR5_19,
+               GP_5_18_FN,     GPSR5_18,
+               GP_5_17_FN,     GPSR5_17,
+               GP_5_16_FN,     GPSR5_16,
+               GP_5_15_FN,     GPSR5_15,
+               GP_5_14_FN,     GPSR5_14,
+               GP_5_13_FN,     GPSR5_13,
+               GP_5_12_FN,     GPSR5_12,
+               GP_5_11_FN,     GPSR5_11,
+               GP_5_10_FN,     GPSR5_10,
+               GP_5_9_FN,      GPSR5_9,
+               GP_5_8_FN,      GPSR5_8,
+               GP_5_7_FN,      GPSR5_7,
+               GP_5_6_FN,      GPSR5_6,
+               GP_5_5_FN,      GPSR5_5,
+               GP_5_4_FN,      GPSR5_4,
+               GP_5_3_FN,      GPSR5_3,
+               GP_5_2_FN,      GPSR5_2,
+               GP_5_1_FN,      GPSR5_1,
+               GP_5_0_FN,      GPSR5_0, }
+       },
+       { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1) {
+               GP_6_31_FN,     GPSR6_31,
+               GP_6_30_FN,     GPSR6_30,
+               GP_6_29_FN,     GPSR6_29,
+               GP_6_28_FN,     GPSR6_28,
+               GP_6_27_FN,     GPSR6_27,
+               GP_6_26_FN,     GPSR6_26,
+               GP_6_25_FN,     GPSR6_25,
+               GP_6_24_FN,     GPSR6_24,
+               GP_6_23_FN,     GPSR6_23,
+               GP_6_22_FN,     GPSR6_22,
+               GP_6_21_FN,     GPSR6_21,
+               GP_6_20_FN,     GPSR6_20,
+               GP_6_19_FN,     GPSR6_19,
+               GP_6_18_FN,     GPSR6_18,
+               GP_6_17_FN,     GPSR6_17,
+               GP_6_16_FN,     GPSR6_16,
+               GP_6_15_FN,     GPSR6_15,
+               GP_6_14_FN,     GPSR6_14,
+               GP_6_13_FN,     GPSR6_13,
+               GP_6_12_FN,     GPSR6_12,
+               GP_6_11_FN,     GPSR6_11,
+               GP_6_10_FN,     GPSR6_10,
+               GP_6_9_FN,      GPSR6_9,
+               GP_6_8_FN,      GPSR6_8,
+               GP_6_7_FN,      GPSR6_7,
+               GP_6_6_FN,      GPSR6_6,
+               GP_6_5_FN,      GPSR6_5,
+               GP_6_4_FN,      GPSR6_4,
+               GP_6_3_FN,      GPSR6_3,
+               GP_6_2_FN,      GPSR6_2,
+               GP_6_1_FN,      GPSR6_1,
+               GP_6_0_FN,      GPSR6_0, }
+       },
+       { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1) {
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               0, 0,
+               GP_7_3_FN, GPSR7_3,
+               GP_7_2_FN, GPSR7_2,
+               GP_7_1_FN, GPSR7_1,
+               GP_7_0_FN, GPSR7_0, }
+       },
+#undef F_
+#undef FM
+
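+/*
+ * The IPn_*_* macros defined at the top of this file expand through F_()
+ * and FM(); redefining those two helpers around each table selects which
+ * part of the shared pin description is emitted for the register being
+ * declared.
+ */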
+#define F_(x, y)       x,
+#define FM(x)          FN_##x,
+       { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
+               IP0_31_28
+               IP0_27_24
+               IP0_23_20
+               IP0_19_16
+               IP0_15_12
+               IP0_11_8
+               IP0_7_4
+               IP0_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
+               IP1_31_28
+               IP1_27_24
+               IP1_23_20
+               IP1_19_16
+               IP1_15_12
+               IP1_11_8
+               IP1_7_4
+               IP1_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
+               IP2_31_28
+               IP2_27_24
+               IP2_23_20
+               IP2_19_16
+               IP2_15_12
+               IP2_11_8
+               IP2_7_4
+               IP2_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
+               IP3_31_28
+               IP3_27_24
+               IP3_23_20
+               IP3_19_16
+               IP3_15_12
+               IP3_11_8
+               IP3_7_4
+               IP3_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
+               IP4_31_28
+               IP4_27_24
+               IP4_23_20
+               IP4_19_16
+               IP4_15_12
+               IP4_11_8
+               IP4_7_4
+               IP4_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
+               IP5_31_28
+               IP5_27_24
+               IP5_23_20
+               IP5_19_16
+               IP5_15_12
+               IP5_11_8
+               IP5_7_4
+               IP5_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
+               IP6_31_28
+               IP6_27_24
+               IP6_23_20
+               IP6_19_16
+               IP6_15_12
+               IP6_11_8
+               IP6_7_4
+               IP6_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
+               IP7_31_28
+               IP7_27_24
+               IP7_23_20
+               IP7_19_16
+               IP7_15_12
+               IP7_11_8
+               IP7_7_4
+               IP7_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
+               IP8_31_28
+               IP8_27_24
+               IP8_23_20
+               IP8_19_16
+               IP8_15_12
+               IP8_11_8
+               IP8_7_4
+               IP8_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4) {
+               IP9_31_28
+               IP9_27_24
+               IP9_23_20
+               IP9_19_16
+               IP9_15_12
+               IP9_11_8
+               IP9_7_4
+               IP9_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4) {
+               IP10_31_28
+               IP10_27_24
+               IP10_23_20
+               IP10_19_16
+               IP10_15_12
+               IP10_11_8
+               IP10_7_4
+               IP10_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4) {
+               IP11_31_28
+               IP11_27_24
+               IP11_23_20
+               IP11_19_16
+               IP11_15_12
+               IP11_11_8
+               IP11_7_4
+               IP11_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4) {
+               IP12_31_28
+               IP12_27_24
+               IP12_23_20
+               IP12_19_16
+               IP12_15_12
+               IP12_11_8
+               IP12_7_4
+               IP12_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4) {
+               IP13_31_28
+               IP13_27_24
+               IP13_23_20
+               IP13_19_16
+               IP13_15_12
+               IP13_11_8
+               IP13_7_4
+               IP13_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4) {
+               IP14_31_28
+               IP14_27_24
+               IP14_23_20
+               IP14_19_16
+               IP14_15_12
+               IP14_11_8
+               IP14_7_4
+               IP14_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4) {
+               IP15_31_28
+               IP15_27_24
+               IP15_23_20
+               IP15_19_16
+               IP15_15_12
+               IP15_11_8
+               IP15_7_4
+               IP15_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4) {
+               IP16_31_28
+               IP16_27_24
+               IP16_23_20
+               IP16_19_16
+               IP16_15_12
+               IP16_11_8
+               IP16_7_4
+               IP16_3_0 }
+       },
+       { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4) {
+               /* IP17_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+               /* IP17_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+               /* IP17_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+               /* IP17_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+               /* IP17_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+               /* IP17_11_8  */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+               IP17_7_4
+               IP17_3_0 }
+       },
+#undef F_
+#undef FM
+
+#define F_(x, y)       x,
+#define FM(x)          FN_##x,
+       { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
+                            1, 2, 2, 3, 1, 1, 2, 1, 1, 1,
+                            2, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1) {
+               0, 0, /* RESERVED 31 */
+               MOD_SEL0_30_29
+               MOD_SEL0_28_27
+               MOD_SEL0_26_25_24
+               MOD_SEL0_23
+               MOD_SEL0_22
+               MOD_SEL0_21_20
+               MOD_SEL0_19
+               MOD_SEL0_18
+               MOD_SEL0_17
+               MOD_SEL0_16_15
+               MOD_SEL0_14
+               MOD_SEL0_13
+               MOD_SEL0_12
+               MOD_SEL0_11
+               MOD_SEL0_10
+               MOD_SEL0_9
+               MOD_SEL0_8
+               MOD_SEL0_7_6
+               MOD_SEL0_5_4
+               MOD_SEL0_3
+               MOD_SEL0_2_1
+               0, 0, /* RESERVED 0 */ }
+       },
+       { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
+                            2, 3, 1, 2, 3, 1, 1, 2, 1,
+                            2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1) {
+               MOD_SEL1_31_30
+               MOD_SEL1_29_28_27
+               MOD_SEL1_26
+               MOD_SEL1_25_24
+               MOD_SEL1_23_22_21
+               MOD_SEL1_20
+               MOD_SEL1_19
+               MOD_SEL1_18_17
+               MOD_SEL1_16
+               MOD_SEL1_15_14
+               MOD_SEL1_13
+               MOD_SEL1_12
+               MOD_SEL1_11
+               MOD_SEL1_10
+               MOD_SEL1_9
+               0, 0, 0, 0, /* RESERVED 8, 7 */
+               MOD_SEL1_6
+               MOD_SEL1_5
+               MOD_SEL1_4
+               MOD_SEL1_3
+               MOD_SEL1_2
+               MOD_SEL1_1
+               MOD_SEL1_0 }
+       },
+       { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
+                            1, 1, 1, 1, 4, 4, 4,
+                            4, 4, 4, 1, 2, 1) {
+               MOD_SEL2_31
+               MOD_SEL2_30
+               MOD_SEL2_29
+               /* RESERVED 28 */
+               0, 0,
+               /* RESERVED 27, 26, 25, 24 */
+               0, 0, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 0, 0, 0, 0,
+               /* RESERVED 23, 22, 21, 20 */
+               0, 0, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 0, 0, 0, 0,
+               /* RESERVED 19, 18, 17, 16 */
+               0, 0, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 0, 0, 0, 0,
+               /* RESERVED 15, 14, 13, 12 */
+               0, 0, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 0, 0, 0, 0,
+               /* RESERVED 11, 10, 9, 8 */
+               0, 0, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 0, 0, 0, 0,
+               /* RESERVED 7, 6, 5, 4 */
+               0, 0, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 0, 0, 0, 0,
+               /* RESERVED 3 */
+               0, 0,
+               MOD_SEL2_2_1
+               MOD_SEL2_0 }
+       },
+       { },
+};
+
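+/*
+ * All PFC registers are write-protected: the sh-pfc core writes the
+ * inverted value to PMMR (unlock_reg below) immediately before each
+ * register update.
+ */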
+const struct sh_pfc_soc_info r8a7795_pinmux_info = {
+       .name = "r8a77950_pfc",
+       .unlock_reg = 0xe6060000, /* PMMR */
+
+       .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+       .pins = pinmux_pins,
+       .nr_pins = ARRAY_SIZE(pinmux_pins),
+       .groups = pinmux_groups,
+       .nr_groups = ARRAY_SIZE(pinmux_groups),
+       .functions = pinmux_functions,
+       .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+       .cfg_regs = pinmux_config_regs,
+
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
index 3bda7bafd0ab5a55ff81c20d1183d72934bda8d6..61b27ec48876ece871726a644b64b11d7bc47d58 100644 (file)
@@ -1587,6 +1587,6 @@ const struct sh_pfc_soc_info sh7203_pinmux_info = {
        .cfg_regs = pinmux_config_regs,
        .data_regs = pinmux_data_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index e1cb6dc0502851734828e7edf3acb01106474891..8070765311dbf7a488fe5edd9d3d7ce92e2f331a 100644 (file)
@@ -2126,6 +2126,6 @@ const struct sh_pfc_soc_info sh7264_pinmux_info = {
        .cfg_regs = pinmux_config_regs,
        .data_regs = pinmux_data_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index 7a11320ad96d391adcb02b6c17d942241ecc2c37..a50d22bef1f444517523c6bce95dc091a7d42b9a 100644 (file)
@@ -2830,6 +2830,6 @@ const struct sh_pfc_soc_info sh7269_pinmux_info = {
        .cfg_regs = pinmux_config_regs,
        .data_regs = pinmux_data_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index 097526576f88181f24188cc6ff3aa5305e5d1e2e..6a69c8c5d9433d7ed8af781e29c41b86a52c98ed 100644 (file)
@@ -3649,38 +3649,38 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
 };
 
 static const struct pinmux_irq pinmux_irqs[] = {
-       PINMUX_IRQ(irq_pin(0), 11),
-       PINMUX_IRQ(irq_pin(1), 10),
-       PINMUX_IRQ(irq_pin(2), 149),
-       PINMUX_IRQ(irq_pin(3), 224),
-       PINMUX_IRQ(irq_pin(4), 159),
-       PINMUX_IRQ(irq_pin(5), 227),
-       PINMUX_IRQ(irq_pin(6), 147),
-       PINMUX_IRQ(irq_pin(7), 150),
-       PINMUX_IRQ(irq_pin(8), 223),
-       PINMUX_IRQ(irq_pin(9), 56, 308),
-       PINMUX_IRQ(irq_pin(10), 54),
-       PINMUX_IRQ(irq_pin(11), 238),
-       PINMUX_IRQ(irq_pin(12), 156),
-       PINMUX_IRQ(irq_pin(13), 239),
-       PINMUX_IRQ(irq_pin(14), 251),
-       PINMUX_IRQ(irq_pin(15), 0),
-       PINMUX_IRQ(irq_pin(16), 249),
-       PINMUX_IRQ(irq_pin(17), 234),
-       PINMUX_IRQ(irq_pin(18), 13),
-       PINMUX_IRQ(irq_pin(19), 9),
-       PINMUX_IRQ(irq_pin(20), 14),
-       PINMUX_IRQ(irq_pin(21), 15),
-       PINMUX_IRQ(irq_pin(22), 40),
-       PINMUX_IRQ(irq_pin(23), 53),
-       PINMUX_IRQ(irq_pin(24), 118),
-       PINMUX_IRQ(irq_pin(25), 164),
-       PINMUX_IRQ(irq_pin(26), 115),
-       PINMUX_IRQ(irq_pin(27), 116),
-       PINMUX_IRQ(irq_pin(28), 117),
-       PINMUX_IRQ(irq_pin(29), 28),
-       PINMUX_IRQ(irq_pin(30), 27),
-       PINMUX_IRQ(irq_pin(31), 26),
+       PINMUX_IRQ(11),         /* IRQ0 */
+       PINMUX_IRQ(10),         /* IRQ1 */
+       PINMUX_IRQ(149),        /* IRQ2 */
+       PINMUX_IRQ(224),        /* IRQ3 */
+       PINMUX_IRQ(159),        /* IRQ4 */
+       PINMUX_IRQ(227),        /* IRQ5 */
+       PINMUX_IRQ(147),        /* IRQ6 */
+       PINMUX_IRQ(150),        /* IRQ7 */
+       PINMUX_IRQ(223),        /* IRQ8 */
+       PINMUX_IRQ(56, 308),    /* IRQ9 */
+       PINMUX_IRQ(54),         /* IRQ10 */
+       PINMUX_IRQ(238),        /* IRQ11 */
+       PINMUX_IRQ(156),        /* IRQ12 */
+       PINMUX_IRQ(239),        /* IRQ13 */
+       PINMUX_IRQ(251),        /* IRQ14 */
+       PINMUX_IRQ(0),          /* IRQ15 */
+       PINMUX_IRQ(249),        /* IRQ16 */
+       PINMUX_IRQ(234),        /* IRQ17 */
+       PINMUX_IRQ(13),         /* IRQ18 */
+       PINMUX_IRQ(9),          /* IRQ19 */
+       PINMUX_IRQ(14),         /* IRQ20 */
+       PINMUX_IRQ(15),         /* IRQ21 */
+       PINMUX_IRQ(40),         /* IRQ22 */
+       PINMUX_IRQ(53),         /* IRQ23 */
+       PINMUX_IRQ(118),        /* IRQ24 */
+       PINMUX_IRQ(164),        /* IRQ25 */
+       PINMUX_IRQ(115),        /* IRQ26 */
+       PINMUX_IRQ(116),        /* IRQ27 */
+       PINMUX_IRQ(117),        /* IRQ28 */
+       PINMUX_IRQ(28),         /* IRQ29 */
+       PINMUX_IRQ(27),         /* IRQ30 */
+       PINMUX_IRQ(26),         /* IRQ31 */
 };
 
 /* -----------------------------------------------------------------------------
@@ -3865,8 +3865,8 @@ const struct sh_pfc_soc_info sh73a0_pinmux_info = {
        .cfg_regs = pinmux_config_regs,
        .data_regs = pinmux_data_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 
        .gpio_irq = pinmux_irqs,
        .gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
index 13d05f88bc01c78db572a70924a821f376245931..e07a82df42c83a8f9cd498f219e5c58aaf19f087 100644 (file)
@@ -1201,6 +1201,6 @@ const struct sh_pfc_soc_info sh7720_pinmux_info = {
        .cfg_regs = pinmux_config_regs,
        .data_regs = pinmux_data_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index 914d872c37a40a2198ea5a26c7e875771e19814d..29c69133b0ef88d0e217941cfb0ff12be43ecb91 100644 (file)
@@ -1741,6 +1741,6 @@ const struct sh_pfc_soc_info sh7722_pinmux_info = {
        .cfg_regs = pinmux_config_regs,
        .data_regs = pinmux_data_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index 4eb7eae2e6d0406573c325b254b0aefdaa77f067..8ea18df034927948518ca087f0b7e0b3a3d43fdb 100644 (file)
@@ -1893,6 +1893,6 @@ const struct sh_pfc_soc_info sh7723_pinmux_info = {
        .cfg_regs = pinmux_config_regs,
        .data_regs = pinmux_data_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index 74a1a7f1317c7ce097c24abcb85da143a6097f09..7f6c36c1a8fa8db510e70367c125c0d14d3830f8 100644 (file)
@@ -2175,6 +2175,6 @@ const struct sh_pfc_soc_info sh7724_pinmux_info = {
        .cfg_regs = pinmux_config_regs,
        .data_regs = pinmux_data_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index e53dd1cb1625d6105f04ef38848c8d6d3f06a65d..e7deb51de7dc4fd4c76bc1b7483ac2127e1da850 100644 (file)
@@ -598,502 +598,502 @@ static const u16 pinmux_data[] = {
        /* IPSR0 */
        PINMUX_IPSR_DATA(IP0_1_0, A0),
        PINMUX_IPSR_DATA(IP0_1_0, ST0_CLKIN),
-       PINMUX_IPSR_MODSEL_DATA(IP0_1_0, LCD_DATA0_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_1_0, TCLKA_C, SEL_MTU2_CLK_1),
+       PINMUX_IPSR_MSEL(IP0_1_0, LCD_DATA0_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_1_0, TCLKA_C, SEL_MTU2_CLK_1),
 
        PINMUX_IPSR_DATA(IP0_3_2, A1),
        PINMUX_IPSR_DATA(IP0_3_2, ST0_REQ),
-       PINMUX_IPSR_MODSEL_DATA(IP0_3_2, LCD_DATA1_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_3_2, TCLKB_C, SEL_MTU2_CLK_1),
+       PINMUX_IPSR_MSEL(IP0_3_2, LCD_DATA1_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_3_2, TCLKB_C, SEL_MTU2_CLK_1),
 
        PINMUX_IPSR_DATA(IP0_5_4, A2),
        PINMUX_IPSR_DATA(IP0_5_4, ST0_SYC),
-       PINMUX_IPSR_MODSEL_DATA(IP0_5_4, LCD_DATA2_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_5_4, TCLKC_C, SEL_MTU2_CLK_1),
+       PINMUX_IPSR_MSEL(IP0_5_4, LCD_DATA2_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_5_4, TCLKC_C, SEL_MTU2_CLK_1),
 
        PINMUX_IPSR_DATA(IP0_7_6, A3),
        PINMUX_IPSR_DATA(IP0_7_6, ST0_VLD),
-       PINMUX_IPSR_MODSEL_DATA(IP0_7_6, LCD_DATA3_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_7_6, TCLKD_C, SEL_MTU2_CLK_1),
+       PINMUX_IPSR_MSEL(IP0_7_6, LCD_DATA3_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_7_6, TCLKD_C, SEL_MTU2_CLK_1),
 
        PINMUX_IPSR_DATA(IP0_9_8, A4),
        PINMUX_IPSR_DATA(IP0_9_8, ST0_D0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_9_8, LCD_DATA4_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_9_8, TIOC0A_C, SEL_MTU2_CH0_1),
+       PINMUX_IPSR_MSEL(IP0_9_8, LCD_DATA4_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_9_8, TIOC0A_C, SEL_MTU2_CH0_1),
 
        PINMUX_IPSR_DATA(IP0_11_10, A5),
        PINMUX_IPSR_DATA(IP0_11_10, ST0_D1),
-       PINMUX_IPSR_MODSEL_DATA(IP0_11_10, LCD_DATA5_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_11_10, TIOC0B_C, SEL_MTU2_CH0_1),
+       PINMUX_IPSR_MSEL(IP0_11_10, LCD_DATA5_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_11_10, TIOC0B_C, SEL_MTU2_CH0_1),
 
        PINMUX_IPSR_DATA(IP0_13_12, A6),
        PINMUX_IPSR_DATA(IP0_13_12, ST0_D2),
-       PINMUX_IPSR_MODSEL_DATA(IP0_13_12, LCD_DATA6_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_13_12, TIOC0C_C, SEL_MTU2_CH0_1),
+       PINMUX_IPSR_MSEL(IP0_13_12, LCD_DATA6_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_13_12, TIOC0C_C, SEL_MTU2_CH0_1),
 
        PINMUX_IPSR_DATA(IP0_15_14, A7),
        PINMUX_IPSR_DATA(IP0_15_14, ST0_D3),
-       PINMUX_IPSR_MODSEL_DATA(IP0_15_14, LCD_DATA7_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_15_14, TIOC0D_C, SEL_MTU2_CH0_1),
+       PINMUX_IPSR_MSEL(IP0_15_14, LCD_DATA7_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_15_14, TIOC0D_C, SEL_MTU2_CH0_1),
 
        PINMUX_IPSR_DATA(IP0_17_16, A8),
        PINMUX_IPSR_DATA(IP0_17_16, ST0_D4),
-       PINMUX_IPSR_MODSEL_DATA(IP0_17_16, LCD_DATA8_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_17_16, TIOC1A_C, SEL_MTU2_CH1_2),
+       PINMUX_IPSR_MSEL(IP0_17_16, LCD_DATA8_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_17_16, TIOC1A_C, SEL_MTU2_CH1_2),
 
        PINMUX_IPSR_DATA(IP0_19_18, A9),
        PINMUX_IPSR_DATA(IP0_19_18, ST0_D5),
-       PINMUX_IPSR_MODSEL_DATA(IP0_19_18, LCD_DATA9_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_19_18, TIOC1B_C, SEL_MTU2_CH1_2),
+       PINMUX_IPSR_MSEL(IP0_19_18, LCD_DATA9_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_19_18, TIOC1B_C, SEL_MTU2_CH1_2),
 
        PINMUX_IPSR_DATA(IP0_21_20, A10),
        PINMUX_IPSR_DATA(IP0_21_20, ST0_D6),
-       PINMUX_IPSR_MODSEL_DATA(IP0_21_20, LCD_DATA10_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_21_20, TIOC2A_C, SEL_MTU2_CH2_2),
+       PINMUX_IPSR_MSEL(IP0_21_20, LCD_DATA10_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_21_20, TIOC2A_C, SEL_MTU2_CH2_2),
 
        PINMUX_IPSR_DATA(IP0_23_22, A11),
        PINMUX_IPSR_DATA(IP0_23_22, ST0_D7),
-       PINMUX_IPSR_MODSEL_DATA(IP0_23_22, LCD_DATA11_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_23_22, TIOC2B_C, SEL_MTU2_CH2_2),
+       PINMUX_IPSR_MSEL(IP0_23_22, LCD_DATA11_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_23_22, TIOC2B_C, SEL_MTU2_CH2_2),
 
        PINMUX_IPSR_DATA(IP0_25_24, A12),
-       PINMUX_IPSR_MODSEL_DATA(IP0_25_24, LCD_DATA12_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_25_24, TIOC3A_C, SEL_MTU2_CH3_1),
+       PINMUX_IPSR_MSEL(IP0_25_24, LCD_DATA12_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_25_24, TIOC3A_C, SEL_MTU2_CH3_1),
 
        PINMUX_IPSR_DATA(IP0_27_26, A13),
-       PINMUX_IPSR_MODSEL_DATA(IP0_27_26, LCD_DATA13_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_27_26, TIOC3B_C, SEL_MTU2_CH3_1),
+       PINMUX_IPSR_MSEL(IP0_27_26, LCD_DATA13_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_27_26, TIOC3B_C, SEL_MTU2_CH3_1),
 
        PINMUX_IPSR_DATA(IP0_29_28, A14),
-       PINMUX_IPSR_MODSEL_DATA(IP0_29_28, LCD_DATA14_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_29_28, TIOC3C_C, SEL_MTU2_CH3_1),
+       PINMUX_IPSR_MSEL(IP0_29_28, LCD_DATA14_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_29_28, TIOC3C_C, SEL_MTU2_CH3_1),
 
        PINMUX_IPSR_DATA(IP0_31_30, A15),
        PINMUX_IPSR_DATA(IP0_31_30, ST0_VCO_CLKIN),
-       PINMUX_IPSR_MODSEL_DATA(IP0_31_30, LCD_DATA15_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP0_31_30, TIOC3D_C, SEL_MTU2_CH3_1),
+       PINMUX_IPSR_MSEL(IP0_31_30, LCD_DATA15_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP0_31_30, TIOC3D_C, SEL_MTU2_CH3_1),
 
 
        /* IPSR1 */
        PINMUX_IPSR_DATA(IP1_1_0, A16),
        PINMUX_IPSR_DATA(IP1_1_0, ST0_PWM),
-       PINMUX_IPSR_MODSEL_DATA(IP1_1_0, LCD_DON_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_1_0, TIOC4A_C, SEL_MTU2_CH4_1),
+       PINMUX_IPSR_MSEL(IP1_1_0, LCD_DON_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP1_1_0, TIOC4A_C, SEL_MTU2_CH4_1),
 
        PINMUX_IPSR_DATA(IP1_3_2, A17),
        PINMUX_IPSR_DATA(IP1_3_2, ST1_VCO_CLKIN),
-       PINMUX_IPSR_MODSEL_DATA(IP1_3_2, LCD_CL1_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_3_2, TIOC4B_C, SEL_MTU2_CH4_1),
+       PINMUX_IPSR_MSEL(IP1_3_2, LCD_CL1_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP1_3_2, TIOC4B_C, SEL_MTU2_CH4_1),
 
        PINMUX_IPSR_DATA(IP1_5_4, A18),
        PINMUX_IPSR_DATA(IP1_5_4, ST1_PWM),
-       PINMUX_IPSR_MODSEL_DATA(IP1_5_4, LCD_CL2_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_5_4, TIOC4C_C, SEL_MTU2_CH4_1),
+       PINMUX_IPSR_MSEL(IP1_5_4, LCD_CL2_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP1_5_4, TIOC4C_C, SEL_MTU2_CH4_1),
 
        PINMUX_IPSR_DATA(IP1_7_6, A19),
        PINMUX_IPSR_DATA(IP1_7_6, ST1_CLKIN),
-       PINMUX_IPSR_MODSEL_DATA(IP1_7_6, LCD_CLK_A, SEL_LCDC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_7_6, TIOC4D_C, SEL_MTU2_CH4_1),
+       PINMUX_IPSR_MSEL(IP1_7_6, LCD_CLK_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP1_7_6, TIOC4D_C, SEL_MTU2_CH4_1),
 
        PINMUX_IPSR_DATA(IP1_9_8, A20),
        PINMUX_IPSR_DATA(IP1_9_8, ST1_REQ),
-       PINMUX_IPSR_MODSEL_DATA(IP1_9_8, LCD_FLM_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP1_9_8, LCD_FLM_A, SEL_LCDC_0),
 
        PINMUX_IPSR_DATA(IP1_11_10, A21),
        PINMUX_IPSR_DATA(IP1_11_10, ST1_SYC),
-       PINMUX_IPSR_MODSEL_DATA(IP1_11_10, LCD_VCPWC_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP1_11_10, LCD_VCPWC_A, SEL_LCDC_0),
 
        PINMUX_IPSR_DATA(IP1_13_12, A22),
        PINMUX_IPSR_DATA(IP1_13_12, ST1_VLD),
-       PINMUX_IPSR_MODSEL_DATA(IP1_13_12, LCD_VEPWC_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP1_13_12, LCD_VEPWC_A, SEL_LCDC_0),
 
        PINMUX_IPSR_DATA(IP1_15_14, A23),
        PINMUX_IPSR_DATA(IP1_15_14, ST1_D0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_15_14, LCD_M_DISP_A, SEL_LCDC_0),
+       PINMUX_IPSR_MSEL(IP1_15_14, LCD_M_DISP_A, SEL_LCDC_0),
 
        PINMUX_IPSR_DATA(IP1_17_16, A24),
-       PINMUX_IPSR_MODSEL_DATA(IP1_17_16, RX2_D, SEL_SCIF2_3),
+       PINMUX_IPSR_MSEL(IP1_17_16, RX2_D, SEL_SCIF2_3),
        PINMUX_IPSR_DATA(IP1_17_16, ST1_D1),
 
        PINMUX_IPSR_DATA(IP1_19_18, A25),
-       PINMUX_IPSR_MODSEL_DATA(IP1_17_16, RX2_D, SEL_SCIF2_3),
+       PINMUX_IPSR_MSEL(IP1_17_16, RX2_D, SEL_SCIF2_3),
        PINMUX_IPSR_DATA(IP1_17_16, ST1_D2),
 
        PINMUX_IPSR_DATA(IP1_22_20, D0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_22_20, SD0_DAT0_A, SEL_SDHI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_22_20, MMC_D0_A, SEL_MMC_0),
+       PINMUX_IPSR_MSEL(IP1_22_20, SD0_DAT0_A, SEL_SDHI0_0),
+       PINMUX_IPSR_MSEL(IP1_22_20, MMC_D0_A, SEL_MMC_0),
        PINMUX_IPSR_DATA(IP1_22_20, ST1_D3),
-       PINMUX_IPSR_MODSEL_DATA(IP1_22_20, FD0_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP1_22_20, FD0_A, SEL_FLCTL_0),
 
        PINMUX_IPSR_DATA(IP1_25_23, D1),
-       PINMUX_IPSR_MODSEL_DATA(IP1_25_23, SD0_DAT0_A, SEL_SDHI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_25_23, MMC_D1_A, SEL_MMC_0),
+       PINMUX_IPSR_MSEL(IP1_25_23, SD0_DAT0_A, SEL_SDHI0_0),
+       PINMUX_IPSR_MSEL(IP1_25_23, MMC_D1_A, SEL_MMC_0),
        PINMUX_IPSR_DATA(IP1_25_23, ST1_D4),
-       PINMUX_IPSR_MODSEL_DATA(IP1_25_23, FD1_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP1_25_23, FD1_A, SEL_FLCTL_0),
 
        PINMUX_IPSR_DATA(IP1_28_26, D2),
-       PINMUX_IPSR_MODSEL_DATA(IP1_28_26, SD0_DAT0_A, SEL_SDHI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_28_26, MMC_D2_A, SEL_MMC_0),
+       PINMUX_IPSR_MSEL(IP1_28_26, SD0_DAT0_A, SEL_SDHI0_0),
+       PINMUX_IPSR_MSEL(IP1_28_26, MMC_D2_A, SEL_MMC_0),
        PINMUX_IPSR_DATA(IP1_28_26, ST1_D5),
-       PINMUX_IPSR_MODSEL_DATA(IP1_28_26, FD2_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP1_28_26, FD2_A, SEL_FLCTL_0),
 
        PINMUX_IPSR_DATA(IP1_31_29, D3),
-       PINMUX_IPSR_MODSEL_DATA(IP1_31_29, SD0_DAT0_A, SEL_SDHI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP1_31_29, MMC_D3_A, SEL_MMC_0),
+       PINMUX_IPSR_MSEL(IP1_31_29, SD0_DAT0_A, SEL_SDHI0_0),
+       PINMUX_IPSR_MSEL(IP1_31_29, MMC_D3_A, SEL_MMC_0),
        PINMUX_IPSR_DATA(IP1_31_29, ST1_D6),
-       PINMUX_IPSR_MODSEL_DATA(IP1_31_29, FD3_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP1_31_29, FD3_A, SEL_FLCTL_0),
 
        /* IPSR2 */
        PINMUX_IPSR_DATA(IP2_2_0, D4),
-       PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SD0_CD_A, SEL_SDHI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_2_0, MMC_D4_A, SEL_MMC_0),
+       PINMUX_IPSR_MSEL(IP2_2_0, SD0_CD_A, SEL_SDHI0_0),
+       PINMUX_IPSR_MSEL(IP2_2_0, MMC_D4_A, SEL_MMC_0),
        PINMUX_IPSR_DATA(IP2_2_0, ST1_D7),
-       PINMUX_IPSR_MODSEL_DATA(IP2_2_0, FD4_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP2_2_0, FD4_A, SEL_FLCTL_0),
 
        PINMUX_IPSR_DATA(IP2_4_3, D5),
-       PINMUX_IPSR_MODSEL_DATA(IP2_4_3, SD0_WP_A, SEL_SDHI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_4_3, MMC_D5_A, SEL_MMC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_4_3, FD5_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP2_4_3, SD0_WP_A, SEL_SDHI0_0),
+       PINMUX_IPSR_MSEL(IP2_4_3, MMC_D5_A, SEL_MMC_0),
+       PINMUX_IPSR_MSEL(IP2_4_3, FD5_A, SEL_FLCTL_0),
 
        PINMUX_IPSR_DATA(IP2_7_5, D6),
-       PINMUX_IPSR_MODSEL_DATA(IP2_7_5, RSPI_RSPCK_A, SEL_RSPI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_7_5, MMC_D6_A, SEL_MMC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_7_5, QSPCLK_A, SEL_RQSPI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_7_5, FD6_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP2_7_5, RSPI_RSPCK_A, SEL_RSPI_0),
+       PINMUX_IPSR_MSEL(IP2_7_5, MMC_D6_A, SEL_MMC_0),
+       PINMUX_IPSR_MSEL(IP2_7_5, QSPCLK_A, SEL_RQSPI_0),
+       PINMUX_IPSR_MSEL(IP2_7_5, FD6_A, SEL_FLCTL_0),
 
        PINMUX_IPSR_DATA(IP2_10_8, D7),
-       PINMUX_IPSR_MODSEL_DATA(IP2_10_8, RSPI_SSL_A, SEL_RSPI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_10_8, MMC_D7_A, SEL_MMC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_10_8, QSSL_A, SEL_RQSPI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_10_8, FD7_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP2_10_8, RSPI_SSL_A, SEL_RSPI_0),
+       PINMUX_IPSR_MSEL(IP2_10_8, MMC_D7_A, SEL_MMC_0),
+       PINMUX_IPSR_MSEL(IP2_10_8, QSSL_A, SEL_RQSPI_0),
+       PINMUX_IPSR_MSEL(IP2_10_8, FD7_A, SEL_FLCTL_0),
 
        PINMUX_IPSR_DATA(IP2_13_11, D8),
-       PINMUX_IPSR_MODSEL_DATA(IP2_13_11, SD0_CLK_A, SEL_SDHI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_13_11, MMC_CLK_A, SEL_MMC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_13_11, QIO2_A, SEL_RQSPI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_13_11, FCE_A, SEL_FLCTL_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_13_11, ET0_GTX_CLK_B, SEL_ET0_1),
+       PINMUX_IPSR_MSEL(IP2_13_11, SD0_CLK_A, SEL_SDHI0_0),
+       PINMUX_IPSR_MSEL(IP2_13_11, MMC_CLK_A, SEL_MMC_0),
+       PINMUX_IPSR_MSEL(IP2_13_11, QIO2_A, SEL_RQSPI_0),
+       PINMUX_IPSR_MSEL(IP2_13_11, FCE_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP2_13_11, ET0_GTX_CLK_B, SEL_ET0_1),
 
        PINMUX_IPSR_DATA(IP2_16_14, D9),
-       PINMUX_IPSR_MODSEL_DATA(IP2_16_14, SD0_CMD_A, SEL_SDHI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_16_14, MMC_CMD_A, SEL_MMC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_16_14, QIO3_A, SEL_RQSPI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_16_14, FCLE_A, SEL_FLCTL_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_16_14, ET0_ETXD1_B, SEL_ET0_1),
+       PINMUX_IPSR_MSEL(IP2_16_14, SD0_CMD_A, SEL_SDHI0_0),
+       PINMUX_IPSR_MSEL(IP2_16_14, MMC_CMD_A, SEL_MMC_0),
+       PINMUX_IPSR_MSEL(IP2_16_14, QIO3_A, SEL_RQSPI_0),
+       PINMUX_IPSR_MSEL(IP2_16_14, FCLE_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP2_16_14, ET0_ETXD1_B, SEL_ET0_1),
 
        PINMUX_IPSR_DATA(IP2_19_17, D10),
-       PINMUX_IPSR_MODSEL_DATA(IP2_19_17, RSPI_MOSI_A, SEL_RSPI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_19_17, QMO_QIO0_A, SEL_RQSPI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_19_17, FALE_A, SEL_FLCTL_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_19_17, ET0_ETXD2_B, SEL_ET0_1),
+       PINMUX_IPSR_MSEL(IP2_19_17, RSPI_MOSI_A, SEL_RSPI_0),
+       PINMUX_IPSR_MSEL(IP2_19_17, QMO_QIO0_A, SEL_RQSPI_0),
+       PINMUX_IPSR_MSEL(IP2_19_17, FALE_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP2_19_17, ET0_ETXD2_B, SEL_ET0_1),
 
        PINMUX_IPSR_DATA(IP2_22_20, D11),
-       PINMUX_IPSR_MODSEL_DATA(IP2_22_20, RSPI_MISO_A, SEL_RSPI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_22_20, QMI_QIO1_A, SEL_RQSPI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_22_20, FRE_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP2_22_20, RSPI_MISO_A, SEL_RSPI_0),
+       PINMUX_IPSR_MSEL(IP2_22_20, QMI_QIO1_A, SEL_RQSPI_0),
+       PINMUX_IPSR_MSEL(IP2_22_20, FRE_A, SEL_FLCTL_0),
 
        PINMUX_IPSR_DATA(IP2_24_23, D12),
-       PINMUX_IPSR_MODSEL_DATA(IP2_24_23, FWE_A, SEL_FLCTL_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_24_23, ET0_ETXD5_B, SEL_ET0_1),
+       PINMUX_IPSR_MSEL(IP2_24_23, FWE_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP2_24_23, ET0_ETXD5_B, SEL_ET0_1),
 
        PINMUX_IPSR_DATA(IP2_27_25, D13),
-       PINMUX_IPSR_MODSEL_DATA(IP2_27_25, RX2_B, SEL_SCIF2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_27_25, FRB_A, SEL_FLCTL_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_27_25, ET0_ETXD6_B, SEL_ET0_1),
+       PINMUX_IPSR_MSEL(IP2_27_25, RX2_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP2_27_25, FRB_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP2_27_25, ET0_ETXD6_B, SEL_ET0_1),
 
        PINMUX_IPSR_DATA(IP2_30_28, D14),
-       PINMUX_IPSR_MODSEL_DATA(IP2_30_28, TX2_B, SEL_SCIF2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP2_30_28, FSE_A, SEL_FLCTL_0),
-       PINMUX_IPSR_MODSEL_DATA(IP2_30_28, ET0_TX_CLK_B, SEL_ET0_1),
+       PINMUX_IPSR_MSEL(IP2_30_28, TX2_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP2_30_28, FSE_A, SEL_FLCTL_0),
+       PINMUX_IPSR_MSEL(IP2_30_28, ET0_TX_CLK_B, SEL_ET0_1),
 
        /* IPSR3 */
        PINMUX_IPSR_DATA(IP3_1_0, D15),
-       PINMUX_IPSR_MODSEL_DATA(IP3_1_0, SCK2_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP3_1_0, SCK2_B, SEL_SCIF2_1),
 
        PINMUX_IPSR_DATA(IP3_2, CS1_A26),
-       PINMUX_IPSR_MODSEL_DATA(IP3_2, QIO3_B, SEL_RQSPI_1),
+       PINMUX_IPSR_MSEL(IP3_2, QIO3_B, SEL_RQSPI_1),
 
        PINMUX_IPSR_DATA(IP3_5_3, EX_CS1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_5_3, RX3_B, SEL_SCIF2_1),
+       PINMUX_IPSR_MSEL(IP3_5_3, RX3_B, SEL_SCIF2_1),
        PINMUX_IPSR_DATA(IP3_5_3, ATACS0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_5_3, QIO2_B, SEL_RQSPI_1),
+       PINMUX_IPSR_MSEL(IP3_5_3, QIO2_B, SEL_RQSPI_1),
        PINMUX_IPSR_DATA(IP3_5_3, ET0_ETXD0),
 
        PINMUX_IPSR_DATA(IP3_8_6, EX_CS2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_8_6, TX3_B, SEL_SCIF3_1),
+       PINMUX_IPSR_MSEL(IP3_8_6, TX3_B, SEL_SCIF3_1),
        PINMUX_IPSR_DATA(IP3_8_6, ATACS1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_8_6, QSPCLK_B, SEL_RQSPI_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_8_6, ET0_GTX_CLK_A, SEL_ET0_0),
+       PINMUX_IPSR_MSEL(IP3_8_6, QSPCLK_B, SEL_RQSPI_1),
+       PINMUX_IPSR_MSEL(IP3_8_6, ET0_GTX_CLK_A, SEL_ET0_0),
 
        PINMUX_IPSR_DATA(IP3_11_9, EX_CS3),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_9, SD1_CD_A, SEL_SDHI1_0),
+       PINMUX_IPSR_MSEL(IP3_11_9, SD1_CD_A, SEL_SDHI1_0),
        PINMUX_IPSR_DATA(IP3_11_9, ATARD),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_9, QMO_QIO0_B, SEL_RQSPI_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_11_9, ET0_ETXD1_A, SEL_ET0_0),
+       PINMUX_IPSR_MSEL(IP3_11_9, QMO_QIO0_B, SEL_RQSPI_1),
+       PINMUX_IPSR_MSEL(IP3_11_9, ET0_ETXD1_A, SEL_ET0_0),
 
        PINMUX_IPSR_DATA(IP3_14_12, EX_CS4),
-       PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SD1_WP_A, SEL_SDHI1_0),
+       PINMUX_IPSR_MSEL(IP3_14_12, SD1_WP_A, SEL_SDHI1_0),
        PINMUX_IPSR_DATA(IP3_14_12, ATAWR),
-       PINMUX_IPSR_MODSEL_DATA(IP3_14_12, QMI_QIO1_B, SEL_RQSPI_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_14_12, ET0_ETXD2_A, SEL_ET0_0),
+       PINMUX_IPSR_MSEL(IP3_14_12, QMI_QIO1_B, SEL_RQSPI_1),
+       PINMUX_IPSR_MSEL(IP3_14_12, ET0_ETXD2_A, SEL_ET0_0),
 
        PINMUX_IPSR_DATA(IP3_17_15, EX_CS5),
-       PINMUX_IPSR_MODSEL_DATA(IP3_17_15, SD1_CMD_A, SEL_SDHI1_0),
+       PINMUX_IPSR_MSEL(IP3_17_15, SD1_CMD_A, SEL_SDHI1_0),
        PINMUX_IPSR_DATA(IP3_17_15, ATADIR),
-       PINMUX_IPSR_MODSEL_DATA(IP3_17_15, QSSL_B, SEL_RQSPI_1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_17_15, ET0_ETXD3_A, SEL_ET0_0),
+       PINMUX_IPSR_MSEL(IP3_17_15, QSSL_B, SEL_RQSPI_1),
+       PINMUX_IPSR_MSEL(IP3_17_15, ET0_ETXD3_A, SEL_ET0_0),
 
        PINMUX_IPSR_DATA(IP3_19_18, RD_WR),
        PINMUX_IPSR_DATA(IP3_19_18, TCLK0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_19_18, CAN_CLK_B, SEL_RCAN_CLK_1),
+       PINMUX_IPSR_MSEL(IP3_19_18, CAN_CLK_B, SEL_RCAN_CLK_1),
        PINMUX_IPSR_DATA(IP3_19_18, ET0_ETXD4),
 
        PINMUX_IPSR_DATA(IP3_20, EX_WAIT0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_20, TCLK1_B, SEL_TMU_1),
+       PINMUX_IPSR_MSEL(IP3_20, TCLK1_B, SEL_TMU_1),
 
        PINMUX_IPSR_DATA(IP3_23_21, EX_WAIT1),
-       PINMUX_IPSR_MODSEL_DATA(IP3_23_21, SD1_DAT0_A, SEL_SDHI1_0),
+       PINMUX_IPSR_MSEL(IP3_23_21, SD1_DAT0_A, SEL_SDHI1_0),
        PINMUX_IPSR_DATA(IP3_23_21, DREQ2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_23_21, CAN1_TX_C, SEL_RCAN1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_23_21, ET0_LINK_C, SEL_ET0_CTL_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_23_21, ET0_ETXD5_A, SEL_ET0_0),
+       PINMUX_IPSR_MSEL(IP3_23_21, CAN1_TX_C, SEL_RCAN1_2),
+       PINMUX_IPSR_MSEL(IP3_23_21, ET0_LINK_C, SEL_ET0_CTL_2),
+       PINMUX_IPSR_MSEL(IP3_23_21, ET0_ETXD5_A, SEL_ET0_0),
 
        PINMUX_IPSR_DATA(IP3_26_24, EX_WAIT2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SD1_DAT1_A, SEL_SDHI1_0),
+       PINMUX_IPSR_MSEL(IP3_26_24, SD1_DAT1_A, SEL_SDHI1_0),
        PINMUX_IPSR_DATA(IP3_26_24, DACK2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, CAN1_RX_C, SEL_RCAN1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, ET0_MAGIC_C, SEL_ET0_CTL_2),
-       PINMUX_IPSR_MODSEL_DATA(IP3_26_24, ET0_ETXD6_A, SEL_ET0_0),
+       PINMUX_IPSR_MSEL(IP3_26_24, CAN1_RX_C, SEL_RCAN1_2),
+       PINMUX_IPSR_MSEL(IP3_26_24, ET0_MAGIC_C, SEL_ET0_CTL_2),
+       PINMUX_IPSR_MSEL(IP3_26_24, ET0_ETXD6_A, SEL_ET0_0),
 
        PINMUX_IPSR_DATA(IP3_29_27, DRACK0),
-       PINMUX_IPSR_MODSEL_DATA(IP3_29_27, SD1_DAT2_A, SEL_SDHI1_0),
+       PINMUX_IPSR_MSEL(IP3_29_27, SD1_DAT2_A, SEL_SDHI1_0),
        PINMUX_IPSR_DATA(IP3_29_27, ATAG),
-       PINMUX_IPSR_MODSEL_DATA(IP3_29_27, TCLK1_A, SEL_TMU_0),
+       PINMUX_IPSR_MSEL(IP3_29_27, TCLK1_A, SEL_TMU_0),
        PINMUX_IPSR_DATA(IP3_29_27, ET0_ETXD7),
 
        /* IPSR4 */
-       PINMUX_IPSR_MODSEL_DATA(IP4_2_0, HCTS0_A, SEL_HSCIF_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_2_0, CTS1_A, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP4_2_0, HCTS0_A, SEL_HSCIF_0),
+       PINMUX_IPSR_MSEL(IP4_2_0, CTS1_A, SEL_SCIF1_0),
        PINMUX_IPSR_DATA(IP4_2_0, VI0_FIELD),
-       PINMUX_IPSR_MODSEL_DATA(IP4_2_0, RMII0_RXD1_A, SEL_RMII_0),
+       PINMUX_IPSR_MSEL(IP4_2_0, RMII0_RXD1_A, SEL_RMII_0),
        PINMUX_IPSR_DATA(IP4_2_0, ET0_ERXD7),
 
-       PINMUX_IPSR_MODSEL_DATA(IP4_5_3, HRTS0_A, SEL_HSCIF_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_5_3, RTS1_A, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP4_5_3, HRTS0_A, SEL_HSCIF_0),
+       PINMUX_IPSR_MSEL(IP4_5_3, RTS1_A, SEL_SCIF1_0),
        PINMUX_IPSR_DATA(IP4_5_3, VI0_HSYNC),
-       PINMUX_IPSR_MODSEL_DATA(IP4_5_3, RMII0_TXD_EN_A, SEL_RMII_0),
+       PINMUX_IPSR_MSEL(IP4_5_3, RMII0_TXD_EN_A, SEL_RMII_0),
        PINMUX_IPSR_DATA(IP4_5_3, ET0_RX_DV),
 
-       PINMUX_IPSR_MODSEL_DATA(IP4_8_6, HSCK0_A, SEL_HSCIF_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_8_6, SCK1_A, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP4_8_6, HSCK0_A, SEL_HSCIF_0),
+       PINMUX_IPSR_MSEL(IP4_8_6, SCK1_A, SEL_SCIF1_0),
        PINMUX_IPSR_DATA(IP4_8_6, VI0_VSYNC),
-       PINMUX_IPSR_MODSEL_DATA(IP4_8_6, RMII0_RX_ER_A, SEL_RMII_0),
+       PINMUX_IPSR_MSEL(IP4_8_6, RMII0_RX_ER_A, SEL_RMII_0),
        PINMUX_IPSR_DATA(IP4_8_6, ET0_RX_ER),
 
-       PINMUX_IPSR_MODSEL_DATA(IP4_11_9, HRX0_A, SEL_HSCIF_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_11_9, RX1_A, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP4_11_9, HRX0_A, SEL_HSCIF_0),
+       PINMUX_IPSR_MSEL(IP4_11_9, RX1_A, SEL_SCIF1_0),
        PINMUX_IPSR_DATA(IP4_11_9, VI0_DATA0_VI0_B0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_11_9, RMII0_CRS_DV_A, SEL_RMII_0),
+       PINMUX_IPSR_MSEL(IP4_11_9, RMII0_CRS_DV_A, SEL_RMII_0),
        PINMUX_IPSR_DATA(IP4_11_9, ET0_CRS),
 
-       PINMUX_IPSR_MODSEL_DATA(IP4_14_12, HTX0_A, SEL_HSCIF_0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_14_12, TX1_A, SEL_SCIF1_0),
+       PINMUX_IPSR_MSEL(IP4_14_12, HTX0_A, SEL_HSCIF_0),
+       PINMUX_IPSR_MSEL(IP4_14_12, TX1_A, SEL_SCIF1_0),
        PINMUX_IPSR_DATA(IP4_14_12, VI0_DATA1_VI0_B1),
-       PINMUX_IPSR_MODSEL_DATA(IP4_14_12, RMII0_MDC_A, SEL_RMII_0),
+       PINMUX_IPSR_MSEL(IP4_14_12, RMII0_MDC_A, SEL_RMII_0),
        PINMUX_IPSR_DATA(IP4_14_12, ET0_COL),
 
-       PINMUX_IPSR_MODSEL_DATA(IP4_17_15, CTS0_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP4_17_15, CTS0_B, SEL_SCIF0_1),
        PINMUX_IPSR_DATA(IP4_17_15, VI0_DATA2_VI0_B2),
-       PINMUX_IPSR_MODSEL_DATA(IP4_17_15, RMII0_MDIO_A, SEL_RMII_0),
+       PINMUX_IPSR_MSEL(IP4_17_15, RMII0_MDIO_A, SEL_RMII_0),
        PINMUX_IPSR_DATA(IP4_17_15, ET0_MDC),
 
-       PINMUX_IPSR_MODSEL_DATA(IP4_19_18, RTS0_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP4_19_18, RTS0_B, SEL_SCIF0_1),
        PINMUX_IPSR_DATA(IP4_19_18, VI0_DATA3_VI0_B3),
-       PINMUX_IPSR_MODSEL_DATA(IP4_19_18, ET0_MDIO_A, SEL_ET0_0),
+       PINMUX_IPSR_MSEL(IP4_19_18, ET0_MDIO_A, SEL_ET0_0),
 
-       PINMUX_IPSR_MODSEL_DATA(IP4_21_20, SCK1_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP4_21_20, SCK1_B, SEL_SCIF1_1),
        PINMUX_IPSR_DATA(IP4_21_20, VI0_DATA4_VI0_B4),
-       PINMUX_IPSR_MODSEL_DATA(IP4_21_20, ET0_LINK_A, SEL_ET0_CTL_0),
+       PINMUX_IPSR_MSEL(IP4_21_20, ET0_LINK_A, SEL_ET0_CTL_0),
 
-       PINMUX_IPSR_MODSEL_DATA(IP4_23_22, RX1_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP4_23_22, RX1_B, SEL_SCIF1_1),
        PINMUX_IPSR_DATA(IP4_23_22, VI0_DATA5_VI0_B5),
-       PINMUX_IPSR_MODSEL_DATA(IP4_23_22, ET0_MAGIC_A, SEL_ET0_CTL_0),
+       PINMUX_IPSR_MSEL(IP4_23_22, ET0_MAGIC_A, SEL_ET0_CTL_0),
 
-       PINMUX_IPSR_MODSEL_DATA(IP4_25_24, TX1_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP4_25_24, TX1_B, SEL_SCIF1_1),
        PINMUX_IPSR_DATA(IP4_25_24, VI0_DATA6_VI0_G0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_25_24, ET0_PHY_INT_A, SEL_ET0_CTL_0),
+       PINMUX_IPSR_MSEL(IP4_25_24, ET0_PHY_INT_A, SEL_ET0_CTL_0),
 
-       PINMUX_IPSR_MODSEL_DATA(IP4_27_26, CTS1_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP4_27_26, CTS1_B, SEL_SCIF1_1),
        PINMUX_IPSR_DATA(IP4_27_26, VI0_DATA7_VI0_G1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP4_29_28, RTS1_B, SEL_SCIF1_1),
+       PINMUX_IPSR_MSEL(IP4_29_28, RTS1_B, SEL_SCIF1_1),
        PINMUX_IPSR_DATA(IP4_29_28, VI0_G2),
 
-       PINMUX_IPSR_MODSEL_DATA(IP4_31_30, SCK2_A, SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP4_31_30, SCK2_A, SEL_SCIF2_0),
        PINMUX_IPSR_DATA(IP4_31_30, VI0_G3),
 
        /* IPSR5 */
-       PINMUX_IPSR_MODSEL_DATA(IP5_2_0, SD2_CLK_A, SEL_SDHI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_2_0, RX2_A, SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP5_2_0, SD2_CLK_A, SEL_SDHI2_0),
+       PINMUX_IPSR_MSEL(IP5_2_0, RX2_A, SEL_SCIF2_0),
        PINMUX_IPSR_DATA(IP5_2_0, VI0_G4),
-       PINMUX_IPSR_MODSEL_DATA(IP5_2_0, ET0_RX_CLK_B, SEL_ET0_1),
+       PINMUX_IPSR_MSEL(IP5_2_0, ET0_RX_CLK_B, SEL_ET0_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP5_5_3, SD2_CMD_A, SEL_SDHI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_5_3, TX2_A, SEL_SCIF2_0),
+       PINMUX_IPSR_MSEL(IP5_5_3, SD2_CMD_A, SEL_SDHI2_0),
+       PINMUX_IPSR_MSEL(IP5_5_3, TX2_A, SEL_SCIF2_0),
        PINMUX_IPSR_DATA(IP5_5_3, VI0_G5),
-       PINMUX_IPSR_MODSEL_DATA(IP5_5_3, ET0_ERXD2_B, SEL_ET0_1),
+       PINMUX_IPSR_MSEL(IP5_5_3, ET0_ERXD2_B, SEL_ET0_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP5_8_6, SD2_DAT0_A, SEL_SDHI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_8_6, RX3_A, SEL_SCIF3_0),
+       PINMUX_IPSR_MSEL(IP5_8_6, SD2_DAT0_A, SEL_SDHI2_0),
+       PINMUX_IPSR_MSEL(IP5_8_6, RX3_A, SEL_SCIF3_0),
        PINMUX_IPSR_DATA(IP4_8_6, VI0_R0),
-       PINMUX_IPSR_MODSEL_DATA(IP4_8_6, ET0_ERXD2_B, SEL_ET0_1),
+       PINMUX_IPSR_MSEL(IP4_8_6, ET0_ERXD2_B, SEL_ET0_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP5_11_9, SD2_DAT1_A, SEL_SDHI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_11_9, TX3_A, SEL_SCIF3_0),
+       PINMUX_IPSR_MSEL(IP5_11_9, SD2_DAT1_A, SEL_SDHI2_0),
+       PINMUX_IPSR_MSEL(IP5_11_9, TX3_A, SEL_SCIF3_0),
        PINMUX_IPSR_DATA(IP5_11_9, VI0_R1),
-       PINMUX_IPSR_MODSEL_DATA(IP5_11_9, ET0_MDIO_B, SEL_ET0_1),
+       PINMUX_IPSR_MSEL(IP5_11_9, ET0_MDIO_B, SEL_ET0_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP5_14_12, SD2_DAT2_A, SEL_SDHI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_14_12, RX4_A, SEL_SCIF4_0),
+       PINMUX_IPSR_MSEL(IP5_14_12, SD2_DAT2_A, SEL_SDHI2_0),
+       PINMUX_IPSR_MSEL(IP5_14_12, RX4_A, SEL_SCIF4_0),
        PINMUX_IPSR_DATA(IP5_14_12, VI0_R2),
-       PINMUX_IPSR_MODSEL_DATA(IP5_14_12, ET0_LINK_B, SEL_ET0_CTL_1),
+       PINMUX_IPSR_MSEL(IP5_14_12, ET0_LINK_B, SEL_ET0_CTL_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP5_17_15, SD2_DAT3_A, SEL_SDHI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_17_15, TX4_A, SEL_SCIF4_0),
+       PINMUX_IPSR_MSEL(IP5_17_15, SD2_DAT3_A, SEL_SDHI2_0),
+       PINMUX_IPSR_MSEL(IP5_17_15, TX4_A, SEL_SCIF4_0),
        PINMUX_IPSR_DATA(IP5_17_15, VI0_R3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_17_15, ET0_MAGIC_B, SEL_ET0_CTL_1),
+       PINMUX_IPSR_MSEL(IP5_17_15, ET0_MAGIC_B, SEL_ET0_CTL_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP5_20_18, SD2_CD_A, SEL_SDHI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_20_18, RX5_A, SEL_SCIF5_0),
+       PINMUX_IPSR_MSEL(IP5_20_18, SD2_CD_A, SEL_SDHI2_0),
+       PINMUX_IPSR_MSEL(IP5_20_18, RX5_A, SEL_SCIF5_0),
        PINMUX_IPSR_DATA(IP5_20_18, VI0_R4),
-       PINMUX_IPSR_MODSEL_DATA(IP5_20_18, ET0_PHY_INT_B, SEL_ET0_CTL_1),
+       PINMUX_IPSR_MSEL(IP5_20_18, ET0_PHY_INT_B, SEL_ET0_CTL_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP5_22_21, SD2_WP_A, SEL_SDHI2_0),
-       PINMUX_IPSR_MODSEL_DATA(IP5_22_21, TX5_A, SEL_SCIF5_0),
+       PINMUX_IPSR_MSEL(IP5_22_21, SD2_WP_A, SEL_SDHI2_0),
+       PINMUX_IPSR_MSEL(IP5_22_21, TX5_A, SEL_SCIF5_0),
        PINMUX_IPSR_DATA(IP5_22_21, VI0_R5),
 
        PINMUX_IPSR_DATA(IP5_24_23, REF125CK),
        PINMUX_IPSR_DATA(IP5_24_23, ADTRG),
-       PINMUX_IPSR_MODSEL_DATA(IP5_24_23, RX5_C, SEL_SCIF5_2),
+       PINMUX_IPSR_MSEL(IP5_24_23, RX5_C, SEL_SCIF5_2),
        PINMUX_IPSR_DATA(IP5_26_25, REF50CK),
-       PINMUX_IPSR_MODSEL_DATA(IP5_26_25, CTS1_E, SEL_SCIF1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP5_26_25, HCTS0_D, SEL_HSCIF_3),
+       PINMUX_IPSR_MSEL(IP5_26_25, CTS1_E, SEL_SCIF1_3),
+       PINMUX_IPSR_MSEL(IP5_26_25, HCTS0_D, SEL_HSCIF_3),
 
        /* IPSR6 */
        PINMUX_IPSR_DATA(IP6_2_0, DU0_DR0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, SCIF_CLK_B, SEL_SCIF_CLK_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, HRX0_D, SEL_HSCIF_3),
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, IETX_A, SEL_IEBUS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_2_0, TCLKA_A, SEL_MTU2_CLK_0),
+       PINMUX_IPSR_MSEL(IP6_2_0, SCIF_CLK_B, SEL_SCIF_CLK_1),
+       PINMUX_IPSR_MSEL(IP6_2_0, HRX0_D, SEL_HSCIF_3),
+       PINMUX_IPSR_MSEL(IP6_2_0, IETX_A, SEL_IEBUS_0),
+       PINMUX_IPSR_MSEL(IP6_2_0, TCLKA_A, SEL_MTU2_CLK_0),
        PINMUX_IPSR_DATA(IP6_2_0, HIFD00),
 
        PINMUX_IPSR_DATA(IP6_5_3, DU0_DR1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SCK0_B, SEL_SCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, HTX0_D, SEL_HSCIF_3),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, IERX_A, SEL_IEBUS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_5_3, TCLKB_A, SEL_MTU2_CLK_0),
+       PINMUX_IPSR_MSEL(IP6_5_3, SCK0_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP6_5_3, HTX0_D, SEL_HSCIF_3),
+       PINMUX_IPSR_MSEL(IP6_5_3, IERX_A, SEL_IEBUS_0),
+       PINMUX_IPSR_MSEL(IP6_5_3, TCLKB_A, SEL_MTU2_CLK_0),
        PINMUX_IPSR_DATA(IP6_5_3, HIFD01),
 
        PINMUX_IPSR_DATA(IP6_7_6, DU0_DR2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_7_6, RX0_B, SEL_SCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_7_6, TCLKC_A, SEL_MTU2_CLK_0),
+       PINMUX_IPSR_MSEL(IP6_7_6, RX0_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP6_7_6, TCLKC_A, SEL_MTU2_CLK_0),
        PINMUX_IPSR_DATA(IP6_7_6, HIFD02),
 
        PINMUX_IPSR_DATA(IP6_9_8, DU0_DR3),
-       PINMUX_IPSR_MODSEL_DATA(IP6_9_8, TX0_B, SEL_SCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_9_8, TCLKD_A, SEL_MTU2_CLK_0),
+       PINMUX_IPSR_MSEL(IP6_9_8, TX0_B, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP6_9_8, TCLKD_A, SEL_MTU2_CLK_0),
        PINMUX_IPSR_DATA(IP6_9_8, HIFD03),
 
        PINMUX_IPSR_DATA(IP6_11_10, DU0_DR4),
-       PINMUX_IPSR_MODSEL_DATA(IP6_11_10, CTS0_C, SEL_SCIF0_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_11_10, TIOC0A_A, SEL_MTU2_CH0_0),
+       PINMUX_IPSR_MSEL(IP6_11_10, CTS0_C, SEL_SCIF0_2),
+       PINMUX_IPSR_MSEL(IP6_11_10, TIOC0A_A, SEL_MTU2_CH0_0),
        PINMUX_IPSR_DATA(IP6_11_10, HIFD04),
 
        PINMUX_IPSR_DATA(IP6_13_12, DU0_DR5),
-       PINMUX_IPSR_MODSEL_DATA(IP6_13_12, RTS0_C, SEL_SCIF0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_13_12, TIOC0B_A, SEL_MTU2_CH0_0),
+       PINMUX_IPSR_MSEL(IP6_13_12, RTS0_C, SEL_SCIF0_1),
+       PINMUX_IPSR_MSEL(IP6_13_12, TIOC0B_A, SEL_MTU2_CH0_0),
        PINMUX_IPSR_DATA(IP6_13_12, HIFD05),
 
        PINMUX_IPSR_DATA(IP6_15_14, DU0_DR6),
-       PINMUX_IPSR_MODSEL_DATA(IP6_15_14, SCK1_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_15_14, TIOC0C_A, SEL_MTU2_CH0_0),
+       PINMUX_IPSR_MSEL(IP6_15_14, SCK1_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP6_15_14, TIOC0C_A, SEL_MTU2_CH0_0),
        PINMUX_IPSR_DATA(IP6_15_14, HIFD06),
 
        PINMUX_IPSR_DATA(IP6_17_16, DU0_DR7),
-       PINMUX_IPSR_MODSEL_DATA(IP6_17_16, RX1_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_17_16, TIOC0D_A, SEL_MTU2_CH0_0),
+       PINMUX_IPSR_MSEL(IP6_17_16, RX1_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP6_17_16, TIOC0D_A, SEL_MTU2_CH0_0),
        PINMUX_IPSR_DATA(IP6_17_16, HIFD07),
 
        PINMUX_IPSR_DATA(IP6_20_18, DU0_DG0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_20_18, TX1_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_20_18, HSCK0_D, SEL_HSCIF_3),
-       PINMUX_IPSR_MODSEL_DATA(IP6_20_18, IECLK_A, SEL_IEBUS_0),
-       PINMUX_IPSR_MODSEL_DATA(IP6_20_18, TIOC1A_A, SEL_MTU2_CH1_0),
+       PINMUX_IPSR_MSEL(IP6_20_18, TX1_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP6_20_18, HSCK0_D, SEL_HSCIF_3),
+       PINMUX_IPSR_MSEL(IP6_20_18, IECLK_A, SEL_IEBUS_0),
+       PINMUX_IPSR_MSEL(IP6_20_18, TIOC1A_A, SEL_MTU2_CH1_0),
        PINMUX_IPSR_DATA(IP6_20_18, HIFD08),
 
        PINMUX_IPSR_DATA(IP6_23_21, DU0_DG1),
-       PINMUX_IPSR_MODSEL_DATA(IP6_23_21, CTS1_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP6_23_21, HRTS0_D, SEL_HSCIF_3),
-       PINMUX_IPSR_MODSEL_DATA(IP6_23_21, TIOC1B_A, SEL_MTU2_CH1_0),
+       PINMUX_IPSR_MSEL(IP6_23_21, CTS1_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP6_23_21, HRTS0_D, SEL_HSCIF_3),
+       PINMUX_IPSR_MSEL(IP6_23_21, TIOC1B_A, SEL_MTU2_CH1_0),
        PINMUX_IPSR_DATA(IP6_23_21, HIFD09),
 
        /* IPSR7 */
        PINMUX_IPSR_DATA(IP7_2_0, DU0_DG2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, RTS1_C, SEL_SCIF1_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, RMII0_MDC_B, SEL_RMII_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_2_0, TIOC2A_A, SEL_MTU2_CH2_0),
+       PINMUX_IPSR_MSEL(IP7_2_0, RTS1_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP7_2_0, RMII0_MDC_B, SEL_RMII_1),
+       PINMUX_IPSR_MSEL(IP7_2_0, TIOC2A_A, SEL_MTU2_CH2_0),
        PINMUX_IPSR_DATA(IP7_2_0, HIFD10),
 
        PINMUX_IPSR_DATA(IP7_5_3, DU0_DG3),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, SCK2_C, SEL_SCIF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, RMII0_MDIO_B, SEL_RMII_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_5_3, TIOC2B_A, SEL_MTU2_CH2_0),
+       PINMUX_IPSR_MSEL(IP7_5_3, SCK2_C, SEL_SCIF2_2),
+       PINMUX_IPSR_MSEL(IP7_5_3, RMII0_MDIO_B, SEL_RMII_1),
+       PINMUX_IPSR_MSEL(IP7_5_3, TIOC2B_A, SEL_MTU2_CH2_0),
        PINMUX_IPSR_DATA(IP7_5_3, HIFD11),
 
        PINMUX_IPSR_DATA(IP7_8_6, DU0_DG4),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, RX2_C, SEL_SCIF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, RMII0_CRS_DV_B, SEL_RMII_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_8_6, TIOC3A_A, SEL_MTU2_CH3_0),
+       PINMUX_IPSR_MSEL(IP7_8_6, RX2_C, SEL_SCIF2_2),
+       PINMUX_IPSR_MSEL(IP7_8_6, RMII0_CRS_DV_B, SEL_RMII_1),
+       PINMUX_IPSR_MSEL(IP7_8_6, TIOC3A_A, SEL_MTU2_CH3_0),
        PINMUX_IPSR_DATA(IP7_8_6, HIFD12),
 
        PINMUX_IPSR_DATA(IP7_11_9, DU0_DG5),
-       PINMUX_IPSR_MODSEL_DATA(IP7_11_9, TX2_C, SEL_SCIF2_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_11_9, RMII0_RX_ER_B, SEL_RMII_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_11_9, TIOC3B_A, SEL_MTU2_CH3_0),
+       PINMUX_IPSR_MSEL(IP7_11_9, TX2_C, SEL_SCIF2_2),
+       PINMUX_IPSR_MSEL(IP7_11_9, RMII0_RX_ER_B, SEL_RMII_1),
+       PINMUX_IPSR_MSEL(IP7_11_9, TIOC3B_A, SEL_MTU2_CH3_0),
        PINMUX_IPSR_DATA(IP7_11_9, HIFD13),
 
        PINMUX_IPSR_DATA(IP7_14_12, DU0_DG6),
-       PINMUX_IPSR_MODSEL_DATA(IP7_14_12, RX3_C, SEL_SCIF3_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_14_12, RMII0_RXD0_B, SEL_RMII_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_14_12, TIOC3C_A, SEL_MTU2_CH3_0),
+       PINMUX_IPSR_MSEL(IP7_14_12, RX3_C, SEL_SCIF3_2),
+       PINMUX_IPSR_MSEL(IP7_14_12, RMII0_RXD0_B, SEL_RMII_1),
+       PINMUX_IPSR_MSEL(IP7_14_12, TIOC3C_A, SEL_MTU2_CH3_0),
        PINMUX_IPSR_DATA(IP7_14_12, HIFD14),
 
        PINMUX_IPSR_DATA(IP7_17_15, DU0_DG7),
-       PINMUX_IPSR_MODSEL_DATA(IP7_17_15, TX3_C, SEL_SCIF3_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_17_15, RMII0_RXD1_B, SEL_RMII_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_17_15, TIOC3D_A, SEL_MTU2_CH3_0),
+       PINMUX_IPSR_MSEL(IP7_17_15, TX3_C, SEL_SCIF3_2),
+       PINMUX_IPSR_MSEL(IP7_17_15, RMII0_RXD1_B, SEL_RMII_1),
+       PINMUX_IPSR_MSEL(IP7_17_15, TIOC3D_A, SEL_MTU2_CH3_0),
        PINMUX_IPSR_DATA(IP7_17_15, HIFD15),
 
        PINMUX_IPSR_DATA(IP7_20_18, DU0_DB0),
-       PINMUX_IPSR_MODSEL_DATA(IP7_20_18, RX4_C, SEL_SCIF4_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_20_18, RMII0_TXD_EN_B, SEL_RMII_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_20_18, TIOC4A_A, SEL_MTU2_CH4_0),
+       PINMUX_IPSR_MSEL(IP7_20_18, RX4_C, SEL_SCIF4_2),
+       PINMUX_IPSR_MSEL(IP7_20_18, RMII0_TXD_EN_B, SEL_RMII_1),
+       PINMUX_IPSR_MSEL(IP7_20_18, TIOC4A_A, SEL_MTU2_CH4_0),
        PINMUX_IPSR_DATA(IP7_20_18, HIFCS),
 
        PINMUX_IPSR_DATA(IP7_23_21, DU0_DB1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_23_21, TX4_C, SEL_SCIF4_2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_23_21, RMII0_TXD0_B, SEL_RMII_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_23_21, TIOC4B_A, SEL_MTU2_CH4_0),
+       PINMUX_IPSR_MSEL(IP7_23_21, TX4_C, SEL_SCIF4_2),
+       PINMUX_IPSR_MSEL(IP7_23_21, RMII0_TXD0_B, SEL_RMII_1),
+       PINMUX_IPSR_MSEL(IP7_23_21, TIOC4B_A, SEL_MTU2_CH4_0),
        PINMUX_IPSR_DATA(IP7_23_21, HIFWR),
 
        PINMUX_IPSR_DATA(IP7_26_24, DU0_DB2),
-       PINMUX_IPSR_MODSEL_DATA(IP7_26_24, RX5_B, SEL_SCIF5_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_26_24, RMII0_TXD1_B, SEL_RMII_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_26_24, TIOC4C_A, SEL_MTU2_CH4_0),
+       PINMUX_IPSR_MSEL(IP7_26_24, RX5_B, SEL_SCIF5_1),
+       PINMUX_IPSR_MSEL(IP7_26_24, RMII0_TXD1_B, SEL_RMII_1),
+       PINMUX_IPSR_MSEL(IP7_26_24, TIOC4C_A, SEL_MTU2_CH4_0),
 
        PINMUX_IPSR_DATA(IP7_28_27, DU0_DB3),
-       PINMUX_IPSR_MODSEL_DATA(IP7_28_27, TX5_B, SEL_SCIF5_1),
-       PINMUX_IPSR_MODSEL_DATA(IP7_28_27, TIOC4D_A, SEL_MTU2_CH4_0),
+       PINMUX_IPSR_MSEL(IP7_28_27, TX5_B, SEL_SCIF5_1),
+       PINMUX_IPSR_MSEL(IP7_28_27, TIOC4D_A, SEL_MTU2_CH4_0),
        PINMUX_IPSR_DATA(IP7_28_27, HIFRD),
 
        PINMUX_IPSR_DATA(IP7_30_29, DU0_DB4),
@@ -1107,251 +1107,251 @@ static const u16 pinmux_data[] = {
        PINMUX_IPSR_DATA(IP8_3_2, HIFRDY),
 
        PINMUX_IPSR_DATA(IP8_5_4, DU0_DB7),
-       PINMUX_IPSR_MODSEL_DATA(IP8_5_4, SSI_SCK0_B, SEL_SSI0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_5_4, HIFEBL_B, SEL_HIF_1),
+       PINMUX_IPSR_MSEL(IP8_5_4, SSI_SCK0_B, SEL_SSI0_1),
+       PINMUX_IPSR_MSEL(IP8_5_4, HIFEBL_B, SEL_HIF_1),
 
        PINMUX_IPSR_DATA(IP8_7_6, DU0_DOTCLKIN),
-       PINMUX_IPSR_MODSEL_DATA(IP8_7_6, HSPI_CS0_C, SEL_HSPI_2),
-       PINMUX_IPSR_MODSEL_DATA(IP8_7_6, SSI_WS0_B, SEL_SSI0_1),
+       PINMUX_IPSR_MSEL(IP8_7_6, HSPI_CS0_C, SEL_HSPI_2),
+       PINMUX_IPSR_MSEL(IP8_7_6, SSI_WS0_B, SEL_SSI0_1),
 
        PINMUX_IPSR_DATA(IP8_9_8, DU0_DOTCLKOUT),
-       PINMUX_IPSR_MODSEL_DATA(IP8_9_8, HSPI_CLK0_C, SEL_HSPI_2),
-       PINMUX_IPSR_MODSEL_DATA(IP8_9_8, SSI_SDATA0_B, SEL_SSI0_1),
+       PINMUX_IPSR_MSEL(IP8_9_8, HSPI_CLK0_C, SEL_HSPI_2),
+       PINMUX_IPSR_MSEL(IP8_9_8, SSI_SDATA0_B, SEL_SSI0_1),
 
        PINMUX_IPSR_DATA(IP8_11_10, DU0_EXHSYNC_DU0_HSYNC),
-       PINMUX_IPSR_MODSEL_DATA(IP8_11_10, HSPI_TX0_C, SEL_HSPI_2),
-       PINMUX_IPSR_MODSEL_DATA(IP8_11_10, SSI_SCK1_B, SEL_SSI1_1),
+       PINMUX_IPSR_MSEL(IP8_11_10, HSPI_TX0_C, SEL_HSPI_2),
+       PINMUX_IPSR_MSEL(IP8_11_10, SSI_SCK1_B, SEL_SSI1_1),
 
        PINMUX_IPSR_DATA(IP8_13_12, DU0_EXVSYNC_DU0_VSYNC),
-       PINMUX_IPSR_MODSEL_DATA(IP8_13_12, HSPI_RX0_C, SEL_HSPI_2),
-       PINMUX_IPSR_MODSEL_DATA(IP8_13_12, SSI_WS1_B, SEL_SSI1_1),
+       PINMUX_IPSR_MSEL(IP8_13_12, HSPI_RX0_C, SEL_HSPI_2),
+       PINMUX_IPSR_MSEL(IP8_13_12, SSI_WS1_B, SEL_SSI1_1),
 
        PINMUX_IPSR_DATA(IP8_15_14, DU0_EXODDF_DU0_ODDF),
-       PINMUX_IPSR_MODSEL_DATA(IP8_15_14, CAN0_RX_B, SEL_RCAN0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_15_14, HSCK0_B, SEL_HSCIF_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_15_14, SSI_SDATA1_B, SEL_SSI1_1),
+       PINMUX_IPSR_MSEL(IP8_15_14, CAN0_RX_B, SEL_RCAN0_1),
+       PINMUX_IPSR_MSEL(IP8_15_14, HSCK0_B, SEL_HSCIF_1),
+       PINMUX_IPSR_MSEL(IP8_15_14, SSI_SDATA1_B, SEL_SSI1_1),
 
        PINMUX_IPSR_DATA(IP8_17_16, DU0_DISP),
-       PINMUX_IPSR_MODSEL_DATA(IP8_17_16, CAN0_TX_B, SEL_RCAN0_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_17_16, HRX0_B, SEL_HSCIF_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_17_16, AUDIO_CLKA_B, SEL_AUDIO_CLKA_1),
+       PINMUX_IPSR_MSEL(IP8_17_16, CAN0_TX_B, SEL_RCAN0_1),
+       PINMUX_IPSR_MSEL(IP8_17_16, HRX0_B, SEL_HSCIF_1),
+       PINMUX_IPSR_MSEL(IP8_17_16, AUDIO_CLKA_B, SEL_AUDIO_CLKA_1),
 
        PINMUX_IPSR_DATA(IP8_19_18, DU0_CDE),
-       PINMUX_IPSR_MODSEL_DATA(IP8_19_18, HTX0_B, SEL_HSCIF_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_19_18, AUDIO_CLKB_B, SEL_AUDIO_CLKB_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_19_18, LCD_VCPWC_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP8_19_18, HTX0_B, SEL_HSCIF_1),
+       PINMUX_IPSR_MSEL(IP8_19_18, AUDIO_CLKB_B, SEL_AUDIO_CLKB_1),
+       PINMUX_IPSR_MSEL(IP8_19_18, LCD_VCPWC_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP8_22_20, IRQ0_A, SEL_INTC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_22_20, HSPI_TX_B, SEL_HSPI_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_22_20, RX3_E, SEL_SCIF3_4),
+       PINMUX_IPSR_MSEL(IP8_22_20, IRQ0_A, SEL_INTC_0),
+       PINMUX_IPSR_MSEL(IP8_22_20, HSPI_TX_B, SEL_HSPI_1),
+       PINMUX_IPSR_MSEL(IP8_22_20, RX3_E, SEL_SCIF3_4),
        PINMUX_IPSR_DATA(IP8_22_20, ET0_ERXD0),
 
-       PINMUX_IPSR_MODSEL_DATA(IP8_25_23, IRQ1_A, SEL_INTC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_25_23, HSPI_RX_B, SEL_HSPI_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_25_23, TX3_E, SEL_SCIF3_4),
+       PINMUX_IPSR_MSEL(IP8_25_23, IRQ1_A, SEL_INTC_0),
+       PINMUX_IPSR_MSEL(IP8_25_23, HSPI_RX_B, SEL_HSPI_1),
+       PINMUX_IPSR_MSEL(IP8_25_23, TX3_E, SEL_SCIF3_4),
        PINMUX_IPSR_DATA(IP8_25_23, ET0_ERXD1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP8_27_26, IRQ2_A, SEL_INTC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_27_26, CTS0_A, SEL_SCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_27_26, HCTS0_B, SEL_HSCIF_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_27_26, ET0_ERXD2_A, SEL_ET0_0),
+       PINMUX_IPSR_MSEL(IP8_27_26, IRQ2_A, SEL_INTC_0),
+       PINMUX_IPSR_MSEL(IP8_27_26, CTS0_A, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP8_27_26, HCTS0_B, SEL_HSCIF_1),
+       PINMUX_IPSR_MSEL(IP8_27_26, ET0_ERXD2_A, SEL_ET0_0),
 
-       PINMUX_IPSR_MODSEL_DATA(IP8_29_28, IRQ3_A, SEL_INTC_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_29_28, RTS0_A, SEL_SCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP8_29_28, HRTS0_B, SEL_HSCIF_1),
-       PINMUX_IPSR_MODSEL_DATA(IP8_29_28, ET0_ERXD3_A, SEL_ET0_0),
+       PINMUX_IPSR_MSEL(IP8_29_28, IRQ3_A, SEL_INTC_0),
+       PINMUX_IPSR_MSEL(IP8_29_28, RTS0_A, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP8_29_28, HRTS0_B, SEL_HSCIF_1),
+       PINMUX_IPSR_MSEL(IP8_29_28, ET0_ERXD3_A, SEL_ET0_0),
 
        /* IPSR9 */
-       PINMUX_IPSR_MODSEL_DATA(IP9_1_0, VI1_CLK_A, SEL_VIN1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_1_0, FD0_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_1_0, LCD_DATA0_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_1_0, VI1_CLK_A, SEL_VIN1_0),
+       PINMUX_IPSR_MSEL(IP9_1_0, FD0_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP9_1_0, LCD_DATA0_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_3_2, VI1_0_A, SEL_VIN1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_3_2, FD1_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_3_2, LCD_DATA1_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_3_2, VI1_0_A, SEL_VIN1_0),
+       PINMUX_IPSR_MSEL(IP9_3_2, FD1_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP9_3_2, LCD_DATA1_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_5_4, VI1_1_A, SEL_VIN1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_5_4, FD2_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_5_4, LCD_DATA2_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_5_4, VI1_1_A, SEL_VIN1_0),
+       PINMUX_IPSR_MSEL(IP9_5_4, FD2_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP9_5_4, LCD_DATA2_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_7_6, VI1_2_A, SEL_VIN1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_7_6, FD3_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_7_6, LCD_DATA3_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_7_6, VI1_2_A, SEL_VIN1_0),
+       PINMUX_IPSR_MSEL(IP9_7_6, FD3_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP9_7_6, LCD_DATA3_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_9_8, VI1_3_A, SEL_VIN1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_9_8, FD4_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_9_8, LCD_DATA4_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_9_8, VI1_3_A, SEL_VIN1_0),
+       PINMUX_IPSR_MSEL(IP9_9_8, FD4_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP9_9_8, LCD_DATA4_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_10, VI1_4_A, SEL_VIN1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_10, FD5_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_11_10, LCD_DATA5_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_11_10, VI1_4_A, SEL_VIN1_0),
+       PINMUX_IPSR_MSEL(IP9_11_10, FD5_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP9_11_10, LCD_DATA5_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_13_12, VI1_5_A, SEL_VIN1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_13_12, FD6_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_13_12, LCD_DATA6_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_13_12, VI1_5_A, SEL_VIN1_0),
+       PINMUX_IPSR_MSEL(IP9_13_12, FD6_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP9_13_12, LCD_DATA6_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_14, VI1_6_A, SEL_VIN1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_14, FD7_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_15_14, LCD_DATA7_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_15_14, VI1_6_A, SEL_VIN1_0),
+       PINMUX_IPSR_MSEL(IP9_15_14, FD7_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP9_15_14, LCD_DATA7_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_17_16, VI1_7_A, SEL_VIN1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_17_16, FCE_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_17_16, LCD_DATA8_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_17_16, VI1_7_A, SEL_VIN1_0),
+       PINMUX_IPSR_MSEL(IP9_17_16, FCE_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP9_17_16, LCD_DATA8_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_19_18, SSI_SCK0_A, SEL_SSI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_19_18, TIOC1A_B, SEL_MTU2_CH1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_19_18, LCD_DATA9_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_19_18, SSI_SCK0_A, SEL_SSI0_0),
+       PINMUX_IPSR_MSEL(IP9_19_18, TIOC1A_B, SEL_MTU2_CH1_1),
+       PINMUX_IPSR_MSEL(IP9_19_18, LCD_DATA9_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_21_20, SSI_WS0_A, SEL_SSI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_21_20, TIOC1B_B, SEL_MTU2_CH1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_21_20, LCD_DATA10_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_21_20, SSI_WS0_A, SEL_SSI0_0),
+       PINMUX_IPSR_MSEL(IP9_21_20, TIOC1B_B, SEL_MTU2_CH1_1),
+       PINMUX_IPSR_MSEL(IP9_21_20, LCD_DATA10_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_23_22, SSI_SDATA0_A, SEL_SSI0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_23_22, VI1_0_B, SEL_VIN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_23_22, TIOC2A_B, SEL_MTU2_CH2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_23_22, LCD_DATA11_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_23_22, SSI_SDATA0_A, SEL_SSI0_0),
+       PINMUX_IPSR_MSEL(IP9_23_22, VI1_0_B, SEL_VIN1_1),
+       PINMUX_IPSR_MSEL(IP9_23_22, TIOC2A_B, SEL_MTU2_CH2_1),
+       PINMUX_IPSR_MSEL(IP9_23_22, LCD_DATA11_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_25_24, SSI_SCK1_A, SEL_SSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_25_24, VI1_1_B, SEL_VIN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_25_24, TIOC2B_B, SEL_MTU2_CH2_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_25_24, LCD_DATA12_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_25_24, SSI_SCK1_A, SEL_SSI1_0),
+       PINMUX_IPSR_MSEL(IP9_25_24, VI1_1_B, SEL_VIN1_1),
+       PINMUX_IPSR_MSEL(IP9_25_24, TIOC2B_B, SEL_MTU2_CH2_1),
+       PINMUX_IPSR_MSEL(IP9_25_24, LCD_DATA12_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_27_26, SSI_WS1_A, SEL_SSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_27_26, VI1_2_B, SEL_VIN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_27_26, LCD_DATA13_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_27_26, SSI_WS1_A, SEL_SSI1_0),
+       PINMUX_IPSR_MSEL(IP9_27_26, VI1_2_B, SEL_VIN1_1),
+       PINMUX_IPSR_MSEL(IP9_27_26, LCD_DATA13_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP9_29_28, SSI_SDATA1_A, SEL_SSI1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP9_29_28, VI1_3_B, SEL_VIN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP9_29_28, LCD_DATA14_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP9_29_28, SSI_SDATA1_A, SEL_SSI1_0),
+       PINMUX_IPSR_MSEL(IP9_29_28, VI1_3_B, SEL_VIN1_1),
+       PINMUX_IPSR_MSEL(IP9_29_28, LCD_DATA14_B, SEL_LCDC_1),
 
        /* IPSR10 */
        PINMUX_IPSR_DATA(IP10_2_0, SSI_SCK23),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, VI1_4_B, SEL_VIN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, RX1_D, SEL_SCIF1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, FCLE_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_2_0, LCD_DATA15_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP10_2_0, VI1_4_B, SEL_VIN1_1),
+       PINMUX_IPSR_MSEL(IP10_2_0, RX1_D, SEL_SCIF1_3),
+       PINMUX_IPSR_MSEL(IP10_2_0, FCLE_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP10_2_0, LCD_DATA15_B, SEL_LCDC_1),
 
        PINMUX_IPSR_DATA(IP10_5_3, SSI_WS23),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, VI1_5_B, SEL_VIN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, TX1_D, SEL_SCIF1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, HSCK0_C, SEL_HSCIF_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, FALE_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_5_3, LCD_DON_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP10_5_3, VI1_5_B, SEL_VIN1_1),
+       PINMUX_IPSR_MSEL(IP10_5_3, TX1_D, SEL_SCIF1_3),
+       PINMUX_IPSR_MSEL(IP10_5_3, HSCK0_C, SEL_HSCIF_2),
+       PINMUX_IPSR_MSEL(IP10_5_3, FALE_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP10_5_3, LCD_DON_B, SEL_LCDC_1),
 
        PINMUX_IPSR_DATA(IP10_8_6, SSI_SDATA2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_8_6, VI1_6_B, SEL_VIN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_8_6, HRX0_C, SEL_HSCIF_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_8_6, FRE_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_8_6, LCD_CL1_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP10_8_6, VI1_6_B, SEL_VIN1_1),
+       PINMUX_IPSR_MSEL(IP10_8_6, HRX0_C, SEL_HSCIF_2),
+       PINMUX_IPSR_MSEL(IP10_8_6, FRE_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP10_8_6, LCD_CL1_B, SEL_LCDC_1),
 
        PINMUX_IPSR_DATA(IP10_11_9, SSI_SDATA3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, VI1_7_B, SEL_VIN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, HTX0_C, SEL_HSCIF_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, FWE_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_11_9, LCD_CL2_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP10_11_9, VI1_7_B, SEL_VIN1_1),
+       PINMUX_IPSR_MSEL(IP10_11_9, HTX0_C, SEL_HSCIF_2),
+       PINMUX_IPSR_MSEL(IP10_11_9, FWE_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP10_11_9, LCD_CL2_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, AUDIO_CLKA_A, SEL_AUDIO_CLKA_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, VI1_CLK_B, SEL_VIN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SCK1_D, SEL_SCIF1_3),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, IECLK_B, SEL_IEBUS_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_14_12, LCD_FLM_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP10_14_12, AUDIO_CLKA_A, SEL_AUDIO_CLKA_0),
+       PINMUX_IPSR_MSEL(IP10_14_12, VI1_CLK_B, SEL_VIN1_1),
+       PINMUX_IPSR_MSEL(IP10_14_12, SCK1_D, SEL_SCIF1_3),
+       PINMUX_IPSR_MSEL(IP10_14_12, IECLK_B, SEL_IEBUS_1),
+       PINMUX_IPSR_MSEL(IP10_14_12, LCD_FLM_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP10_15, AUDIO_CLKB_A, SEL_AUDIO_CLKB_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_15, LCD_CLK_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP10_15, AUDIO_CLKB_A, SEL_AUDIO_CLKB_0),
+       PINMUX_IPSR_MSEL(IP10_15, LCD_CLK_B, SEL_LCDC_1),
 
        PINMUX_IPSR_DATA(IP10_18_16, AUDIO_CLKC),
-       PINMUX_IPSR_MODSEL_DATA(IP10_18_16, SCK1_E, SEL_SCIF1_4),
-       PINMUX_IPSR_MODSEL_DATA(IP10_18_16, HCTS0_C, SEL_HSCIF_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_18_16, FRB_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_18_16, LCD_VEPWC_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP10_18_16, SCK1_E, SEL_SCIF1_4),
+       PINMUX_IPSR_MSEL(IP10_18_16, HCTS0_C, SEL_HSCIF_2),
+       PINMUX_IPSR_MSEL(IP10_18_16, FRB_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP10_18_16, LCD_VEPWC_B, SEL_LCDC_1),
 
        PINMUX_IPSR_DATA(IP10_21_19, AUDIO_CLKOUT),
-       PINMUX_IPSR_MODSEL_DATA(IP10_21_19, TX1_E, SEL_SCIF1_4),
-       PINMUX_IPSR_MODSEL_DATA(IP10_21_19, HRTS0_C, SEL_HSCIF_2),
-       PINMUX_IPSR_MODSEL_DATA(IP10_21_19, FSE_B, SEL_FLCTL_1),
-       PINMUX_IPSR_MODSEL_DATA(IP10_21_19, LCD_M_DISP_B, SEL_LCDC_1),
+       PINMUX_IPSR_MSEL(IP10_21_19, TX1_E, SEL_SCIF1_4),
+       PINMUX_IPSR_MSEL(IP10_21_19, HRTS0_C, SEL_HSCIF_2),
+       PINMUX_IPSR_MSEL(IP10_21_19, FSE_B, SEL_FLCTL_1),
+       PINMUX_IPSR_MSEL(IP10_21_19, LCD_M_DISP_B, SEL_LCDC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP10_22, CAN_CLK_A, SEL_RCAN_CLK_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_22, RX4_D, SEL_SCIF4_3),
+       PINMUX_IPSR_MSEL(IP10_22, CAN_CLK_A, SEL_RCAN_CLK_0),
+       PINMUX_IPSR_MSEL(IP10_22, RX4_D, SEL_SCIF4_3),
 
-       PINMUX_IPSR_MODSEL_DATA(IP10_24_23, CAN0_TX_A, SEL_RCAN0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_24_23, TX4_D, SEL_SCIF4_3),
+       PINMUX_IPSR_MSEL(IP10_24_23, CAN0_TX_A, SEL_RCAN0_0),
+       PINMUX_IPSR_MSEL(IP10_24_23, TX4_D, SEL_SCIF4_3),
        PINMUX_IPSR_DATA(IP10_24_23, MLB_CLK),
 
-       PINMUX_IPSR_MODSEL_DATA(IP10_25, CAN1_RX_A, SEL_RCAN1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_25, IRQ1_B, SEL_INTC_1),
+       PINMUX_IPSR_MSEL(IP10_25, CAN1_RX_A, SEL_RCAN1_0),
+       PINMUX_IPSR_MSEL(IP10_25, IRQ1_B, SEL_INTC_1),
 
-       PINMUX_IPSR_MODSEL_DATA(IP10_27_26, CAN0_RX_A, SEL_RCAN0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_27_26, IRQ0_B, SEL_INTC_1),
+       PINMUX_IPSR_MSEL(IP10_27_26, CAN0_RX_A, SEL_RCAN0_0),
+       PINMUX_IPSR_MSEL(IP10_27_26, IRQ0_B, SEL_INTC_1),
        PINMUX_IPSR_DATA(IP10_27_26, MLB_SIG),
 
-       PINMUX_IPSR_MODSEL_DATA(IP10_29_28, CAN1_TX_A, SEL_RCAN1_0),
-       PINMUX_IPSR_MODSEL_DATA(IP10_29_28, TX5_C, SEL_SCIF1_2),
+       PINMUX_IPSR_MSEL(IP10_29_28, CAN1_TX_A, SEL_RCAN1_0),
+       PINMUX_IPSR_MSEL(IP10_29_28, TX5_C, SEL_SCIF1_2),
        PINMUX_IPSR_DATA(IP10_29_28, MLB_DAT),
 
        /* IPSR11 */
        PINMUX_IPSR_DATA(IP11_0, SCL1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_0, SCIF_CLK_C, SEL_SCIF_CLK_2),
+       PINMUX_IPSR_MSEL(IP11_0, SCIF_CLK_C, SEL_SCIF_CLK_2),
 
        PINMUX_IPSR_DATA(IP11_1, SDA1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_0, RX1_E, SEL_SCIF1_4),
+       PINMUX_IPSR_MSEL(IP11_0, RX1_E, SEL_SCIF1_4),
 
        PINMUX_IPSR_DATA(IP11_2, SDA0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_2, HIFEBL_A, SEL_HIF_0),
+       PINMUX_IPSR_MSEL(IP11_2, HIFEBL_A, SEL_HIF_0),
 
        PINMUX_IPSR_DATA(IP11_3, SDSELF),
-       PINMUX_IPSR_MODSEL_DATA(IP11_3, RTS1_E, SEL_SCIF1_3),
+       PINMUX_IPSR_MSEL(IP11_3, RTS1_E, SEL_SCIF1_3),
 
-       PINMUX_IPSR_MODSEL_DATA(IP11_6_4, SCIF_CLK_A, SEL_SCIF_CLK_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_6_4, HSPI_CLK_A, SEL_HSPI_0),
+       PINMUX_IPSR_MSEL(IP11_6_4, SCIF_CLK_A, SEL_SCIF_CLK_0),
+       PINMUX_IPSR_MSEL(IP11_6_4, HSPI_CLK_A, SEL_HSPI_0),
        PINMUX_IPSR_DATA(IP11_6_4, VI0_CLK),
-       PINMUX_IPSR_MODSEL_DATA(IP11_6_4, RMII0_TXD0_A, SEL_RMII_0),
+       PINMUX_IPSR_MSEL(IP11_6_4, RMII0_TXD0_A, SEL_RMII_0),
        PINMUX_IPSR_DATA(IP11_6_4, ET0_ERXD4),
 
-       PINMUX_IPSR_MODSEL_DATA(IP11_9_7, SCK0_A, SEL_SCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_9_7, HSPI_CS_A, SEL_HSPI_0),
+       PINMUX_IPSR_MSEL(IP11_9_7, SCK0_A, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP11_9_7, HSPI_CS_A, SEL_HSPI_0),
        PINMUX_IPSR_DATA(IP11_9_7, VI0_CLKENB),
-       PINMUX_IPSR_MODSEL_DATA(IP11_9_7, RMII0_TXD1_A, SEL_RMII_0),
+       PINMUX_IPSR_MSEL(IP11_9_7, RMII0_TXD1_A, SEL_RMII_0),
        PINMUX_IPSR_DATA(IP11_9_7, ET0_ERXD5),
 
-       PINMUX_IPSR_MODSEL_DATA(IP11_11_10, RX0_A, SEL_SCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_11_10, HSPI_RX_A, SEL_HSPI_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_11_10, RMII0_RXD0_A, SEL_RMII_0),
+       PINMUX_IPSR_MSEL(IP11_11_10, RX0_A, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP11_11_10, HSPI_RX_A, SEL_HSPI_0),
+       PINMUX_IPSR_MSEL(IP11_11_10, RMII0_RXD0_A, SEL_RMII_0),
        PINMUX_IPSR_DATA(IP11_11_10, ET0_ERXD6),
 
-       PINMUX_IPSR_MODSEL_DATA(IP11_12, TX0_A, SEL_SCIF0_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_12, HSPI_TX_A, SEL_HSPI_0),
+       PINMUX_IPSR_MSEL(IP11_12, TX0_A, SEL_SCIF0_0),
+       PINMUX_IPSR_MSEL(IP11_12, HSPI_TX_A, SEL_HSPI_0),
 
        PINMUX_IPSR_DATA(IP11_15_13, PENC1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_15_13, TX3_D, SEL_SCIF3_3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_15_13, CAN1_TX_B,  SEL_RCAN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_15_13, TX5_D, SEL_SCIF5_3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_15_13, IETX_B, SEL_IEBUS_1),
+       PINMUX_IPSR_MSEL(IP11_15_13, TX3_D, SEL_SCIF3_3),
+       PINMUX_IPSR_MSEL(IP11_15_13, CAN1_TX_B,  SEL_RCAN1_1),
+       PINMUX_IPSR_MSEL(IP11_15_13, TX5_D, SEL_SCIF5_3),
+       PINMUX_IPSR_MSEL(IP11_15_13, IETX_B, SEL_IEBUS_1),
 
        PINMUX_IPSR_DATA(IP11_18_16, USB_OVC1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_18_16, RX3_D, SEL_SCIF3_3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_18_16, CAN1_RX_B, SEL_RCAN1_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_18_16, RX5_D, SEL_SCIF5_3),
-       PINMUX_IPSR_MODSEL_DATA(IP11_18_16, IERX_B, SEL_IEBUS_1),
+       PINMUX_IPSR_MSEL(IP11_18_16, RX3_D, SEL_SCIF3_3),
+       PINMUX_IPSR_MSEL(IP11_18_16, CAN1_RX_B, SEL_RCAN1_1),
+       PINMUX_IPSR_MSEL(IP11_18_16, RX5_D, SEL_SCIF5_3),
+       PINMUX_IPSR_MSEL(IP11_18_16, IERX_B, SEL_IEBUS_1),
 
        PINMUX_IPSR_DATA(IP11_20_19, DREQ0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_20_19, SD1_CLK_A, SEL_SDHI1_0),
+       PINMUX_IPSR_MSEL(IP11_20_19, SD1_CLK_A, SEL_SDHI1_0),
        PINMUX_IPSR_DATA(IP11_20_19, ET0_TX_EN),
 
        PINMUX_IPSR_DATA(IP11_22_21, DACK0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_22_21, SD1_DAT3_A, SEL_SDHI1_0),
+       PINMUX_IPSR_MSEL(IP11_22_21, SD1_DAT3_A, SEL_SDHI1_0),
        PINMUX_IPSR_DATA(IP11_22_21, ET0_TX_ER),
 
        PINMUX_IPSR_DATA(IP11_25_23, DREQ1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_25_23, HSPI_CLK_B, SEL_HSPI_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_25_23, RX4_B, SEL_SCIF4_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_25_23, ET0_PHY_INT_C, SEL_ET0_CTL_0),
-       PINMUX_IPSR_MODSEL_DATA(IP11_25_23, ET0_TX_CLK_A, SEL_ET0_0),
+       PINMUX_IPSR_MSEL(IP11_25_23, HSPI_CLK_B, SEL_HSPI_1),
+       PINMUX_IPSR_MSEL(IP11_25_23, RX4_B, SEL_SCIF4_1),
+       PINMUX_IPSR_MSEL(IP11_25_23, ET0_PHY_INT_C, SEL_ET0_CTL_0),
+       PINMUX_IPSR_MSEL(IP11_25_23, ET0_TX_CLK_A, SEL_ET0_0),
 
        PINMUX_IPSR_DATA(IP11_27_26, DACK1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_27_26, HSPI_CS_B, SEL_HSPI_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_27_26, TX4_B, SEL_SCIF3_1),
-       PINMUX_IPSR_MODSEL_DATA(IP11_27_26, ET0_RX_CLK_A, SEL_ET0_0),
+       PINMUX_IPSR_MSEL(IP11_27_26, HSPI_CS_B, SEL_HSPI_1),
+       PINMUX_IPSR_MSEL(IP11_27_26, TX4_B, SEL_SCIF3_1),
+       PINMUX_IPSR_MSEL(IP11_27_26, ET0_RX_CLK_A, SEL_ET0_0),
 
        PINMUX_IPSR_DATA(IP11_28, PRESETOUT),
        PINMUX_IPSR_DATA(IP11_28, ST_CLKOUT),
@@ -2445,6 +2445,6 @@ const struct sh_pfc_soc_info sh7734_pinmux_info = {
        .cfg_regs = pinmux_config_regs,
        .data_regs = pinmux_data_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index 625661a88c52703931e6c574dde016f379886a70..0555a1fe076ed353c4627239534a702e1b810c5c 100644 (file)
@@ -2238,6 +2238,6 @@ const struct sh_pfc_soc_info sh7757_pinmux_info = {
        .cfg_regs = pinmux_config_regs,
        .data_regs = pinmux_data_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index b38dd7e3e375806c8cba0e3e809758fe0bd534de..1934cbec39651cb7426cadf005ec397e555e6e53 100644 (file)
@@ -1269,6 +1269,6 @@ const struct sh_pfc_soc_info sh7785_pinmux_info = {
        .cfg_regs = pinmux_config_regs,
        .data_regs = pinmux_data_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index 6cb4e0aaf20b0217598b884eeececa57f58f80f5..c98585d80de8df4e176cdcd1f9636d77bded3bb2 100644 (file)
@@ -813,6 +813,6 @@ const struct sh_pfc_soc_info sh7786_pinmux_info = {
        .cfg_regs = pinmux_config_regs,
        .data_regs = pinmux_data_regs,
 
-       .gpio_data = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
index a3fcb2284d91117437eb1afc3fc6189f078bd3b7..3f60c900645eebd51a8af4f4fe44a269a25ebecd 100644 (file)
@@ -554,8 +554,8 @@ const struct sh_pfc_soc_info shx3_pinmux_info = {
        .nr_pins        = ARRAY_SIZE(pinmux_pins),
        .func_gpios     = pinmux_func_gpios,
        .nr_func_gpios  = ARRAY_SIZE(pinmux_func_gpios),
-       .gpio_data      = pinmux_data,
-       .gpio_data_size = ARRAY_SIZE(pinmux_data),
+       .pinmux_data    = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
        .cfg_regs       = pinmux_config_regs,
        .data_regs      = pinmux_data_regs,
 };
index 15afd49fd4e33b5e72a9fe4b8c618d5ef4d2aa3a..7b373d43d981899f1d85e5eab6846025235f1d89 100644 (file)
@@ -52,6 +52,29 @@ struct sh_pfc_pin_group {
        unsigned int nr_pins;
 };
 
+/*
+ * Using union vin_data saves memory occupied by the VIN data pins.
+ * VIN_DATA_PIN_GROUP() is a macro used to describe the VIN pin groups
+ * in this case.
+ */
+#define VIN_DATA_PIN_GROUP(n, s)                               \
+       {                                                       \
+               .name = #n#s,                                   \
+               .pins = n##_pins.data##s,                       \
+               .mux = n##_mux.data##s,                         \
+               .nr_pins = ARRAY_SIZE(n##_pins.data##s),        \
+       }
+
+union vin_data {
+       unsigned int data24[24];
+       unsigned int data20[20];
+       unsigned int data16[16];
+       unsigned int data12[12];
+       unsigned int data10[10];
+       unsigned int data8[8];
+       unsigned int data4[4];
+};
+
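A minimal sketch of how union vin_data and VIN_DATA_PIN_GROUP() are meant to be combined; only the names union vin_data, VIN_DATA_PIN_GROUP, RCAR_GP_PIN and struct sh_pfc_pin_group come from this header, while the vin0 tables and mark names are illustrative:

static const union vin_data vin0_data_pins = {
        /* Only the largest member occupies storage; smaller groups alias
         * its leading entries through the union. */
        .data8 = {
                RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
                RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
                RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
                RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
        },
};

static const union vin_data vin0_data_mux = {
        .data8 = {
                VI0_D0_MARK, VI0_D1_MARK, VI0_D2_MARK, VI0_D3_MARK,
                VI0_D4_MARK, VI0_D5_MARK, VI0_D6_MARK, VI0_D7_MARK,
        },
};

static const struct sh_pfc_pin_group pinmux_groups[] = {
        VIN_DATA_PIN_GROUP(vin0_data, 8),       /* group "vin0_data8" */
        VIN_DATA_PIN_GROUP(vin0_data, 4),       /* group "vin0_data4", first 4 pins */
};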
 #define SH_PFC_FUNCTION(n)                             \
        {                                               \
                .name = #n,                             \
@@ -98,17 +121,11 @@ struct pinmux_data_reg {
        .enum_ids = (const u16 [r_width]) \
 
 struct pinmux_irq {
-       int irq;
        const short *gpios;
 };
 
-#ifdef CONFIG_ARCH_MULTIPLATFORM
-#define PINMUX_IRQ(irq_nr, ids...)                        \
+#define PINMUX_IRQ(ids...)                        \
        { .gpios = (const short []) { ids, -1 } }
-#else
-#define PINMUX_IRQ(irq_nr, ids...)                        \
-       { .irq = irq_nr, .gpios = (const short []) { ids, -1 } }
-#endif
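With the irq member gone, SoC tables pass only the GPIO ids to PINMUX_IRQ(); a sketch of a converted table, with illustrative GPIO numbers:

static const struct pinmux_irq pinmux_irqs[] = {
        PINMUX_IRQ(27, 20),     /* was: PINMUX_IRQ(<platform irq>, 27, 20) */
        PINMUX_IRQ(28, 21),     /* was: PINMUX_IRQ(<platform irq>, 28, 21) */
};

The interrupt numbers themselves are presumably resolved at runtime (from platform resources or the device tree), which is what lets the CONFIG_ARCH_MULTIPLATFORM special case above disappear.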
 
 struct pinmux_range {
        u16 begin;
@@ -143,14 +160,16 @@ struct sh_pfc_soc_info {
        const struct sh_pfc_function *functions;
        unsigned int nr_functions;
 
+#ifdef CONFIG_SUPERH
        const struct pinmux_func *func_gpios;
        unsigned int nr_func_gpios;
+#endif
 
        const struct pinmux_cfg_reg *cfg_regs;
        const struct pinmux_data_reg *data_regs;
 
-       const u16 *gpio_data;
-       unsigned int gpio_data_size;
+       const u16 *pinmux_data;
+       unsigned int pinmux_data_size;
 
        const struct pinmux_irq *gpio_irq;
        unsigned int gpio_irq_size;
@@ -163,7 +182,7 @@ struct sh_pfc_soc_info {
  */
 
 /*
- * sh_pfc_soc_info gpio_data array macros
+ * sh_pfc_soc_info pinmux_data array macros
  */
 
 #define PINMUX_DATA(data_or_mark, ids...)      data_or_mark, ids, 0
@@ -177,33 +196,33 @@ struct sh_pfc_soc_info {
 #define PINMUX_IPSR_NOFN(ipsr, fn, ms)                                 \
        PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##ms)
 #define PINMUX_IPSR_MSEL(ipsr, fn, ms)                                 \
-       PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ipsr, FN_##ms)
-#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms)                          \
        PINMUX_DATA(fn##_MARK, FN_##ms, FN_##ipsr, FN_##fn)
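For reference, this is how one of the sh7734 entries converted earlier in this diff expands under the surviving definition (purely the preprocessor expansion, via PINMUX_DATA, of what is already shown above):

        PINMUX_IPSR_MSEL(IP2_13_11, SD0_CLK_A, SEL_SDHI0_0),

        /* ...expands to the flat u16 sequence: */
        SD0_CLK_A_MARK, FN_SEL_SDHI0_0, FN_IP2_13_11, FN_SD0_CLK_A, 0,

In other words, the consolidation keeps the old PINMUX_IPSR_MODSEL_DATA argument order (mark, mod-select, IPSR field, function) under the shorter PINMUX_IPSR_MSEL name; the previous PINMUX_IPSR_MSEL ordering is the one that goes away.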
 
 /*
  * GP port style (32 ports banks)
  */
 
-#define PORT_GP_1(bank, pin, fn, sfx) fn(bank, pin, GP_##bank##_##pin, sfx)
-
-#define PORT_GP_32(bank, fn, sfx)                                      \
-       PORT_GP_1(bank, 0,  fn, sfx), PORT_GP_1(bank, 1,  fn, sfx),     \
-       PORT_GP_1(bank, 2,  fn, sfx), PORT_GP_1(bank, 3,  fn, sfx),     \
-       PORT_GP_1(bank, 4,  fn, sfx), PORT_GP_1(bank, 5,  fn, sfx),     \
-       PORT_GP_1(bank, 6,  fn, sfx), PORT_GP_1(bank, 7,  fn, sfx),     \
-       PORT_GP_1(bank, 8,  fn, sfx), PORT_GP_1(bank, 9,  fn, sfx),     \
-       PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx),     \
-       PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx),     \
-       PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx),     \
-       PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx),     \
-       PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx),     \
-       PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx),     \
-       PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx),     \
-       PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx),     \
-       PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx),     \
-       PORT_GP_1(bank, 28, fn, sfx), PORT_GP_1(bank, 29, fn, sfx),     \
-       PORT_GP_1(bank, 30, fn, sfx), PORT_GP_1(bank, 31, fn, sfx)
+#define PORT_GP_CFG_1(bank, pin, fn, sfx, cfg) fn(bank, pin, GP_##bank##_##pin, sfx, cfg)
+#define PORT_GP_1(bank, pin, fn, sfx)  PORT_GP_CFG_1(bank, pin, fn, sfx, 0)
+
+#define PORT_GP_CFG_32(bank, fn, sfx, cfg)                             \
+       PORT_GP_CFG_1(bank, 0,  fn, sfx, cfg), PORT_GP_CFG_1(bank, 1,  fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 2,  fn, sfx, cfg), PORT_GP_CFG_1(bank, 3,  fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 4,  fn, sfx, cfg), PORT_GP_CFG_1(bank, 5,  fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 6,  fn, sfx, cfg), PORT_GP_CFG_1(bank, 7,  fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 8,  fn, sfx, cfg), PORT_GP_CFG_1(bank, 9,  fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 10, fn, sfx, cfg), PORT_GP_CFG_1(bank, 11, fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 12, fn, sfx, cfg), PORT_GP_CFG_1(bank, 13, fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 14, fn, sfx, cfg), PORT_GP_CFG_1(bank, 15, fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 16, fn, sfx, cfg), PORT_GP_CFG_1(bank, 17, fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 18, fn, sfx, cfg), PORT_GP_CFG_1(bank, 19, fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 20, fn, sfx, cfg), PORT_GP_CFG_1(bank, 21, fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 22, fn, sfx, cfg), PORT_GP_CFG_1(bank, 23, fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 24, fn, sfx, cfg), PORT_GP_CFG_1(bank, 25, fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 26, fn, sfx, cfg), PORT_GP_CFG_1(bank, 27, fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 28, fn, sfx, cfg), PORT_GP_CFG_1(bank, 29, fn, sfx, cfg),   \
+       PORT_GP_CFG_1(bank, 30, fn, sfx, cfg), PORT_GP_CFG_1(bank, 31, fn, sfx, cfg)
+#define PORT_GP_32(bank, fn, sfx)      PORT_GP_CFG_32(bank, fn, sfx, 0)
 
 #define PORT_GP_32_REV(bank, fn, sfx)                                  \
        PORT_GP_1(bank, 31, fn, sfx), PORT_GP_1(bank, 30, fn, sfx),     \
@@ -224,20 +243,21 @@ struct sh_pfc_soc_info {
        PORT_GP_1(bank, 1,  fn, sfx), PORT_GP_1(bank, 0,  fn, sfx)
 
 /* GP_ALL(suffix) - Expand to a list of GP_#_#_suffix */
-#define _GP_ALL(bank, pin, name, sfx)  name##_##sfx
+#define _GP_ALL(bank, pin, name, sfx, cfg)     name##_##sfx
 #define GP_ALL(str)                    CPU_ALL_PORT(_GP_ALL, str)
 
 /* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */
-#define _GP_GPIO(bank, _pin, _name, sfx)                               \
+#define _GP_GPIO(bank, _pin, _name, sfx, cfg)                          \
        {                                                               \
                .pin = (bank * 32) + _pin,                              \
                .name = __stringify(_name),                             \
                .enum_id = _name##_DATA,                                \
+               .configs = cfg,                                         \
        }
 #define PINMUX_GPIO_GP_ALL()           CPU_ALL_PORT(_GP_GPIO, unused)
 
 /* PINMUX_DATA_GP_ALL -  Expand to a list of name_DATA, name_FN marks */
-#define _GP_DATA(bank, pin, name, sfx) PINMUX_DATA(name##_DATA, name##_FN)
+#define _GP_DATA(bank, pin, name, sfx, cfg)    PINMUX_DATA(name##_DATA, name##_FN)
 #define PINMUX_DATA_GP_ALL()           CPU_ALL_PORT(_GP_DATA, unused)
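The cfg value threaded through the PORT_GP_CFG_* and _GP_* helpers lands in the new .configs member of every generated sh_pfc_pin entry. A minimal sketch of a driver attaching a capability flag to one bank; the CPU_ALL_PORT layout and the SH_PFC_PIN_CFG_PULL_UP flag are illustrative placeholders, not taken from this commit:

#define CPU_ALL_PORT(fn, sfx)                                          \
        PORT_GP_32(0, fn, sfx),                                         \
        PORT_GP_CFG_32(1, fn, sfx, SH_PFC_PIN_CFG_PULL_UP)

static const struct sh_pfc_pin pinmux_pins[] = {
        /* Bank 0 pins get .configs = 0, bank 1 pins get the pull-up flag. */
        PINMUX_GPIO_GP_ALL(),
};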
 
 /*
@@ -326,4 +346,9 @@ struct sh_pfc_soc_info {
                }                                                       \
        }
 
+/*
+ * GPIO number helper macro for R-Car
+ */
+#define RCAR_GP_PIN(bank, pin)         (((bank) * 32) + (pin))
+
 #endif /* __SH_PFC_H */
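The new RCAR_GP_PIN() helper simply linearizes a (bank, pin) pair into the global pin number used by the tables above, e.g.:

        RCAR_GP_PIN(3, 12)      /* == 3 * 32 + 12 == 108 */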
index 0d24d9e4b70c9583899d72aa02f072a8a0bdcd0d..829018c812bdda3690f7449626cecccb2de8874b 100644 (file)
@@ -544,6 +544,11 @@ static const struct pinctrl_pin_desc atlas7_ioc_pads[] = {
        PINCTRL_PIN(156, "lvds_tx0d1n"),
        PINCTRL_PIN(157, "lvds_tx0d0p"),
        PINCTRL_PIN(158, "lvds_tx0d0n"),
+       PINCTRL_PIN(159, "jtag_tdo"),
+       PINCTRL_PIN(160, "jtag_tms"),
+       PINCTRL_PIN(161, "jtag_tck"),
+       PINCTRL_PIN(162, "jtag_tdi"),
+       PINCTRL_PIN(163, "jtag_trstn"),
 };
 
 struct atlas7_pad_config atlas7_ioc_pad_confs[] = {
@@ -708,6 +713,11 @@ struct atlas7_pad_config atlas7_ioc_pad_confs[] = {
        PADCONF(156, 7, 0x130, 0x270, -1, 0x480, 28, 14, 0, 7),
        PADCONF(157, 7, 0x138, 0x278, -1, 0x480, 0, 0, 0, 8),
        PADCONF(158, 7, 0x138, 0x278, -1, 0x480, 4, 2, 0, 9),
+       PADCONF(159, 5, 0x140, 0x280, 0x380, -1, 0, 0, 0, 0),
+       PADCONF(160, 6, 0x140, 0x280, 0x380, -1, 4, 2, 2, 0),
+       PADCONF(161, 5, 0x140, 0x280, 0x380, -1, 8, 4, 4, 0),
+       PADCONF(162, 6, 0x140, 0x280, 0x380, -1, 12, 6, 6, 0),
+       PADCONF(163, 6, 0x140, 0x280, 0x380, -1, 16, 8, 8, 0),
 };
 
 /* pin list of each pin group */
@@ -724,12 +734,15 @@ static const unsigned int sp_rgmii_gpio_pins[] = { 97, 98, 99, 100, 101, 102,
                141, 142, 143, 144, 145, 146, 147, 148, };
 static const unsigned int lvds_gpio_pins[] = { 157, 158, 155, 156, 153, 154,
                151, 152, 149, 150, };
-static const unsigned int uart_nand_gpio_pins[] = { 44, 43, 42, 41, 40, 39,
-               38, 37, 46, 47, 48, 49, 50, 52, 51, 45, 133, 134, 135, 136,
-               137, 138, 139, 140, };
+static const unsigned int jtag_uart_nand_gpio_pins[] = { 44, 43, 42, 41, 40,
+               39, 38, 37, 46, 47, 48, 49, 50, 52, 51, 45, 133, 134, 135,
+               136, 137, 138, 139, 140, 159, 160, 161, 162, 163, };
 static const unsigned int rtc_gpio_pins[] = { 0, 1, 2, 3, 4, 10, 11, 12, 13,
-               14, 15, 16, 17, };
+               14, 15, 16, 17, 9, };
 static const unsigned int audio_ac97_pins[] = { 113, 118, 115, 114, };
+static const unsigned int audio_digmic_pins0[] = { 51, };
+static const unsigned int audio_digmic_pins1[] = { 122, };
+static const unsigned int audio_digmic_pins2[] = { 161, };
 static const unsigned int audio_func_dbg_pins[] = { 141, 144, 44, 43, 42, 41,
                40, 39, 38, 37, 74, 75, 76, 77, 78, 79, 81, 113, 114, 118,
                115, 49, 50, 142, 143, 80, };
@@ -737,16 +750,49 @@ static const unsigned int audio_i2s_pins[] = { 118, 115, 116, 117, 112, 113,
                114, };
 static const unsigned int audio_i2s_2ch_pins[] = { 118, 115, 112, 113, 114, };
 static const unsigned int audio_i2s_extclk_pins[] = { 112, };
-static const unsigned int audio_uart0_pins[] = { 143, 142, 141, 144, };
-static const unsigned int audio_uart1_pins[] = { 147, 146, 145, 148, };
-static const unsigned int audio_uart2_pins0[] = { 20, 21, 19, 18, };
-static const unsigned int audio_uart2_pins1[] = { 109, 110, 101, 111, };
-static const unsigned int c_can_trnsvr_pins[] = { 1, };
-static const unsigned int c0_can_pins0[] = { 11, 10, };
-static const unsigned int c0_can_pins1[] = { 2, 3, };
-static const unsigned int c1_can_pins0[] = { 138, 137, };
-static const unsigned int c1_can_pins1[] = { 147, 146, };
-static const unsigned int c1_can_pins2[] = { 2, 3, };
+static const unsigned int audio_spdif_out_pins0[] = { 112, };
+static const unsigned int audio_spdif_out_pins1[] = { 116, };
+static const unsigned int audio_spdif_out_pins2[] = { 142, };
+static const unsigned int audio_uart0_basic_pins[] = { 143, 142, 141, 144, };
+static const unsigned int audio_uart0_urfs_pins0[] = { 117, };
+static const unsigned int audio_uart0_urfs_pins1[] = { 139, };
+static const unsigned int audio_uart0_urfs_pins2[] = { 163, };
+static const unsigned int audio_uart0_urfs_pins3[] = { 162, };
+static const unsigned int audio_uart1_basic_pins[] = { 147, 146, 145, 148, };
+static const unsigned int audio_uart1_urfs_pins0[] = { 117, };
+static const unsigned int audio_uart1_urfs_pins1[] = { 140, };
+static const unsigned int audio_uart1_urfs_pins2[] = { 163, };
+static const unsigned int audio_uart2_urfs_pins0[] = { 139, };
+static const unsigned int audio_uart2_urfs_pins1[] = { 163, };
+static const unsigned int audio_uart2_urfs_pins2[] = { 96, };
+static const unsigned int audio_uart2_urxd_pins0[] = { 20, };
+static const unsigned int audio_uart2_urxd_pins1[] = { 109, };
+static const unsigned int audio_uart2_urxd_pins2[] = { 93, };
+static const unsigned int audio_uart2_usclk_pins0[] = { 19, };
+static const unsigned int audio_uart2_usclk_pins1[] = { 101, };
+static const unsigned int audio_uart2_usclk_pins2[] = { 91, };
+static const unsigned int audio_uart2_utfs_pins0[] = { 18, };
+static const unsigned int audio_uart2_utfs_pins1[] = { 111, };
+static const unsigned int audio_uart2_utfs_pins2[] = { 94, };
+static const unsigned int audio_uart2_utxd_pins0[] = { 21, };
+static const unsigned int audio_uart2_utxd_pins1[] = { 110, };
+static const unsigned int audio_uart2_utxd_pins2[] = { 92, };
+static const unsigned int c_can_trnsvr_en_pins0[] = { 2, };
+static const unsigned int c_can_trnsvr_en_pins1[] = { 0, };
+static const unsigned int c_can_trnsvr_intr_pins[] = { 1, };
+static const unsigned int c_can_trnsvr_stb_n_pins[] = { 3, };
+static const unsigned int c0_can_rxd_trnsv0_pins[] = { 11, };
+static const unsigned int c0_can_rxd_trnsv1_pins[] = { 2, };
+static const unsigned int c0_can_txd_trnsv0_pins[] = { 10, };
+static const unsigned int c0_can_txd_trnsv1_pins[] = { 3, };
+static const unsigned int c1_can_rxd_pins0[] = { 138, };
+static const unsigned int c1_can_rxd_pins1[] = { 147, };
+static const unsigned int c1_can_rxd_pins2[] = { 2, };
+static const unsigned int c1_can_rxd_pins3[] = { 162, };
+static const unsigned int c1_can_txd_pins0[] = { 137, };
+static const unsigned int c1_can_txd_pins1[] = { 146, };
+static const unsigned int c1_can_txd_pins2[] = { 3, };
+static const unsigned int c1_can_txd_pins3[] = { 161, };
 static const unsigned int ca_audio_lpc_pins[] = { 62, 63, 64, 65, 66, 67, 68,
                69, 70, 71, };
 static const unsigned int ca_bt_lpc_pins[] = { 85, 86, 87, 88, 89, 90, };
@@ -804,7 +850,29 @@ static const unsigned int gn_trg_shutdown_pins2[] = { 117, };
 static const unsigned int gn_trg_shutdown_pins3[] = { 123, };
 static const unsigned int i2c0_pins[] = { 128, 127, };
 static const unsigned int i2c1_pins[] = { 126, 125, };
-static const unsigned int jtag_pins0[] = { 125, 4, 2, 0, 1, 3, };
+static const unsigned int i2s0_pins[] = { 91, 93, 94, 92, };
+static const unsigned int i2s1_basic_pins[] = { 95, 96, };
+static const unsigned int i2s1_rxd0_pins0[] = { 61, };
+static const unsigned int i2s1_rxd0_pins1[] = { 131, };
+static const unsigned int i2s1_rxd0_pins2[] = { 129, };
+static const unsigned int i2s1_rxd0_pins3[] = { 117, };
+static const unsigned int i2s1_rxd0_pins4[] = { 83, };
+static const unsigned int i2s1_rxd1_pins0[] = { 72, };
+static const unsigned int i2s1_rxd1_pins1[] = { 132, };
+static const unsigned int i2s1_rxd1_pins2[] = { 130, };
+static const unsigned int i2s1_rxd1_pins3[] = { 118, };
+static const unsigned int i2s1_rxd1_pins4[] = { 84, };
+static const unsigned int jtag_jt_dbg_nsrst_pins[] = { 125, };
+static const unsigned int jtag_ntrst_pins0[] = { 4, };
+static const unsigned int jtag_ntrst_pins1[] = { 163, };
+static const unsigned int jtag_swdiotms_pins0[] = { 2, };
+static const unsigned int jtag_swdiotms_pins1[] = { 160, };
+static const unsigned int jtag_tck_pins0[] = { 0, };
+static const unsigned int jtag_tck_pins1[] = { 161, };
+static const unsigned int jtag_tdi_pins0[] = { 1, };
+static const unsigned int jtag_tdi_pins1[] = { 162, };
+static const unsigned int jtag_tdo_pins0[] = { 3, };
+static const unsigned int jtag_tdo_pins1[] = { 159, };
 static const unsigned int ks_kas_spi_pins0[] = { 141, 144, 143, 142, };
 static const unsigned int ld_ldd_pins[] = { 57, 58, 59, 60, 61, 62, 63, 64,
                65, 66, 67, 68, 69, 70, 71, 72, 74, 75, 76, 77, 78, 79, 80,
@@ -821,7 +889,7 @@ static const unsigned int nd_df_pins[] = { 44, 43, 42, 41, 40, 39, 38, 37,
                47, 46, 52, 51, 45, 49, 50, 48, 124, };
 static const unsigned int nd_df_nowp_pins[] = { 44, 43, 42, 41, 40, 39, 38,
                37, 47, 46, 52, 51, 45, 49, 50, 48, };
-static const unsigned int ps_pins[] = { 120, 119, };
+static const unsigned int ps_pins[] = { 120, 119, 121, };
 static const unsigned int pwc_core_on_pins[] = { 8, };
 static const unsigned int pwc_ext_on_pins[] = { 6, };
 static const unsigned int pwc_gpio3_clk_pins[] = { 3, };
@@ -836,18 +904,26 @@ static const unsigned int pwc_wakeup_src3_pins[] = { 3, };
 static const unsigned int pw_cko0_pins0[] = { 123, };
 static const unsigned int pw_cko0_pins1[] = { 101, };
 static const unsigned int pw_cko0_pins2[] = { 82, };
+static const unsigned int pw_cko0_pins3[] = { 162, };
 static const unsigned int pw_cko1_pins0[] = { 124, };
 static const unsigned int pw_cko1_pins1[] = { 110, };
+static const unsigned int pw_cko1_pins2[] = { 163, };
 static const unsigned int pw_i2s01_clk_pins0[] = { 125, };
 static const unsigned int pw_i2s01_clk_pins1[] = { 117, };
-static const unsigned int pw_pwm0_pins[] = { 119, };
-static const unsigned int pw_pwm1_pins[] = { 120, };
+static const unsigned int pw_i2s01_clk_pins2[] = { 132, };
+static const unsigned int pw_pwm0_pins0[] = { 119, };
+static const unsigned int pw_pwm0_pins1[] = { 159, };
+static const unsigned int pw_pwm1_pins0[] = { 120, };
+static const unsigned int pw_pwm1_pins1[] = { 160, };
+static const unsigned int pw_pwm1_pins2[] = { 131, };
 static const unsigned int pw_pwm2_pins0[] = { 121, };
 static const unsigned int pw_pwm2_pins1[] = { 98, };
+static const unsigned int pw_pwm2_pins2[] = { 161, };
 static const unsigned int pw_pwm3_pins0[] = { 122, };
 static const unsigned int pw_pwm3_pins1[] = { 73, };
 static const unsigned int pw_pwm_cpu_vol_pins0[] = { 121, };
 static const unsigned int pw_pwm_cpu_vol_pins1[] = { 98, };
+static const unsigned int pw_pwm_cpu_vol_pins2[] = { 161, };
 static const unsigned int pw_backlight_pins0[] = { 122, };
 static const unsigned int pw_backlight_pins1[] = { 73, };
 static const unsigned int rg_eth_mac_pins[] = { 108, 103, 104, 105, 106, 107,
@@ -863,8 +939,11 @@ static const unsigned int sd1_pins[] = { 48, 49, 44, 43, 42, 41, 40, 39, 38,
                37, };
 static const unsigned int sd1_4bit_pins0[] = { 48, 49, 44, 43, 42, 41, };
 static const unsigned int sd1_4bit_pins1[] = { 48, 49, 40, 39, 38, 37, };
-static const unsigned int sd2_pins0[] = { 124, 31, 32, 33, 34, 35, 36, 123, };
-static const unsigned int sd2_no_cdb_pins0[] = { 31, 32, 33, 34, 35, 36, 123, };
+static const unsigned int sd2_basic_pins[] = { 31, 32, 33, 34, 35, 36, };
+static const unsigned int sd2_cdb_pins0[] = { 124, };
+static const unsigned int sd2_cdb_pins1[] = { 161, };
+static const unsigned int sd2_wpb_pins0[] = { 123, };
+static const unsigned int sd2_wpb_pins1[] = { 163, };
 static const unsigned int sd3_pins[] = { 85, 86, 87, 88, 89, 90, };
 static const unsigned int sd5_pins[] = { 91, 92, 93, 94, 95, 96, };
 static const unsigned int sd6_pins0[] = { 79, 78, 74, 75, 76, 77, };
@@ -877,19 +956,39 @@ static const unsigned int tpiu_trace_pins[] = { 53, 56, 57, 58, 59, 60, 61,
 static const unsigned int uart0_pins[] = { 121, 120, 134, 133, };
 static const unsigned int uart0_nopause_pins[] = { 134, 133, };
 static const unsigned int uart1_pins[] = { 136, 135, };
-static const unsigned int uart2_pins[] = { 11, 10, };
-static const unsigned int uart3_pins0[] = { 125, 126, 138, 137, };
-static const unsigned int uart3_pins1[] = { 111, 109, 84, 83, };
-static const unsigned int uart3_pins2[] = { 140, 139, 138, 137, };
-static const unsigned int uart3_pins3[] = { 139, 140, 84, 83, };
-static const unsigned int uart3_nopause_pins0[] = { 138, 137, };
-static const unsigned int uart3_nopause_pins1[] = { 84, 83, };
-static const unsigned int uart4_pins0[] = { 122, 123, 140, 139, };
-static const unsigned int uart4_pins1[] = { 100, 99, 140, 139, };
-static const unsigned int uart4_pins2[] = { 117, 116, 140, 139, };
-static const unsigned int uart4_nopause_pins[] = { 140, 139, };
-static const unsigned int usb0_drvvbus_pins[] = { 51, };
-static const unsigned int usb1_drvvbus_pins[] = { 134, };
+static const unsigned int uart2_cts_pins0[] = { 132, };
+static const unsigned int uart2_cts_pins1[] = { 162, };
+static const unsigned int uart2_rts_pins0[] = { 131, };
+static const unsigned int uart2_rts_pins1[] = { 161, };
+static const unsigned int uart2_rxd_pins0[] = { 11, };
+static const unsigned int uart2_rxd_pins1[] = { 160, };
+static const unsigned int uart2_rxd_pins2[] = { 130, };
+static const unsigned int uart2_txd_pins0[] = { 10, };
+static const unsigned int uart2_txd_pins1[] = { 159, };
+static const unsigned int uart2_txd_pins2[] = { 129, };
+static const unsigned int uart3_cts_pins0[] = { 125, };
+static const unsigned int uart3_cts_pins1[] = { 111, };
+static const unsigned int uart3_cts_pins2[] = { 140, };
+static const unsigned int uart3_rts_pins0[] = { 126, };
+static const unsigned int uart3_rts_pins1[] = { 109, };
+static const unsigned int uart3_rts_pins2[] = { 139, };
+static const unsigned int uart3_rxd_pins0[] = { 138, };
+static const unsigned int uart3_rxd_pins1[] = { 84, };
+static const unsigned int uart3_rxd_pins2[] = { 162, };
+static const unsigned int uart3_txd_pins0[] = { 137, };
+static const unsigned int uart3_txd_pins1[] = { 83, };
+static const unsigned int uart3_txd_pins2[] = { 161, };
+static const unsigned int uart4_basic_pins[] = { 140, 139, };
+static const unsigned int uart4_cts_pins0[] = { 122, };
+static const unsigned int uart4_cts_pins1[] = { 100, };
+static const unsigned int uart4_cts_pins2[] = { 117, };
+static const unsigned int uart4_rts_pins0[] = { 123, };
+static const unsigned int uart4_rts_pins1[] = { 99, };
+static const unsigned int uart4_rts_pins2[] = { 116, };
+static const unsigned int usb0_drvvbus_pins0[] = { 51, };
+static const unsigned int usb0_drvvbus_pins1[] = { 162, };
+static const unsigned int usb1_drvvbus_pins0[] = { 134, };
+static const unsigned int usb1_drvvbus_pins1[] = { 163, };
 static const unsigned int visbus_dout_pins[] = { 57, 58, 59, 60, 61, 62, 63,
                64, 65, 66, 67, 68, 69, 70, 71, 72, 53, 54, 55, 56, 85, 86,
                87, 88, 89, 90, 91, 92, 93, 94, 95, 96, };
@@ -910,23 +1009,59 @@ struct atlas7_pin_group altas7_pin_groups[] = {
        GROUP("sdio_i2s_gpio_grp", sdio_i2s_gpio_pins),
        GROUP("sp_rgmii_gpio_grp", sp_rgmii_gpio_pins),
        GROUP("lvds_gpio_grp", lvds_gpio_pins),
-       GROUP("uart_nand_gpio_grp", uart_nand_gpio_pins),
+       GROUP("jtag_uart_nand_gpio_grp", jtag_uart_nand_gpio_pins),
        GROUP("rtc_gpio_grp", rtc_gpio_pins),
        GROUP("audio_ac97_grp", audio_ac97_pins),
+       GROUP("audio_digmic_grp0", audio_digmic_pins0),
+       GROUP("audio_digmic_grp1", audio_digmic_pins1),
+       GROUP("audio_digmic_grp2", audio_digmic_pins2),
        GROUP("audio_func_dbg_grp", audio_func_dbg_pins),
        GROUP("audio_i2s_grp", audio_i2s_pins),
        GROUP("audio_i2s_2ch_grp", audio_i2s_2ch_pins),
        GROUP("audio_i2s_extclk_grp", audio_i2s_extclk_pins),
-       GROUP("audio_uart0_grp", audio_uart0_pins),
-       GROUP("audio_uart1_grp", audio_uart1_pins),
-       GROUP("audio_uart2_grp0", audio_uart2_pins0),
-       GROUP("audio_uart2_grp1", audio_uart2_pins1),
-       GROUP("c_can_trnsvr_grp", c_can_trnsvr_pins),
-       GROUP("c0_can_grp0", c0_can_pins0),
-       GROUP("c0_can_grp1", c0_can_pins1),
-       GROUP("c1_can_grp0", c1_can_pins0),
-       GROUP("c1_can_grp1", c1_can_pins1),
-       GROUP("c1_can_grp2", c1_can_pins2),
+       GROUP("audio_spdif_out_grp0", audio_spdif_out_pins0),
+       GROUP("audio_spdif_out_grp1", audio_spdif_out_pins1),
+       GROUP("audio_spdif_out_grp2", audio_spdif_out_pins2),
+       GROUP("audio_uart0_basic_grp", audio_uart0_basic_pins),
+       GROUP("audio_uart0_urfs_grp0", audio_uart0_urfs_pins0),
+       GROUP("audio_uart0_urfs_grp1", audio_uart0_urfs_pins1),
+       GROUP("audio_uart0_urfs_grp2", audio_uart0_urfs_pins2),
+       GROUP("audio_uart0_urfs_grp3", audio_uart0_urfs_pins3),
+       GROUP("audio_uart1_basic_grp", audio_uart1_basic_pins),
+       GROUP("audio_uart1_urfs_grp0", audio_uart1_urfs_pins0),
+       GROUP("audio_uart1_urfs_grp1", audio_uart1_urfs_pins1),
+       GROUP("audio_uart1_urfs_grp2", audio_uart1_urfs_pins2),
+       GROUP("audio_uart2_urfs_grp0", audio_uart2_urfs_pins0),
+       GROUP("audio_uart2_urfs_grp1", audio_uart2_urfs_pins1),
+       GROUP("audio_uart2_urfs_grp2", audio_uart2_urfs_pins2),
+       GROUP("audio_uart2_urxd_grp0", audio_uart2_urxd_pins0),
+       GROUP("audio_uart2_urxd_grp1", audio_uart2_urxd_pins1),
+       GROUP("audio_uart2_urxd_grp2", audio_uart2_urxd_pins2),
+       GROUP("audio_uart2_usclk_grp0", audio_uart2_usclk_pins0),
+       GROUP("audio_uart2_usclk_grp1", audio_uart2_usclk_pins1),
+       GROUP("audio_uart2_usclk_grp2", audio_uart2_usclk_pins2),
+       GROUP("audio_uart2_utfs_grp0", audio_uart2_utfs_pins0),
+       GROUP("audio_uart2_utfs_grp1", audio_uart2_utfs_pins1),
+       GROUP("audio_uart2_utfs_grp2", audio_uart2_utfs_pins2),
+       GROUP("audio_uart2_utxd_grp0", audio_uart2_utxd_pins0),
+       GROUP("audio_uart2_utxd_grp1", audio_uart2_utxd_pins1),
+       GROUP("audio_uart2_utxd_grp2", audio_uart2_utxd_pins2),
+       GROUP("c_can_trnsvr_en_grp0", c_can_trnsvr_en_pins0),
+       GROUP("c_can_trnsvr_en_grp1", c_can_trnsvr_en_pins1),
+       GROUP("c_can_trnsvr_intr_grp", c_can_trnsvr_intr_pins),
+       GROUP("c_can_trnsvr_stb_n_grp", c_can_trnsvr_stb_n_pins),
+       GROUP("c0_can_rxd_trnsv0_grp", c0_can_rxd_trnsv0_pins),
+       GROUP("c0_can_rxd_trnsv1_grp", c0_can_rxd_trnsv1_pins),
+       GROUP("c0_can_txd_trnsv0_grp", c0_can_txd_trnsv0_pins),
+       GROUP("c0_can_txd_trnsv1_grp", c0_can_txd_trnsv1_pins),
+       GROUP("c1_can_rxd_grp0", c1_can_rxd_pins0),
+       GROUP("c1_can_rxd_grp1", c1_can_rxd_pins1),
+       GROUP("c1_can_rxd_grp2", c1_can_rxd_pins2),
+       GROUP("c1_can_rxd_grp3", c1_can_rxd_pins3),
+       GROUP("c1_can_txd_grp0", c1_can_txd_pins0),
+       GROUP("c1_can_txd_grp1", c1_can_txd_pins1),
+       GROUP("c1_can_txd_grp2", c1_can_txd_pins2),
+       GROUP("c1_can_txd_grp3", c1_can_txd_pins3),
        GROUP("ca_audio_lpc_grp", ca_audio_lpc_pins),
        GROUP("ca_bt_lpc_grp", ca_bt_lpc_pins),
        GROUP("ca_coex_grp", ca_coex_pins),
@@ -977,7 +1112,29 @@ struct atlas7_pin_group altas7_pin_groups[] = {
        GROUP("gn_trg_shutdown_grp3", gn_trg_shutdown_pins3),
        GROUP("i2c0_grp", i2c0_pins),
        GROUP("i2c1_grp", i2c1_pins),
-       GROUP("jtag_grp0", jtag_pins0),
+       GROUP("i2s0_grp", i2s0_pins),
+       GROUP("i2s1_basic_grp", i2s1_basic_pins),
+       GROUP("i2s1_rxd0_grp0", i2s1_rxd0_pins0),
+       GROUP("i2s1_rxd0_grp1", i2s1_rxd0_pins1),
+       GROUP("i2s1_rxd0_grp2", i2s1_rxd0_pins2),
+       GROUP("i2s1_rxd0_grp3", i2s1_rxd0_pins3),
+       GROUP("i2s1_rxd0_grp4", i2s1_rxd0_pins4),
+       GROUP("i2s1_rxd1_grp0", i2s1_rxd1_pins0),
+       GROUP("i2s1_rxd1_grp1", i2s1_rxd1_pins1),
+       GROUP("i2s1_rxd1_grp2", i2s1_rxd1_pins2),
+       GROUP("i2s1_rxd1_grp3", i2s1_rxd1_pins3),
+       GROUP("i2s1_rxd1_grp4", i2s1_rxd1_pins4),
+       GROUP("jtag_jt_dbg_nsrst_grp", jtag_jt_dbg_nsrst_pins),
+       GROUP("jtag_ntrst_grp0", jtag_ntrst_pins0),
+       GROUP("jtag_ntrst_grp1", jtag_ntrst_pins1),
+       GROUP("jtag_swdiotms_grp0", jtag_swdiotms_pins0),
+       GROUP("jtag_swdiotms_grp1", jtag_swdiotms_pins1),
+       GROUP("jtag_tck_grp0", jtag_tck_pins0),
+       GROUP("jtag_tck_grp1", jtag_tck_pins1),
+       GROUP("jtag_tdi_grp0", jtag_tdi_pins0),
+       GROUP("jtag_tdi_grp1", jtag_tdi_pins1),
+       GROUP("jtag_tdo_grp0", jtag_tdo_pins0),
+       GROUP("jtag_tdo_grp1", jtag_tdo_pins1),
        GROUP("ks_kas_spi_grp0", ks_kas_spi_pins0),
        GROUP("ld_ldd_grp", ld_ldd_pins),
        GROUP("ld_ldd_16bit_grp", ld_ldd_16bit_pins),
@@ -1002,18 +1159,26 @@ struct atlas7_pin_group altas7_pin_groups[] = {
        GROUP("pw_cko0_grp0", pw_cko0_pins0),
        GROUP("pw_cko0_grp1", pw_cko0_pins1),
        GROUP("pw_cko0_grp2", pw_cko0_pins2),
+       GROUP("pw_cko0_grp3", pw_cko0_pins3),
        GROUP("pw_cko1_grp0", pw_cko1_pins0),
        GROUP("pw_cko1_grp1", pw_cko1_pins1),
+       GROUP("pw_cko1_grp2", pw_cko1_pins2),
        GROUP("pw_i2s01_clk_grp0", pw_i2s01_clk_pins0),
        GROUP("pw_i2s01_clk_grp1", pw_i2s01_clk_pins1),
-       GROUP("pw_pwm0_grp", pw_pwm0_pins),
-       GROUP("pw_pwm1_grp", pw_pwm1_pins),
+       GROUP("pw_i2s01_clk_grp2", pw_i2s01_clk_pins2),
+       GROUP("pw_pwm0_grp0", pw_pwm0_pins0),
+       GROUP("pw_pwm0_grp1", pw_pwm0_pins1),
+       GROUP("pw_pwm1_grp0", pw_pwm1_pins0),
+       GROUP("pw_pwm1_grp1", pw_pwm1_pins1),
+       GROUP("pw_pwm1_grp2", pw_pwm1_pins2),
        GROUP("pw_pwm2_grp0", pw_pwm2_pins0),
        GROUP("pw_pwm2_grp1", pw_pwm2_pins1),
+       GROUP("pw_pwm2_grp2", pw_pwm2_pins2),
        GROUP("pw_pwm3_grp0", pw_pwm3_pins0),
        GROUP("pw_pwm3_grp1", pw_pwm3_pins1),
        GROUP("pw_pwm_cpu_vol_grp0", pw_pwm_cpu_vol_pins0),
        GROUP("pw_pwm_cpu_vol_grp1", pw_pwm_cpu_vol_pins1),
+       GROUP("pw_pwm_cpu_vol_grp2", pw_pwm_cpu_vol_pins2),
        GROUP("pw_backlight_grp0", pw_backlight_pins0),
        GROUP("pw_backlight_grp1", pw_backlight_pins1),
        GROUP("rg_eth_mac_grp", rg_eth_mac_pins),
@@ -1026,8 +1191,11 @@ struct atlas7_pin_group altas7_pin_groups[] = {
        GROUP("sd1_grp", sd1_pins),
        GROUP("sd1_4bit_grp0", sd1_4bit_pins0),
        GROUP("sd1_4bit_grp1", sd1_4bit_pins1),
-       GROUP("sd2_grp0", sd2_pins0),
-       GROUP("sd2_no_cdb_grp0", sd2_no_cdb_pins0),
+       GROUP("sd2_basic_grp", sd2_basic_pins),
+       GROUP("sd2_cdb_grp0", sd2_cdb_pins0),
+       GROUP("sd2_cdb_grp1", sd2_cdb_pins1),
+       GROUP("sd2_wpb_grp0", sd2_wpb_pins0),
+       GROUP("sd2_wpb_grp1", sd2_wpb_pins1),
        GROUP("sd3_grp", sd3_pins),
        GROUP("sd5_grp", sd5_pins),
        GROUP("sd6_grp0", sd6_pins0),
@@ -1039,19 +1207,39 @@ struct atlas7_pin_group altas7_pin_groups[] = {
        GROUP("uart0_grp", uart0_pins),
        GROUP("uart0_nopause_grp", uart0_nopause_pins),
        GROUP("uart1_grp", uart1_pins),
-       GROUP("uart2_grp", uart2_pins),
-       GROUP("uart3_grp0", uart3_pins0),
-       GROUP("uart3_grp1", uart3_pins1),
-       GROUP("uart3_grp2", uart3_pins2),
-       GROUP("uart3_grp3", uart3_pins3),
-       GROUP("uart3_nopause_grp0", uart3_nopause_pins0),
-       GROUP("uart3_nopause_grp1", uart3_nopause_pins1),
-       GROUP("uart4_grp0", uart4_pins0),
-       GROUP("uart4_grp1", uart4_pins1),
-       GROUP("uart4_grp2", uart4_pins2),
-       GROUP("uart4_nopause_grp", uart4_nopause_pins),
-       GROUP("usb0_drvvbus_grp", usb0_drvvbus_pins),
-       GROUP("usb1_drvvbus_grp", usb1_drvvbus_pins),
+       GROUP("uart2_cts_grp0", uart2_cts_pins0),
+       GROUP("uart2_cts_grp1", uart2_cts_pins1),
+       GROUP("uart2_rts_grp0", uart2_rts_pins0),
+       GROUP("uart2_rts_grp1", uart2_rts_pins1),
+       GROUP("uart2_rxd_grp0", uart2_rxd_pins0),
+       GROUP("uart2_rxd_grp1", uart2_rxd_pins1),
+       GROUP("uart2_rxd_grp2", uart2_rxd_pins2),
+       GROUP("uart2_txd_grp0", uart2_txd_pins0),
+       GROUP("uart2_txd_grp1", uart2_txd_pins1),
+       GROUP("uart2_txd_grp2", uart2_txd_pins2),
+       GROUP("uart3_cts_grp0", uart3_cts_pins0),
+       GROUP("uart3_cts_grp1", uart3_cts_pins1),
+       GROUP("uart3_cts_grp2", uart3_cts_pins2),
+       GROUP("uart3_rts_grp0", uart3_rts_pins0),
+       GROUP("uart3_rts_grp1", uart3_rts_pins1),
+       GROUP("uart3_rts_grp2", uart3_rts_pins2),
+       GROUP("uart3_rxd_grp0", uart3_rxd_pins0),
+       GROUP("uart3_rxd_grp1", uart3_rxd_pins1),
+       GROUP("uart3_rxd_grp2", uart3_rxd_pins2),
+       GROUP("uart3_txd_grp0", uart3_txd_pins0),
+       GROUP("uart3_txd_grp1", uart3_txd_pins1),
+       GROUP("uart3_txd_grp2", uart3_txd_pins2),
+       GROUP("uart4_basic_grp", uart4_basic_pins),
+       GROUP("uart4_cts_grp0", uart4_cts_pins0),
+       GROUP("uart4_cts_grp1", uart4_cts_pins1),
+       GROUP("uart4_cts_grp2", uart4_cts_pins2),
+       GROUP("uart4_rts_grp0", uart4_rts_pins0),
+       GROUP("uart4_rts_grp1", uart4_rts_pins1),
+       GROUP("uart4_rts_grp2", uart4_rts_pins2),
+       GROUP("usb0_drvvbus_grp0", usb0_drvvbus_pins0),
+       GROUP("usb0_drvvbus_grp1", usb0_drvvbus_pins1),
+       GROUP("usb1_drvvbus_grp0", usb1_drvvbus_pins0),
+       GROUP("usb1_drvvbus_grp1", usb1_drvvbus_pins1),
        GROUP("visbus_dout_grp", visbus_dout_pins),
        GROUP("vi_vip1_grp", vi_vip1_pins),
        GROUP("vi_vip1_ext_grp", vi_vip1_ext_pins),
@@ -1065,23 +1253,90 @@ static const char * const lcd_vip_gpio_grp[] = { "lcd_vip_gpio_grp", };
 static const char * const sdio_i2s_gpio_grp[] = { "sdio_i2s_gpio_grp", };
 static const char * const sp_rgmii_gpio_grp[] = { "sp_rgmii_gpio_grp", };
 static const char * const lvds_gpio_grp[] = { "lvds_gpio_grp", };
-static const char * const uart_nand_gpio_grp[] = { "uart_nand_gpio_grp", };
+static const char * const jtag_uart_nand_gpio_grp[] = {
+                               "jtag_uart_nand_gpio_grp", };
 static const char * const rtc_gpio_grp[] = { "rtc_gpio_grp", };
 static const char * const audio_ac97_grp[] = { "audio_ac97_grp", };
+static const char * const audio_digmic_grp0[] = { "audio_digmic_grp0", };
+static const char * const audio_digmic_grp1[] = { "audio_digmic_grp1", };
+static const char * const audio_digmic_grp2[] = { "audio_digmic_grp2", };
 static const char * const audio_func_dbg_grp[] = { "audio_func_dbg_grp", };
 static const char * const audio_i2s_grp[] = { "audio_i2s_grp", };
 static const char * const audio_i2s_2ch_grp[] = { "audio_i2s_2ch_grp", };
 static const char * const audio_i2s_extclk_grp[] = { "audio_i2s_extclk_grp", };
-static const char * const audio_uart0_grp[] = { "audio_uart0_grp", };
-static const char * const audio_uart1_grp[] = { "audio_uart1_grp", };
-static const char * const audio_uart2_grp0[] = { "audio_uart2_grp0", };
-static const char * const audio_uart2_grp1[] = { "audio_uart2_grp1", };
-static const char * const c_can_trnsvr_grp[] = { "c_can_trnsvr_grp", };
-static const char * const c0_can_grp0[] = { "c0_can_grp0", };
-static const char * const c0_can_grp1[] = { "c0_can_grp1", };
-static const char * const c1_can_grp0[] = { "c1_can_grp0", };
-static const char * const c1_can_grp1[] = { "c1_can_grp1", };
-static const char * const c1_can_grp2[] = { "c1_can_grp2", };
+static const char * const audio_spdif_out_grp0[] = { "audio_spdif_out_grp0", };
+static const char * const audio_spdif_out_grp1[] = { "audio_spdif_out_grp1", };
+static const char * const audio_spdif_out_grp2[] = { "audio_spdif_out_grp2", };
+static const char * const audio_uart0_basic_grp[] = {
+                               "audio_uart0_basic_grp", };
+static const char * const audio_uart0_urfs_grp0[] = {
+                               "audio_uart0_urfs_grp0", };
+static const char * const audio_uart0_urfs_grp1[] = {
+                               "audio_uart0_urfs_grp1", };
+static const char * const audio_uart0_urfs_grp2[] = {
+                               "audio_uart0_urfs_grp2", };
+static const char * const audio_uart0_urfs_grp3[] = {
+                               "audio_uart0_urfs_grp3", };
+static const char * const audio_uart1_basic_grp[] = {
+                               "audio_uart1_basic_grp", };
+static const char * const audio_uart1_urfs_grp0[] = {
+                               "audio_uart1_urfs_grp0", };
+static const char * const audio_uart1_urfs_grp1[] = {
+                               "audio_uart1_urfs_grp1", };
+static const char * const audio_uart1_urfs_grp2[] = {
+                               "audio_uart1_urfs_grp2", };
+static const char * const audio_uart2_urfs_grp0[] = {
+                               "audio_uart2_urfs_grp0", };
+static const char * const audio_uart2_urfs_grp1[] = {
+                               "audio_uart2_urfs_grp1", };
+static const char * const audio_uart2_urfs_grp2[] = {
+                               "audio_uart2_urfs_grp2", };
+static const char * const audio_uart2_urxd_grp0[] = {
+                               "audio_uart2_urxd_grp0", };
+static const char * const audio_uart2_urxd_grp1[] = {
+                               "audio_uart2_urxd_grp1", };
+static const char * const audio_uart2_urxd_grp2[] = {
+                               "audio_uart2_urxd_grp2", };
+static const char * const audio_uart2_usclk_grp0[] = {
+                               "audio_uart2_usclk_grp0", };
+static const char * const audio_uart2_usclk_grp1[] = {
+                               "audio_uart2_usclk_grp1", };
+static const char * const audio_uart2_usclk_grp2[] = {
+                               "audio_uart2_usclk_grp2", };
+static const char * const audio_uart2_utfs_grp0[] = {
+                               "audio_uart2_utfs_grp0", };
+static const char * const audio_uart2_utfs_grp1[] = {
+                               "audio_uart2_utfs_grp1", };
+static const char * const audio_uart2_utfs_grp2[] = {
+                               "audio_uart2_utfs_grp2", };
+static const char * const audio_uart2_utxd_grp0[] = {
+                               "audio_uart2_utxd_grp0", };
+static const char * const audio_uart2_utxd_grp1[] = {
+                               "audio_uart2_utxd_grp1", };
+static const char * const audio_uart2_utxd_grp2[] = {
+                               "audio_uart2_utxd_grp2", };
+static const char * const c_can_trnsvr_en_grp0[] = { "c_can_trnsvr_en_grp0", };
+static const char * const c_can_trnsvr_en_grp1[] = { "c_can_trnsvr_en_grp1", };
+static const char * const c_can_trnsvr_intr_grp[] = {
+                               "c_can_trnsvr_intr_grp", };
+static const char * const c_can_trnsvr_stb_n_grp[] = {
+                               "c_can_trnsvr_stb_n_grp", };
+static const char * const c0_can_rxd_trnsv0_grp[] = {
+                               "c0_can_rxd_trnsv0_grp", };
+static const char * const c0_can_rxd_trnsv1_grp[] = {
+                               "c0_can_rxd_trnsv1_grp", };
+static const char * const c0_can_txd_trnsv0_grp[] = {
+                               "c0_can_txd_trnsv0_grp", };
+static const char * const c0_can_txd_trnsv1_grp[] = {
+                               "c0_can_txd_trnsv1_grp", };
+static const char * const c1_can_rxd_grp0[] = { "c1_can_rxd_grp0", };
+static const char * const c1_can_rxd_grp1[] = { "c1_can_rxd_grp1", };
+static const char * const c1_can_rxd_grp2[] = { "c1_can_rxd_grp2", };
+static const char * const c1_can_rxd_grp3[] = { "c1_can_rxd_grp3", };
+static const char * const c1_can_txd_grp0[] = { "c1_can_txd_grp0", };
+static const char * const c1_can_txd_grp1[] = { "c1_can_txd_grp1", };
+static const char * const c1_can_txd_grp2[] = { "c1_can_txd_grp2", };
+static const char * const c1_can_txd_grp3[] = { "c1_can_txd_grp3", };
 static const char * const ca_audio_lpc_grp[] = { "ca_audio_lpc_grp", };
 static const char * const ca_bt_lpc_grp[] = { "ca_bt_lpc_grp", };
 static const char * const ca_coex_grp[] = { "ca_coex_grp", };
@@ -1135,7 +1390,30 @@ static const char * const gn_trg_shutdown_grp2[] = { "gn_trg_shutdown_grp2", };
 static const char * const gn_trg_shutdown_grp3[] = { "gn_trg_shutdown_grp3", };
 static const char * const i2c0_grp[] = { "i2c0_grp", };
 static const char * const i2c1_grp[] = { "i2c1_grp", };
-static const char * const jtag_grp0[] = { "jtag_grp0", };
+static const char * const i2s0_grp[] = { "i2s0_grp", };
+static const char * const i2s1_basic_grp[] = { "i2s1_basic_grp", };
+static const char * const i2s1_rxd0_grp0[] = { "i2s1_rxd0_grp0", };
+static const char * const i2s1_rxd0_grp1[] = { "i2s1_rxd0_grp1", };
+static const char * const i2s1_rxd0_grp2[] = { "i2s1_rxd0_grp2", };
+static const char * const i2s1_rxd0_grp3[] = { "i2s1_rxd0_grp3", };
+static const char * const i2s1_rxd0_grp4[] = { "i2s1_rxd0_grp4", };
+static const char * const i2s1_rxd1_grp0[] = { "i2s1_rxd1_grp0", };
+static const char * const i2s1_rxd1_grp1[] = { "i2s1_rxd1_grp1", };
+static const char * const i2s1_rxd1_grp2[] = { "i2s1_rxd1_grp2", };
+static const char * const i2s1_rxd1_grp3[] = { "i2s1_rxd1_grp3", };
+static const char * const i2s1_rxd1_grp4[] = { "i2s1_rxd1_grp4", };
+static const char * const jtag_jt_dbg_nsrst_grp[] = {
+                               "jtag_jt_dbg_nsrst_grp", };
+static const char * const jtag_ntrst_grp0[] = { "jtag_ntrst_grp0", };
+static const char * const jtag_ntrst_grp1[] = { "jtag_ntrst_grp1", };
+static const char * const jtag_swdiotms_grp0[] = { "jtag_swdiotms_grp0", };
+static const char * const jtag_swdiotms_grp1[] = { "jtag_swdiotms_grp1", };
+static const char * const jtag_tck_grp0[] = { "jtag_tck_grp0", };
+static const char * const jtag_tck_grp1[] = { "jtag_tck_grp1", };
+static const char * const jtag_tdi_grp0[] = { "jtag_tdi_grp0", };
+static const char * const jtag_tdi_grp1[] = { "jtag_tdi_grp1", };
+static const char * const jtag_tdo_grp0[] = { "jtag_tdo_grp0", };
+static const char * const jtag_tdo_grp1[] = { "jtag_tdo_grp1", };
 static const char * const ks_kas_spi_grp0[] = { "ks_kas_spi_grp0", };
 static const char * const ld_ldd_grp[] = { "ld_ldd_grp", };
 static const char * const ld_ldd_16bit_grp[] = { "ld_ldd_16bit_grp", };
@@ -1160,18 +1438,26 @@ static const char * const pwc_wakeup_src3_grp[] = { "pwc_wakeup_src3_grp", };
 static const char * const pw_cko0_grp0[] = { "pw_cko0_grp0", };
 static const char * const pw_cko0_grp1[] = { "pw_cko0_grp1", };
 static const char * const pw_cko0_grp2[] = { "pw_cko0_grp2", };
+static const char * const pw_cko0_grp3[] = { "pw_cko0_grp3", };
 static const char * const pw_cko1_grp0[] = { "pw_cko1_grp0", };
 static const char * const pw_cko1_grp1[] = { "pw_cko1_grp1", };
+static const char * const pw_cko1_grp2[] = { "pw_cko1_grp2", };
 static const char * const pw_i2s01_clk_grp0[] = { "pw_i2s01_clk_grp0", };
 static const char * const pw_i2s01_clk_grp1[] = { "pw_i2s01_clk_grp1", };
-static const char * const pw_pwm0_grp[] = { "pw_pwm0_grp", };
-static const char * const pw_pwm1_grp[] = { "pw_pwm1_grp", };
+static const char * const pw_i2s01_clk_grp2[] = { "pw_i2s01_clk_grp2", };
+static const char * const pw_pwm0_grp0[] = { "pw_pwm0_grp0", };
+static const char * const pw_pwm0_grp1[] = { "pw_pwm0_grp1", };
+static const char * const pw_pwm1_grp0[] = { "pw_pwm1_grp0", };
+static const char * const pw_pwm1_grp1[] = { "pw_pwm1_grp1", };
+static const char * const pw_pwm1_grp2[] = { "pw_pwm1_grp2", };
 static const char * const pw_pwm2_grp0[] = { "pw_pwm2_grp0", };
 static const char * const pw_pwm2_grp1[] = { "pw_pwm2_grp1", };
+static const char * const pw_pwm2_grp2[] = { "pw_pwm2_grp2", };
 static const char * const pw_pwm3_grp0[] = { "pw_pwm3_grp0", };
 static const char * const pw_pwm3_grp1[] = { "pw_pwm3_grp1", };
 static const char * const pw_pwm_cpu_vol_grp0[] = { "pw_pwm_cpu_vol_grp0", };
 static const char * const pw_pwm_cpu_vol_grp1[] = { "pw_pwm_cpu_vol_grp1", };
+static const char * const pw_pwm_cpu_vol_grp2[] = { "pw_pwm_cpu_vol_grp2", };
 static const char * const pw_backlight_grp0[] = { "pw_backlight_grp0", };
 static const char * const pw_backlight_grp1[] = { "pw_backlight_grp1", };
 static const char * const rg_eth_mac_grp[] = { "rg_eth_mac_grp", };
@@ -1187,8 +1473,11 @@ static const char * const sd0_4bit_grp[] = { "sd0_4bit_grp", };
 static const char * const sd1_grp[] = { "sd1_grp", };
 static const char * const sd1_4bit_grp0[] = { "sd1_4bit_grp0", };
 static const char * const sd1_4bit_grp1[] = { "sd1_4bit_grp1", };
-static const char * const sd2_grp0[] = { "sd2_grp0", };
-static const char * const sd2_no_cdb_grp0[] = { "sd2_no_cdb_grp0", };
+static const char * const sd2_basic_grp[] = { "sd2_basic_grp", };
+static const char * const sd2_cdb_grp0[] = { "sd2_cdb_grp0", };
+static const char * const sd2_cdb_grp1[] = { "sd2_cdb_grp1", };
+static const char * const sd2_wpb_grp0[] = { "sd2_wpb_grp0", };
+static const char * const sd2_wpb_grp1[] = { "sd2_wpb_grp1", };
 static const char * const sd3_grp[] = { "sd3_grp", };
 static const char * const sd5_grp[] = { "sd5_grp", };
 static const char * const sd6_grp0[] = { "sd6_grp0", };
@@ -1200,19 +1489,39 @@ static const char * const tpiu_trace_grp[] = { "tpiu_trace_grp", };
 static const char * const uart0_grp[] = { "uart0_grp", };
 static const char * const uart0_nopause_grp[] = { "uart0_nopause_grp", };
 static const char * const uart1_grp[] = { "uart1_grp", };
-static const char * const uart2_grp[] = { "uart2_grp", };
-static const char * const uart3_grp0[] = { "uart3_grp0", };
-static const char * const uart3_grp1[] = { "uart3_grp1", };
-static const char * const uart3_grp2[] = { "uart3_grp2", };
-static const char * const uart3_grp3[] = { "uart3_grp3", };
-static const char * const uart3_nopause_grp0[] = { "uart3_nopause_grp0", };
-static const char * const uart3_nopause_grp1[] = { "uart3_nopause_grp1", };
-static const char * const uart4_grp0[] = { "uart4_grp0", };
-static const char * const uart4_grp1[] = { "uart4_grp1", };
-static const char * const uart4_grp2[] = { "uart4_grp2", };
-static const char * const uart4_nopause_grp[] = { "uart4_nopause_grp", };
-static const char * const usb0_drvvbus_grp[] = { "usb0_drvvbus_grp", };
-static const char * const usb1_drvvbus_grp[] = { "usb1_drvvbus_grp", };
+static const char * const uart2_cts_grp0[] = { "uart2_cts_grp0", };
+static const char * const uart2_cts_grp1[] = { "uart2_cts_grp1", };
+static const char * const uart2_rts_grp0[] = { "uart2_rts_grp0", };
+static const char * const uart2_rts_grp1[] = { "uart2_rts_grp1", };
+static const char * const uart2_rxd_grp0[] = { "uart2_rxd_grp0", };
+static const char * const uart2_rxd_grp1[] = { "uart2_rxd_grp1", };
+static const char * const uart2_rxd_grp2[] = { "uart2_rxd_grp2", };
+static const char * const uart2_txd_grp0[] = { "uart2_txd_grp0", };
+static const char * const uart2_txd_grp1[] = { "uart2_txd_grp1", };
+static const char * const uart2_txd_grp2[] = { "uart2_txd_grp2", };
+static const char * const uart3_cts_grp0[] = { "uart3_cts_grp0", };
+static const char * const uart3_cts_grp1[] = { "uart3_cts_grp1", };
+static const char * const uart3_cts_grp2[] = { "uart3_cts_grp2", };
+static const char * const uart3_rts_grp0[] = { "uart3_rts_grp0", };
+static const char * const uart3_rts_grp1[] = { "uart3_rts_grp1", };
+static const char * const uart3_rts_grp2[] = { "uart3_rts_grp2", };
+static const char * const uart3_rxd_grp0[] = { "uart3_rxd_grp0", };
+static const char * const uart3_rxd_grp1[] = { "uart3_rxd_grp1", };
+static const char * const uart3_rxd_grp2[] = { "uart3_rxd_grp2", };
+static const char * const uart3_txd_grp0[] = { "uart3_txd_grp0", };
+static const char * const uart3_txd_grp1[] = { "uart3_txd_grp1", };
+static const char * const uart3_txd_grp2[] = { "uart3_txd_grp2", };
+static const char * const uart4_basic_grp[] = { "uart4_basic_grp", };
+static const char * const uart4_cts_grp0[] = { "uart4_cts_grp0", };
+static const char * const uart4_cts_grp1[] = { "uart4_cts_grp1", };
+static const char * const uart4_cts_grp2[] = { "uart4_cts_grp2", };
+static const char * const uart4_rts_grp0[] = { "uart4_rts_grp0", };
+static const char * const uart4_rts_grp1[] = { "uart4_rts_grp1", };
+static const char * const uart4_rts_grp2[] = { "uart4_rts_grp2", };
+static const char * const usb0_drvvbus_grp0[] = { "usb0_drvvbus_grp0", };
+static const char * const usb0_drvvbus_grp1[] = { "usb0_drvvbus_grp1", };
+static const char * const usb1_drvvbus_grp0[] = { "usb1_drvvbus_grp0", };
+static const char * const usb1_drvvbus_grp1[] = { "usb1_drvvbus_grp1", };
 static const char * const visbus_dout_grp[] = { "visbus_dout_grp", };
 static const char * const vi_vip1_grp[] = { "vi_vip1_grp", };
 static const char * const vi_vip1_ext_grp[] = { "vi_vip1_ext_grp", };
@@ -1376,7 +1685,7 @@ static struct atlas7_grp_mux lvds_gpio_grp_mux = {
        .pad_mux_list = lvds_gpio_grp_pad_mux,
 };
 
-static struct atlas7_pad_mux uart_nand_gpio_grp_pad_mux[] = {
+static struct atlas7_pad_mux jtag_uart_nand_gpio_grp_pad_mux[] = {
        MUX(1, 44, 0, N, N, N, N),
        MUX(1, 43, 0, N, N, N, N),
        MUX(1, 42, 0, N, N, N, N),
@@ -1401,11 +1710,16 @@ static struct atlas7_pad_mux uart_nand_gpio_grp_pad_mux[] = {
        MUX(1, 138, 0, N, N, N, N),
        MUX(1, 139, 0, N, N, N, N),
        MUX(1, 140, 0, N, N, N, N),
+       MUX(1, 159, 0, N, N, N, N),
+       MUX(1, 160, 0, N, N, N, N),
+       MUX(1, 161, 0, N, N, N, N),
+       MUX(1, 162, 0, N, N, N, N),
+       MUX(1, 163, 0, N, N, N, N),
 };
 
-static struct atlas7_grp_mux uart_nand_gpio_grp_mux = {
-       .pad_mux_count = ARRAY_SIZE(uart_nand_gpio_grp_pad_mux),
-       .pad_mux_list = uart_nand_gpio_grp_pad_mux,
+static struct atlas7_grp_mux jtag_uart_nand_gpio_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(jtag_uart_nand_gpio_grp_pad_mux),
+       .pad_mux_list = jtag_uart_nand_gpio_grp_pad_mux,
 };
 
 static struct atlas7_pad_mux rtc_gpio_grp_pad_mux[] = {
@@ -1422,6 +1736,7 @@ static struct atlas7_pad_mux rtc_gpio_grp_pad_mux[] = {
        MUX(0, 15, 0, N, N, N, N),
        MUX(0, 16, 0, N, N, N, N),
        MUX(0, 17, 0, N, N, N, N),
+       MUX(0, 9, 0, N, N, N, N),
 };
 
 static struct atlas7_grp_mux rtc_gpio_grp_mux = {
@@ -1441,6 +1756,33 @@ static struct atlas7_grp_mux audio_ac97_grp_mux = {
        .pad_mux_list = audio_ac97_grp_pad_mux,
 };
 
+static struct atlas7_pad_mux audio_digmic_grp0_pad_mux[] = {
+       MUX(1, 51, 3, 0xa10, 20, 0xa90, 20),
+};
+
+static struct atlas7_grp_mux audio_digmic_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_digmic_grp0_pad_mux),
+       .pad_mux_list = audio_digmic_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_digmic_grp1_pad_mux[] = {
+       MUX(1, 122, 5, 0xa10, 20, 0xa90, 20),
+};
+
+static struct atlas7_grp_mux audio_digmic_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_digmic_grp1_pad_mux),
+       .pad_mux_list = audio_digmic_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_digmic_grp2_pad_mux[] = {
+       MUX(1, 161, 7, 0xa10, 20, 0xa90, 20),
+};
+
+static struct atlas7_grp_mux audio_digmic_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_digmic_grp2_pad_mux),
+       .pad_mux_list = audio_digmic_grp2_pad_mux,
+};
+
 static struct atlas7_pad_mux audio_func_dbg_grp_pad_mux[] = {
        MUX(1, 141, 4, N, N, N, N),
        MUX(1, 144, 4, N, N, N, N),
@@ -1512,111 +1854,397 @@ static struct atlas7_grp_mux audio_i2s_extclk_grp_mux = {
        .pad_mux_list = audio_i2s_extclk_grp_pad_mux,
 };
 
-static struct atlas7_pad_mux audio_uart0_grp_pad_mux[] = {
+static struct atlas7_pad_mux audio_spdif_out_grp0_pad_mux[] = {
+       MUX(1, 112, 3, N, N, N, N),
+};
+
+static struct atlas7_grp_mux audio_spdif_out_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_spdif_out_grp0_pad_mux),
+       .pad_mux_list = audio_spdif_out_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_spdif_out_grp1_pad_mux[] = {
+       MUX(1, 116, 3, N, N, N, N),
+};
+
+static struct atlas7_grp_mux audio_spdif_out_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_spdif_out_grp1_pad_mux),
+       .pad_mux_list = audio_spdif_out_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_spdif_out_grp2_pad_mux[] = {
+       MUX(1, 142, 3, N, N, N, N),
+};
+
+static struct atlas7_grp_mux audio_spdif_out_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_spdif_out_grp2_pad_mux),
+       .pad_mux_list = audio_spdif_out_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart0_basic_grp_pad_mux[] = {
        MUX(1, 143, 1, N, N, N, N),
        MUX(1, 142, 1, N, N, N, N),
        MUX(1, 141, 1, N, N, N, N),
        MUX(1, 144, 1, N, N, N, N),
 };
 
-static struct atlas7_grp_mux audio_uart0_grp_mux = {
-       .pad_mux_count = ARRAY_SIZE(audio_uart0_grp_pad_mux),
-       .pad_mux_list = audio_uart0_grp_pad_mux,
+static struct atlas7_grp_mux audio_uart0_basic_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart0_basic_grp_pad_mux),
+       .pad_mux_list = audio_uart0_basic_grp_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart0_urfs_grp0_pad_mux[] = {
+       MUX(1, 117, 5, 0xa10, 28, 0xa90, 28),
 };
 
-static struct atlas7_pad_mux audio_uart1_grp_pad_mux[] = {
-       MUX(1, 147, 1, N, N, N, N),
-       MUX(1, 146, 1, N, N, N, N),
-       MUX(1, 145, 1, N, N, N, N),
-       MUX(1, 148, 1, N, N, N, N),
+static struct atlas7_grp_mux audio_uart0_urfs_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart0_urfs_grp0_pad_mux),
+       .pad_mux_list = audio_uart0_urfs_grp0_pad_mux,
 };
 
-static struct atlas7_grp_mux audio_uart1_grp_mux = {
-       .pad_mux_count = ARRAY_SIZE(audio_uart1_grp_pad_mux),
-       .pad_mux_list = audio_uart1_grp_pad_mux,
+static struct atlas7_pad_mux audio_uart0_urfs_grp1_pad_mux[] = {
+       MUX(1, 139, 3, 0xa10, 28, 0xa90, 28),
 };
 
-static struct atlas7_pad_mux audio_uart2_grp0_pad_mux[] = {
+static struct atlas7_grp_mux audio_uart0_urfs_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart0_urfs_grp1_pad_mux),
+       .pad_mux_list = audio_uart0_urfs_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart0_urfs_grp2_pad_mux[] = {
+       MUX(1, 163, 3, 0xa10, 28, 0xa90, 28),
+};
+
+static struct atlas7_grp_mux audio_uart0_urfs_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart0_urfs_grp2_pad_mux),
+       .pad_mux_list = audio_uart0_urfs_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart0_urfs_grp3_pad_mux[] = {
+       MUX(1, 162, 6, 0xa10, 28, 0xa90, 28),
+};
+
+static struct atlas7_grp_mux audio_uart0_urfs_grp3_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart0_urfs_grp3_pad_mux),
+       .pad_mux_list = audio_uart0_urfs_grp3_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart1_basic_grp_pad_mux[] = {
+       MUX(1, 147, 1, 0xa10, 24, 0xa90, 24),
+       MUX(1, 146, 1, 0xa10, 25, 0xa90, 25),
+       MUX(1, 145, 1, 0xa10, 23, 0xa90, 23),
+       MUX(1, 148, 1, 0xa10, 22, 0xa90, 22),
+};
+
+static struct atlas7_grp_mux audio_uart1_basic_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart1_basic_grp_pad_mux),
+       .pad_mux_list = audio_uart1_basic_grp_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart1_urfs_grp0_pad_mux[] = {
+       MUX(1, 117, 6, 0xa10, 29, 0xa90, 29),
+};
+
+static struct atlas7_grp_mux audio_uart1_urfs_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart1_urfs_grp0_pad_mux),
+       .pad_mux_list = audio_uart1_urfs_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart1_urfs_grp1_pad_mux[] = {
+       MUX(1, 140, 3, 0xa10, 29, 0xa90, 29),
+};
+
+static struct atlas7_grp_mux audio_uart1_urfs_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart1_urfs_grp1_pad_mux),
+       .pad_mux_list = audio_uart1_urfs_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart1_urfs_grp2_pad_mux[] = {
+       MUX(1, 163, 4, 0xa10, 29, 0xa90, 29),
+};
+
+static struct atlas7_grp_mux audio_uart1_urfs_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart1_urfs_grp2_pad_mux),
+       .pad_mux_list = audio_uart1_urfs_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_urfs_grp0_pad_mux[] = {
+       MUX(1, 139, 4, 0xa10, 30, 0xa90, 30),
+};
+
+static struct atlas7_grp_mux audio_uart2_urfs_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_urfs_grp0_pad_mux),
+       .pad_mux_list = audio_uart2_urfs_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_urfs_grp1_pad_mux[] = {
+       MUX(1, 163, 6, 0xa10, 30, 0xa90, 30),
+};
+
+static struct atlas7_grp_mux audio_uart2_urfs_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_urfs_grp1_pad_mux),
+       .pad_mux_list = audio_uart2_urfs_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_urfs_grp2_pad_mux[] = {
+       MUX(1, 96, 3, 0xa10, 30, 0xa90, 30),
+};
+
+static struct atlas7_grp_mux audio_uart2_urfs_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_urfs_grp2_pad_mux),
+       .pad_mux_list = audio_uart2_urfs_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_urxd_grp0_pad_mux[] = {
        MUX(1, 20, 2, 0xa00, 24, 0xa80, 24),
-       MUX(1, 21, 2, 0xa00, 25, 0xa80, 25),
-       MUX(1, 19, 2, 0xa00, 23, 0xa80, 23),
-       MUX(1, 18, 2, 0xa00, 22, 0xa80, 22),
 };
 
-static struct atlas7_grp_mux audio_uart2_grp0_mux = {
-       .pad_mux_count = ARRAY_SIZE(audio_uart2_grp0_pad_mux),
-       .pad_mux_list = audio_uart2_grp0_pad_mux,
+static struct atlas7_grp_mux audio_uart2_urxd_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_urxd_grp0_pad_mux),
+       .pad_mux_list = audio_uart2_urxd_grp0_pad_mux,
 };
 
-static struct atlas7_pad_mux audio_uart2_grp1_pad_mux[] = {
+static struct atlas7_pad_mux audio_uart2_urxd_grp1_pad_mux[] = {
        MUX(1, 109, 2, 0xa00, 24, 0xa80, 24),
-       MUX(1, 110, 2, 0xa00, 25, 0xa80, 25),
+};
+
+static struct atlas7_grp_mux audio_uart2_urxd_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_urxd_grp1_pad_mux),
+       .pad_mux_list = audio_uart2_urxd_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_urxd_grp2_pad_mux[] = {
+       MUX(1, 93, 3, 0xa00, 24, 0xa80, 24),
+};
+
+static struct atlas7_grp_mux audio_uart2_urxd_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_urxd_grp2_pad_mux),
+       .pad_mux_list = audio_uart2_urxd_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_usclk_grp0_pad_mux[] = {
+       MUX(1, 19, 2, 0xa00, 23, 0xa80, 23),
+};
+
+static struct atlas7_grp_mux audio_uart2_usclk_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_usclk_grp0_pad_mux),
+       .pad_mux_list = audio_uart2_usclk_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_usclk_grp1_pad_mux[] = {
        MUX(1, 101, 2, 0xa00, 23, 0xa80, 23),
+};
+
+static struct atlas7_grp_mux audio_uart2_usclk_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_usclk_grp1_pad_mux),
+       .pad_mux_list = audio_uart2_usclk_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_usclk_grp2_pad_mux[] = {
+       MUX(1, 91, 3, 0xa00, 23, 0xa80, 23),
+};
+
+static struct atlas7_grp_mux audio_uart2_usclk_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_usclk_grp2_pad_mux),
+       .pad_mux_list = audio_uart2_usclk_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_utfs_grp0_pad_mux[] = {
+       MUX(1, 18, 2, 0xa00, 22, 0xa80, 22),
+};
+
+static struct atlas7_grp_mux audio_uart2_utfs_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_utfs_grp0_pad_mux),
+       .pad_mux_list = audio_uart2_utfs_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_utfs_grp1_pad_mux[] = {
        MUX(1, 111, 2, 0xa00, 22, 0xa80, 22),
 };
 
-static struct atlas7_grp_mux audio_uart2_grp1_mux = {
-       .pad_mux_count = ARRAY_SIZE(audio_uart2_grp1_pad_mux),
-       .pad_mux_list = audio_uart2_grp1_pad_mux,
+static struct atlas7_grp_mux audio_uart2_utfs_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_utfs_grp1_pad_mux),
+       .pad_mux_list = audio_uart2_utfs_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_utfs_grp2_pad_mux[] = {
+       MUX(1, 94, 3, 0xa00, 22, 0xa80, 22),
+};
+
+static struct atlas7_grp_mux audio_uart2_utfs_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_utfs_grp2_pad_mux),
+       .pad_mux_list = audio_uart2_utfs_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_utxd_grp0_pad_mux[] = {
+       MUX(1, 21, 2, 0xa00, 25, 0xa80, 25),
+};
+
+static struct atlas7_grp_mux audio_uart2_utxd_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_utxd_grp0_pad_mux),
+       .pad_mux_list = audio_uart2_utxd_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_utxd_grp1_pad_mux[] = {
+       MUX(1, 110, 2, 0xa00, 25, 0xa80, 25),
+};
+
+static struct atlas7_grp_mux audio_uart2_utxd_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_utxd_grp1_pad_mux),
+       .pad_mux_list = audio_uart2_utxd_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux audio_uart2_utxd_grp2_pad_mux[] = {
+       MUX(1, 92, 3, 0xa00, 25, 0xa80, 25),
+};
+
+static struct atlas7_grp_mux audio_uart2_utxd_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(audio_uart2_utxd_grp2_pad_mux),
+       .pad_mux_list = audio_uart2_utxd_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux c_can_trnsvr_en_grp0_pad_mux[] = {
+       MUX(0, 2, 6, N, N, N, N),
 };
 
-static struct atlas7_pad_mux c_can_trnsvr_grp_pad_mux[] = {
+static struct atlas7_grp_mux c_can_trnsvr_en_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_en_grp0_pad_mux),
+       .pad_mux_list = c_can_trnsvr_en_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux c_can_trnsvr_en_grp1_pad_mux[] = {
+       MUX(0, 0, 2, N, N, N, N),
+};
+
+static struct atlas7_grp_mux c_can_trnsvr_en_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_en_grp1_pad_mux),
+       .pad_mux_list = c_can_trnsvr_en_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux c_can_trnsvr_intr_grp_pad_mux[] = {
        MUX(0, 1, 2, N, N, N, N),
 };
 
-static struct atlas7_grp_mux c_can_trnsvr_grp_mux = {
-       .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_grp_pad_mux),
-       .pad_mux_list = c_can_trnsvr_grp_pad_mux,
+static struct atlas7_grp_mux c_can_trnsvr_intr_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_intr_grp_pad_mux),
+       .pad_mux_list = c_can_trnsvr_intr_grp_pad_mux,
 };
 
-static struct atlas7_pad_mux c0_can_grp0_pad_mux[] = {
+static struct atlas7_pad_mux c_can_trnsvr_stb_n_grp_pad_mux[] = {
+       MUX(0, 3, 6, N, N, N, N),
+};
+
+static struct atlas7_grp_mux c_can_trnsvr_stb_n_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_stb_n_grp_pad_mux),
+       .pad_mux_list = c_can_trnsvr_stb_n_grp_pad_mux,
+};
+
+static struct atlas7_pad_mux c0_can_rxd_trnsv0_grp_pad_mux[] = {
        MUX(0, 11, 1, 0xa08, 9, 0xa88, 9),
+};
+
+static struct atlas7_grp_mux c0_can_rxd_trnsv0_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(c0_can_rxd_trnsv0_grp_pad_mux),
+       .pad_mux_list = c0_can_rxd_trnsv0_grp_pad_mux,
+};
+
+static struct atlas7_pad_mux c0_can_rxd_trnsv1_grp_pad_mux[] = {
+       MUX(0, 2, 5, 0xa10, 9, 0xa90, 9),
+};
+
+static struct atlas7_grp_mux c0_can_rxd_trnsv1_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(c0_can_rxd_trnsv1_grp_pad_mux),
+       .pad_mux_list = c0_can_rxd_trnsv1_grp_pad_mux,
+};
+
+static struct atlas7_pad_mux c0_can_txd_trnsv0_grp_pad_mux[] = {
        MUX(0, 10, 1, N, N, N, N),
 };
 
-static struct atlas7_grp_mux c0_can_grp0_mux = {
-       .pad_mux_count = ARRAY_SIZE(c0_can_grp0_pad_mux),
-       .pad_mux_list = c0_can_grp0_pad_mux,
+static struct atlas7_grp_mux c0_can_txd_trnsv0_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(c0_can_txd_trnsv0_grp_pad_mux),
+       .pad_mux_list = c0_can_txd_trnsv0_grp_pad_mux,
 };
 
-static struct atlas7_pad_mux c0_can_grp1_pad_mux[] = {
-       MUX(0, 2, 5, 0xa08, 9, 0xa88, 9),
+static struct atlas7_pad_mux c0_can_txd_trnsv1_grp_pad_mux[] = {
        MUX(0, 3, 5, N, N, N, N),
 };
 
-static struct atlas7_grp_mux c0_can_grp1_mux = {
-       .pad_mux_count = ARRAY_SIZE(c0_can_grp1_pad_mux),
-       .pad_mux_list = c0_can_grp1_pad_mux,
+static struct atlas7_grp_mux c0_can_txd_trnsv1_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(c0_can_txd_trnsv1_grp_pad_mux),
+       .pad_mux_list = c0_can_txd_trnsv1_grp_pad_mux,
 };
 
-static struct atlas7_pad_mux c1_can_grp0_pad_mux[] = {
+static struct atlas7_pad_mux c1_can_rxd_grp0_pad_mux[] = {
        MUX(1, 138, 2, 0xa00, 4, 0xa80, 4),
-       MUX(1, 137, 2, N, N, N, N),
 };
 
-static struct atlas7_grp_mux c1_can_grp0_mux = {
-       .pad_mux_count = ARRAY_SIZE(c1_can_grp0_pad_mux),
-       .pad_mux_list = c1_can_grp0_pad_mux,
+static struct atlas7_grp_mux c1_can_rxd_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(c1_can_rxd_grp0_pad_mux),
+       .pad_mux_list = c1_can_rxd_grp0_pad_mux,
 };
 
-static struct atlas7_pad_mux c1_can_grp1_pad_mux[] = {
+static struct atlas7_pad_mux c1_can_rxd_grp1_pad_mux[] = {
        MUX(1, 147, 2, 0xa00, 4, 0xa80, 4),
-       MUX(1, 146, 2, N, N, N, N),
 };
 
-static struct atlas7_grp_mux c1_can_grp1_mux = {
-       .pad_mux_count = ARRAY_SIZE(c1_can_grp1_pad_mux),
-       .pad_mux_list = c1_can_grp1_pad_mux,
+static struct atlas7_grp_mux c1_can_rxd_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(c1_can_rxd_grp1_pad_mux),
+       .pad_mux_list = c1_can_rxd_grp1_pad_mux,
 };
 
-static struct atlas7_pad_mux c1_can_grp2_pad_mux[] = {
+static struct atlas7_pad_mux c1_can_rxd_grp2_pad_mux[] = {
        MUX(0, 2, 2, 0xa00, 4, 0xa80, 4),
+};
+
+static struct atlas7_grp_mux c1_can_rxd_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(c1_can_rxd_grp2_pad_mux),
+       .pad_mux_list = c1_can_rxd_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux c1_can_rxd_grp3_pad_mux[] = {
+       MUX(1, 162, 4, 0xa00, 4, 0xa80, 4),
+};
+
+static struct atlas7_grp_mux c1_can_rxd_grp3_mux = {
+       .pad_mux_count = ARRAY_SIZE(c1_can_rxd_grp3_pad_mux),
+       .pad_mux_list = c1_can_rxd_grp3_pad_mux,
+};
+
+static struct atlas7_pad_mux c1_can_txd_grp0_pad_mux[] = {
+       MUX(1, 137, 2, N, N, N, N),
+};
+
+static struct atlas7_grp_mux c1_can_txd_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(c1_can_txd_grp0_pad_mux),
+       .pad_mux_list = c1_can_txd_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux c1_can_txd_grp1_pad_mux[] = {
+       MUX(1, 146, 2, N, N, N, N),
+};
+
+static struct atlas7_grp_mux c1_can_txd_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(c1_can_txd_grp1_pad_mux),
+       .pad_mux_list = c1_can_txd_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux c1_can_txd_grp2_pad_mux[] = {
        MUX(0, 3, 2, N, N, N, N),
 };
 
-static struct atlas7_grp_mux c1_can_grp2_mux = {
-       .pad_mux_count = ARRAY_SIZE(c1_can_grp2_pad_mux),
-       .pad_mux_list = c1_can_grp2_pad_mux,
+static struct atlas7_grp_mux c1_can_txd_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(c1_can_txd_grp2_pad_mux),
+       .pad_mux_list = c1_can_txd_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux c1_can_txd_grp3_pad_mux[] = {
+       MUX(1, 161, 4, N, N, N, N),
+};
+
+static struct atlas7_grp_mux c1_can_txd_grp3_mux = {
+       .pad_mux_count = ARRAY_SIZE(c1_can_txd_grp3_pad_mux),
+       .pad_mux_list = c1_can_txd_grp3_pad_mux,
 };
 
 static struct atlas7_pad_mux ca_audio_lpc_grp_pad_mux[] = {
@@ -2198,18 +2826,215 @@ static struct atlas7_grp_mux i2c1_grp_mux = {
        .pad_mux_list = i2c1_grp_pad_mux,
 };
 
-static struct atlas7_pad_mux jtag_grp0_pad_mux[] = {
+static struct atlas7_pad_mux i2s0_grp_pad_mux[] = {
+       MUX(1, 91, 2, 0xa10, 12, 0xa90, 12),
+       MUX(1, 93, 2, 0xa10, 13, 0xa90, 13),
+       MUX(1, 94, 2, 0xa10, 14, 0xa90, 14),
+       MUX(1, 92, 2, 0xa10, 15, 0xa90, 15),
+};
+
+static struct atlas7_grp_mux i2s0_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(i2s0_grp_pad_mux),
+       .pad_mux_list = i2s0_grp_pad_mux,
+};
+
+static struct atlas7_pad_mux i2s1_basic_grp_pad_mux[] = {
+       MUX(1, 95, 2, 0xa10, 16, 0xa90, 16),
+       MUX(1, 96, 2, 0xa10, 19, 0xa90, 19),
+};
+
+static struct atlas7_grp_mux i2s1_basic_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(i2s1_basic_grp_pad_mux),
+       .pad_mux_list = i2s1_basic_grp_pad_mux,
+};
+
+static struct atlas7_pad_mux i2s1_rxd0_grp0_pad_mux[] = {
+       MUX(1, 61, 4, 0xa10, 17, 0xa90, 17),
+};
+
+static struct atlas7_grp_mux i2s1_rxd0_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp0_pad_mux),
+       .pad_mux_list = i2s1_rxd0_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux i2s1_rxd0_grp1_pad_mux[] = {
+       MUX(1, 131, 4, 0xa10, 17, 0xa90, 17),
+};
+
+static struct atlas7_grp_mux i2s1_rxd0_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp1_pad_mux),
+       .pad_mux_list = i2s1_rxd0_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux i2s1_rxd0_grp2_pad_mux[] = {
+       MUX(1, 129, 2, 0xa10, 17, 0xa90, 17),
+};
+
+static struct atlas7_grp_mux i2s1_rxd0_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp2_pad_mux),
+       .pad_mux_list = i2s1_rxd0_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux i2s1_rxd0_grp3_pad_mux[] = {
+       MUX(1, 117, 7, 0xa10, 17, 0xa90, 17),
+};
+
+static struct atlas7_grp_mux i2s1_rxd0_grp3_mux = {
+       .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp3_pad_mux),
+       .pad_mux_list = i2s1_rxd0_grp3_pad_mux,
+};
+
+static struct atlas7_pad_mux i2s1_rxd0_grp4_pad_mux[] = {
+       MUX(1, 83, 4, 0xa10, 17, 0xa90, 17),
+};
+
+static struct atlas7_grp_mux i2s1_rxd0_grp4_mux = {
+       .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp4_pad_mux),
+       .pad_mux_list = i2s1_rxd0_grp4_pad_mux,
+};
+
+static struct atlas7_pad_mux i2s1_rxd1_grp0_pad_mux[] = {
+       MUX(1, 72, 4, 0xa10, 18, 0xa90, 18),
+};
+
+static struct atlas7_grp_mux i2s1_rxd1_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp0_pad_mux),
+       .pad_mux_list = i2s1_rxd1_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux i2s1_rxd1_grp1_pad_mux[] = {
+       MUX(1, 132, 4, 0xa10, 18, 0xa90, 18),
+};
+
+static struct atlas7_grp_mux i2s1_rxd1_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp1_pad_mux),
+       .pad_mux_list = i2s1_rxd1_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux i2s1_rxd1_grp2_pad_mux[] = {
+       MUX(1, 130, 2, 0xa10, 18, 0xa90, 18),
+};
+
+static struct atlas7_grp_mux i2s1_rxd1_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp2_pad_mux),
+       .pad_mux_list = i2s1_rxd1_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux i2s1_rxd1_grp3_pad_mux[] = {
+       MUX(1, 118, 7, 0xa10, 18, 0xa90, 18),
+};
+
+static struct atlas7_grp_mux i2s1_rxd1_grp3_mux = {
+       .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp3_pad_mux),
+       .pad_mux_list = i2s1_rxd1_grp3_pad_mux,
+};
+
+static struct atlas7_pad_mux i2s1_rxd1_grp4_pad_mux[] = {
+       MUX(1, 84, 4, 0xa10, 18, 0xa90, 18),
+};
+
+static struct atlas7_grp_mux i2s1_rxd1_grp4_mux = {
+       .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp4_pad_mux),
+       .pad_mux_list = i2s1_rxd1_grp4_pad_mux,
+};
+
+static struct atlas7_pad_mux jtag_jt_dbg_nsrst_grp_pad_mux[] = {
        MUX(1, 125, 5, 0xa08, 2, 0xa88, 2),
+};
+
+static struct atlas7_grp_mux jtag_jt_dbg_nsrst_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(jtag_jt_dbg_nsrst_grp_pad_mux),
+       .pad_mux_list = jtag_jt_dbg_nsrst_grp_pad_mux,
+};
+
+static struct atlas7_pad_mux jtag_ntrst_grp0_pad_mux[] = {
        MUX(0, 4, 3, 0xa08, 3, 0xa88, 3),
-       MUX(0, 2, 3, N, N, N, N),
-       MUX(0, 0, 3, N, N, N, N),
-       MUX(0, 1, 3, N, N, N, N),
+};
+
+static struct atlas7_grp_mux jtag_ntrst_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(jtag_ntrst_grp0_pad_mux),
+       .pad_mux_list = jtag_ntrst_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux jtag_ntrst_grp1_pad_mux[] = {
+       MUX(1, 163, 1, 0xa08, 3, 0xa88, 3),
+};
+
+static struct atlas7_grp_mux jtag_ntrst_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(jtag_ntrst_grp1_pad_mux),
+       .pad_mux_list = jtag_ntrst_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux jtag_swdiotms_grp0_pad_mux[] = {
+       MUX(0, 2, 3, 0xa10, 10, 0xa90, 10),
+};
+
+static struct atlas7_grp_mux jtag_swdiotms_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(jtag_swdiotms_grp0_pad_mux),
+       .pad_mux_list = jtag_swdiotms_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux jtag_swdiotms_grp1_pad_mux[] = {
+       MUX(1, 160, 1, 0xa10, 10, 0xa90, 10),
+};
+
+static struct atlas7_grp_mux jtag_swdiotms_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(jtag_swdiotms_grp1_pad_mux),
+       .pad_mux_list = jtag_swdiotms_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux jtag_tck_grp0_pad_mux[] = {
+       MUX(0, 0, 3, 0xa10, 11, 0xa90, 11),
+};
+
+static struct atlas7_grp_mux jtag_tck_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(jtag_tck_grp0_pad_mux),
+       .pad_mux_list = jtag_tck_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux jtag_tck_grp1_pad_mux[] = {
+       MUX(1, 161, 1, 0xa10, 11, 0xa90, 11),
+};
+
+static struct atlas7_grp_mux jtag_tck_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(jtag_tck_grp1_pad_mux),
+       .pad_mux_list = jtag_tck_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux jtag_tdi_grp0_pad_mux[] = {
+       MUX(0, 1, 3, 0xa10, 31, 0xa90, 31),
+};
+
+static struct atlas7_grp_mux jtag_tdi_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(jtag_tdi_grp0_pad_mux),
+       .pad_mux_list = jtag_tdi_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux jtag_tdi_grp1_pad_mux[] = {
+       MUX(1, 162, 1, 0xa10, 31, 0xa90, 31),
+};
+
+static struct atlas7_grp_mux jtag_tdi_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(jtag_tdi_grp1_pad_mux),
+       .pad_mux_list = jtag_tdi_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux jtag_tdo_grp0_pad_mux[] = {
        MUX(0, 3, 3, N, N, N, N),
 };
 
-static struct atlas7_grp_mux jtag_grp0_mux = {
-       .pad_mux_count = ARRAY_SIZE(jtag_grp0_pad_mux),
-       .pad_mux_list = jtag_grp0_pad_mux,
+static struct atlas7_grp_mux jtag_tdo_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(jtag_tdo_grp0_pad_mux),
+       .pad_mux_list = jtag_tdo_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux jtag_tdo_grp1_pad_mux[] = {
+       MUX(1, 159, 1, N, N, N, N),
+};
+
+static struct atlas7_grp_mux jtag_tdo_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(jtag_tdo_grp1_pad_mux),
+       .pad_mux_list = jtag_tdo_grp1_pad_mux,
 };
 
 static struct atlas7_pad_mux ks_kas_spi_grp0_pad_mux[] = {
@@ -2401,6 +3226,7 @@ static struct atlas7_grp_mux nd_df_nowp_grp_mux = {
 static struct atlas7_pad_mux ps_grp_pad_mux[] = {
        MUX(1, 120, 2, N, N, N, N),
        MUX(1, 119, 2, N, N, N, N),
+       MUX(1, 121, 5, N, N, N, N),
 };
 
 static struct atlas7_grp_mux ps_grp_mux = {
@@ -2534,6 +3360,15 @@ static struct atlas7_grp_mux pw_cko0_grp2_mux = {
        .pad_mux_list = pw_cko0_grp2_pad_mux,
 };
 
+static struct atlas7_pad_mux pw_cko0_grp3_pad_mux[] = {
+       MUX(1, 162, 5, N, N, N, N),
+};
+
+static struct atlas7_grp_mux pw_cko0_grp3_mux = {
+       .pad_mux_count = ARRAY_SIZE(pw_cko0_grp3_pad_mux),
+       .pad_mux_list = pw_cko0_grp3_pad_mux,
+};
+
 static struct atlas7_pad_mux pw_cko1_grp0_pad_mux[] = {
        MUX(1, 124, 3, N, N, N, N),
 };
@@ -2552,6 +3387,15 @@ static struct atlas7_grp_mux pw_cko1_grp1_mux = {
        .pad_mux_list = pw_cko1_grp1_pad_mux,
 };
 
+static struct atlas7_pad_mux pw_cko1_grp2_pad_mux[] = {
+       MUX(1, 163, 5, N, N, N, N),
+};
+
+static struct atlas7_grp_mux pw_cko1_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(pw_cko1_grp2_pad_mux),
+       .pad_mux_list = pw_cko1_grp2_pad_mux,
+};
+
 static struct atlas7_pad_mux pw_i2s01_clk_grp0_pad_mux[] = {
        MUX(1, 125, 3, N, N, N, N),
 };
@@ -2570,22 +3414,58 @@ static struct atlas7_grp_mux pw_i2s01_clk_grp1_mux = {
        .pad_mux_list = pw_i2s01_clk_grp1_pad_mux,
 };
 
-static struct atlas7_pad_mux pw_pwm0_grp_pad_mux[] = {
+static struct atlas7_pad_mux pw_i2s01_clk_grp2_pad_mux[] = {
+       MUX(1, 132, 2, N, N, N, N),
+};
+
+static struct atlas7_grp_mux pw_i2s01_clk_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(pw_i2s01_clk_grp2_pad_mux),
+       .pad_mux_list = pw_i2s01_clk_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux pw_pwm0_grp0_pad_mux[] = {
        MUX(1, 119, 3, N, N, N, N),
 };
 
-static struct atlas7_grp_mux pw_pwm0_grp_mux = {
-       .pad_mux_count = ARRAY_SIZE(pw_pwm0_grp_pad_mux),
-       .pad_mux_list = pw_pwm0_grp_pad_mux,
+static struct atlas7_grp_mux pw_pwm0_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(pw_pwm0_grp0_pad_mux),
+       .pad_mux_list = pw_pwm0_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux pw_pwm0_grp1_pad_mux[] = {
+       MUX(1, 159, 5, N, N, N, N),
+};
+
+static struct atlas7_grp_mux pw_pwm0_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(pw_pwm0_grp1_pad_mux),
+       .pad_mux_list = pw_pwm0_grp1_pad_mux,
 };
 
-static struct atlas7_pad_mux pw_pwm1_grp_pad_mux[] = {
+static struct atlas7_pad_mux pw_pwm1_grp0_pad_mux[] = {
        MUX(1, 120, 3, N, N, N, N),
 };
 
-static struct atlas7_grp_mux pw_pwm1_grp_mux = {
-       .pad_mux_count = ARRAY_SIZE(pw_pwm1_grp_pad_mux),
-       .pad_mux_list = pw_pwm1_grp_pad_mux,
+static struct atlas7_grp_mux pw_pwm1_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(pw_pwm1_grp0_pad_mux),
+       .pad_mux_list = pw_pwm1_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux pw_pwm1_grp1_pad_mux[] = {
+       MUX(1, 160, 5, N, N, N, N),
+};
+
+static struct atlas7_grp_mux pw_pwm1_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(pw_pwm1_grp1_pad_mux),
+       .pad_mux_list = pw_pwm1_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux pw_pwm1_grp2_pad_mux[] = {
+       MUX(1, 131, 2, N, N, N, N),
+};
+
+static struct atlas7_grp_mux pw_pwm1_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(pw_pwm1_grp2_pad_mux),
+       .pad_mux_list = pw_pwm1_grp2_pad_mux,
 };
 
 static struct atlas7_pad_mux pw_pwm2_grp0_pad_mux[] = {
@@ -2606,6 +3486,15 @@ static struct atlas7_grp_mux pw_pwm2_grp1_mux = {
        .pad_mux_list = pw_pwm2_grp1_pad_mux,
 };
 
+static struct atlas7_pad_mux pw_pwm2_grp2_pad_mux[] = {
+       MUX(1, 161, 5, N, N, N, N),
+};
+
+static struct atlas7_grp_mux pw_pwm2_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(pw_pwm2_grp2_pad_mux),
+       .pad_mux_list = pw_pwm2_grp2_pad_mux,
+};
+
 static struct atlas7_pad_mux pw_pwm3_grp0_pad_mux[] = {
        MUX(1, 122, 3, N, N, N, N),
 };
@@ -2642,6 +3531,15 @@ static struct atlas7_grp_mux pw_pwm_cpu_vol_grp1_mux = {
        .pad_mux_list = pw_pwm_cpu_vol_grp1_pad_mux,
 };
 
+static struct atlas7_pad_mux pw_pwm_cpu_vol_grp2_pad_mux[] = {
+       MUX(1, 161, 5, N, N, N, N),
+};
+
+static struct atlas7_grp_mux pw_pwm_cpu_vol_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(pw_pwm_cpu_vol_grp2_pad_mux),
+       .pad_mux_list = pw_pwm_cpu_vol_grp2_pad_mux,
+};
+
 static struct atlas7_pad_mux pw_backlight_grp0_pad_mux[] = {
        MUX(1, 122, 3, N, N, N, N),
 };
@@ -2795,35 +3693,54 @@ static struct atlas7_grp_mux sd1_4bit_grp1_mux = {
        .pad_mux_list = sd1_4bit_grp1_pad_mux,
 };
 
-static struct atlas7_pad_mux sd2_grp0_pad_mux[] = {
-       MUX(1, 124, 2, 0xa08, 7, 0xa88, 7),
+static struct atlas7_pad_mux sd2_basic_grp_pad_mux[] = {
        MUX(1, 31, 1, N, N, N, N),
        MUX(1, 32, 1, N, N, N, N),
        MUX(1, 33, 1, N, N, N, N),
        MUX(1, 34, 1, N, N, N, N),
        MUX(1, 35, 1, N, N, N, N),
        MUX(1, 36, 1, N, N, N, N),
-       MUX(1, 123, 2, N, N, N, N),
 };
 
-static struct atlas7_grp_mux sd2_grp0_mux = {
-       .pad_mux_count = ARRAY_SIZE(sd2_grp0_pad_mux),
-       .pad_mux_list = sd2_grp0_pad_mux,
+static struct atlas7_grp_mux sd2_basic_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(sd2_basic_grp_pad_mux),
+       .pad_mux_list = sd2_basic_grp_pad_mux,
 };
 
-static struct atlas7_pad_mux sd2_no_cdb_grp0_pad_mux[] = {
-       MUX(1, 31, 1, N, N, N, N),
-       MUX(1, 32, 1, N, N, N, N),
-       MUX(1, 33, 1, N, N, N, N),
-       MUX(1, 34, 1, N, N, N, N),
-       MUX(1, 35, 1, N, N, N, N),
-       MUX(1, 36, 1, N, N, N, N),
-       MUX(1, 123, 2, N, N, N, N),
+static struct atlas7_pad_mux sd2_cdb_grp0_pad_mux[] = {
+       MUX(1, 124, 2, 0xa08, 7, 0xa88, 7),
 };
 
-static struct atlas7_grp_mux sd2_no_cdb_grp0_mux = {
-       .pad_mux_count = ARRAY_SIZE(sd2_no_cdb_grp0_pad_mux),
-       .pad_mux_list = sd2_no_cdb_grp0_pad_mux,
+static struct atlas7_grp_mux sd2_cdb_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(sd2_cdb_grp0_pad_mux),
+       .pad_mux_list = sd2_cdb_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux sd2_cdb_grp1_pad_mux[] = {
+       MUX(1, 161, 6, 0xa08, 7, 0xa88, 7),
+};
+
+static struct atlas7_grp_mux sd2_cdb_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(sd2_cdb_grp1_pad_mux),
+       .pad_mux_list = sd2_cdb_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux sd2_wpb_grp0_pad_mux[] = {
+       MUX(1, 123, 2, 0xa10, 6, 0xa90, 6),
+};
+
+static struct atlas7_grp_mux sd2_wpb_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(sd2_wpb_grp0_pad_mux),
+       .pad_mux_list = sd2_wpb_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux sd2_wpb_grp1_pad_mux[] = {
+       MUX(1, 163, 7, 0xa10, 6, 0xa90, 6),
+};
+
+static struct atlas7_grp_mux sd2_wpb_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(sd2_wpb_grp1_pad_mux),
+       .pad_mux_list = sd2_wpb_grp1_pad_mux,
 };
 
 static struct atlas7_pad_mux sd3_grp_pad_mux[] = {
@@ -2975,146 +3892,302 @@ static struct atlas7_grp_mux uart1_grp_mux = {
        .pad_mux_list = uart1_grp_pad_mux,
 };
 
-static struct atlas7_pad_mux uart2_grp_pad_mux[] = {
-       MUX(0, 11, 2, N, N, N, N),
+static struct atlas7_pad_mux uart2_cts_grp0_pad_mux[] = {
+       MUX(1, 132, 3, 0xa10, 2, 0xa90, 2),
+};
+
+static struct atlas7_grp_mux uart2_cts_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart2_cts_grp0_pad_mux),
+       .pad_mux_list = uart2_cts_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux uart2_cts_grp1_pad_mux[] = {
+       MUX(1, 162, 2, 0xa10, 2, 0xa90, 2),
+};
+
+static struct atlas7_grp_mux uart2_cts_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart2_cts_grp1_pad_mux),
+       .pad_mux_list = uart2_cts_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux uart2_rts_grp0_pad_mux[] = {
+       MUX(1, 131, 3, N, N, N, N),
+};
+
+static struct atlas7_grp_mux uart2_rts_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart2_rts_grp0_pad_mux),
+       .pad_mux_list = uart2_rts_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux uart2_rts_grp1_pad_mux[] = {
+       MUX(1, 161, 2, N, N, N, N),
+};
+
+static struct atlas7_grp_mux uart2_rts_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart2_rts_grp1_pad_mux),
+       .pad_mux_list = uart2_rts_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux uart2_rxd_grp0_pad_mux[] = {
+       MUX(0, 11, 2, 0xa10, 5, 0xa90, 5),
+};
+
+static struct atlas7_grp_mux uart2_rxd_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart2_rxd_grp0_pad_mux),
+       .pad_mux_list = uart2_rxd_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux uart2_rxd_grp1_pad_mux[] = {
+       MUX(1, 160, 2, 0xa10, 5, 0xa90, 5),
+};
+
+static struct atlas7_grp_mux uart2_rxd_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart2_rxd_grp1_pad_mux),
+       .pad_mux_list = uart2_rxd_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux uart2_rxd_grp2_pad_mux[] = {
+       MUX(1, 130, 3, 0xa10, 5, 0xa90, 5),
+};
+
+static struct atlas7_grp_mux uart2_rxd_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart2_rxd_grp2_pad_mux),
+       .pad_mux_list = uart2_rxd_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux uart2_txd_grp0_pad_mux[] = {
        MUX(0, 10, 2, N, N, N, N),
 };
 
-static struct atlas7_grp_mux uart2_grp_mux = {
-       .pad_mux_count = ARRAY_SIZE(uart2_grp_pad_mux),
-       .pad_mux_list = uart2_grp_pad_mux,
+static struct atlas7_grp_mux uart2_txd_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart2_txd_grp0_pad_mux),
+       .pad_mux_list = uart2_txd_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux uart2_txd_grp1_pad_mux[] = {
+       MUX(1, 159, 2, N, N, N, N),
 };
 
-static struct atlas7_pad_mux uart3_grp0_pad_mux[] = {
+static struct atlas7_grp_mux uart2_txd_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart2_txd_grp1_pad_mux),
+       .pad_mux_list = uart2_txd_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux uart2_txd_grp2_pad_mux[] = {
+       MUX(1, 129, 3, N, N, N, N),
+};
+
+static struct atlas7_grp_mux uart2_txd_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart2_txd_grp2_pad_mux),
+       .pad_mux_list = uart2_txd_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux uart3_cts_grp0_pad_mux[] = {
        MUX(1, 125, 2, 0xa08, 0, 0xa88, 0),
-       MUX(1, 126, 2, N, N, N, N),
-       MUX(1, 138, 1, 0xa00, 5, 0xa80, 5),
-       MUX(1, 137, 1, N, N, N, N),
 };
 
-static struct atlas7_grp_mux uart3_grp0_mux = {
-       .pad_mux_count = ARRAY_SIZE(uart3_grp0_pad_mux),
-       .pad_mux_list = uart3_grp0_pad_mux,
+static struct atlas7_grp_mux uart3_cts_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart3_cts_grp0_pad_mux),
+       .pad_mux_list = uart3_cts_grp0_pad_mux,
 };
 
-static struct atlas7_pad_mux uart3_grp1_pad_mux[] = {
+static struct atlas7_pad_mux uart3_cts_grp1_pad_mux[] = {
        MUX(1, 111, 4, 0xa08, 0, 0xa88, 0),
-       MUX(1, 109, 4, N, N, N, N),
-       MUX(1, 84, 2, 0xa00, 5, 0xa80, 5),
-       MUX(1, 83, 2, N, N, N, N),
 };
 
-static struct atlas7_grp_mux uart3_grp1_mux = {
-       .pad_mux_count = ARRAY_SIZE(uart3_grp1_pad_mux),
-       .pad_mux_list = uart3_grp1_pad_mux,
+static struct atlas7_grp_mux uart3_cts_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart3_cts_grp1_pad_mux),
+       .pad_mux_list = uart3_cts_grp1_pad_mux,
 };
 
-static struct atlas7_pad_mux uart3_grp2_pad_mux[] = {
+static struct atlas7_pad_mux uart3_cts_grp2_pad_mux[] = {
        MUX(1, 140, 2, 0xa08, 0, 0xa88, 0),
+};
+
+static struct atlas7_grp_mux uart3_cts_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart3_cts_grp2_pad_mux),
+       .pad_mux_list = uart3_cts_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux uart3_rts_grp0_pad_mux[] = {
+       MUX(1, 126, 2, N, N, N, N),
+};
+
+static struct atlas7_grp_mux uart3_rts_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart3_rts_grp0_pad_mux),
+       .pad_mux_list = uart3_rts_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux uart3_rts_grp1_pad_mux[] = {
+       MUX(1, 109, 4, N, N, N, N),
+};
+
+static struct atlas7_grp_mux uart3_rts_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart3_rts_grp1_pad_mux),
+       .pad_mux_list = uart3_rts_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux uart3_rts_grp2_pad_mux[] = {
        MUX(1, 139, 2, N, N, N, N),
+};
+
+static struct atlas7_grp_mux uart3_rts_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart3_rts_grp2_pad_mux),
+       .pad_mux_list = uart3_rts_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux uart3_rxd_grp0_pad_mux[] = {
        MUX(1, 138, 1, 0xa00, 5, 0xa80, 5),
-       MUX(1, 137, 1, N, N, N, N),
 };
 
-static struct atlas7_grp_mux uart3_grp2_mux = {
-       .pad_mux_count = ARRAY_SIZE(uart3_grp2_pad_mux),
-       .pad_mux_list = uart3_grp2_pad_mux,
+static struct atlas7_grp_mux uart3_rxd_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart3_rxd_grp0_pad_mux),
+       .pad_mux_list = uart3_rxd_grp0_pad_mux,
 };
 
-static struct atlas7_pad_mux uart3_grp3_pad_mux[] = {
-       MUX(1, 139, 2, N, N, N, N),
-       MUX(1, 140, 2, 0xa08, 0, 0xa88, 0),
+static struct atlas7_pad_mux uart3_rxd_grp1_pad_mux[] = {
        MUX(1, 84, 2, 0xa00, 5, 0xa80, 5),
-       MUX(1, 83, 2, N, N, N, N),
 };
 
-static struct atlas7_grp_mux uart3_grp3_mux = {
-       .pad_mux_count = ARRAY_SIZE(uart3_grp3_pad_mux),
-       .pad_mux_list = uart3_grp3_pad_mux,
+static struct atlas7_grp_mux uart3_rxd_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart3_rxd_grp1_pad_mux),
+       .pad_mux_list = uart3_rxd_grp1_pad_mux,
 };
 
-static struct atlas7_pad_mux uart3_nopause_grp0_pad_mux[] = {
-       MUX(1, 138, 1, 0xa00, 5, 0xa80, 5),
+static struct atlas7_pad_mux uart3_rxd_grp2_pad_mux[] = {
+       MUX(1, 162, 3, 0xa00, 5, 0xa80, 5),
+};
+
+static struct atlas7_grp_mux uart3_rxd_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart3_rxd_grp2_pad_mux),
+       .pad_mux_list = uart3_rxd_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux uart3_txd_grp0_pad_mux[] = {
        MUX(1, 137, 1, N, N, N, N),
 };
 
-static struct atlas7_grp_mux uart3_nopause_grp0_mux = {
-       .pad_mux_count = ARRAY_SIZE(uart3_nopause_grp0_pad_mux),
-       .pad_mux_list = uart3_nopause_grp0_pad_mux,
+static struct atlas7_grp_mux uart3_txd_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart3_txd_grp0_pad_mux),
+       .pad_mux_list = uart3_txd_grp0_pad_mux,
 };
 
-static struct atlas7_pad_mux uart3_nopause_grp1_pad_mux[] = {
-       MUX(1, 84, 2, 0xa00, 5, 0xa80, 5),
+static struct atlas7_pad_mux uart3_txd_grp1_pad_mux[] = {
        MUX(1, 83, 2, N, N, N, N),
 };
 
-static struct atlas7_grp_mux uart3_nopause_grp1_mux = {
-       .pad_mux_count = ARRAY_SIZE(uart3_nopause_grp1_pad_mux),
-       .pad_mux_list = uart3_nopause_grp1_pad_mux,
+static struct atlas7_grp_mux uart3_txd_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart3_txd_grp1_pad_mux),
+       .pad_mux_list = uart3_txd_grp1_pad_mux,
 };
 
-static struct atlas7_pad_mux uart4_grp0_pad_mux[] = {
-       MUX(1, 122, 4, 0xa08, 1, 0xa88, 1),
-       MUX(1, 123, 4, N, N, N, N),
+static struct atlas7_pad_mux uart3_txd_grp2_pad_mux[] = {
+       MUX(1, 161, 3, N, N, N, N),
+};
+
+static struct atlas7_grp_mux uart3_txd_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart3_txd_grp2_pad_mux),
+       .pad_mux_list = uart3_txd_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux uart4_basic_grp_pad_mux[] = {
        MUX(1, 140, 1, N, N, N, N),
        MUX(1, 139, 1, N, N, N, N),
 };
 
-static struct atlas7_grp_mux uart4_grp0_mux = {
-       .pad_mux_count = ARRAY_SIZE(uart4_grp0_pad_mux),
-       .pad_mux_list = uart4_grp0_pad_mux,
+static struct atlas7_grp_mux uart4_basic_grp_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart4_basic_grp_pad_mux),
+       .pad_mux_list = uart4_basic_grp_pad_mux,
+};
+
+static struct atlas7_pad_mux uart4_cts_grp0_pad_mux[] = {
+       MUX(1, 122, 4, 0xa08, 1, 0xa88, 1),
+};
+
+static struct atlas7_grp_mux uart4_cts_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart4_cts_grp0_pad_mux),
+       .pad_mux_list = uart4_cts_grp0_pad_mux,
 };
 
-static struct atlas7_pad_mux uart4_grp1_pad_mux[] = {
+static struct atlas7_pad_mux uart4_cts_grp1_pad_mux[] = {
        MUX(1, 100, 4, 0xa08, 1, 0xa88, 1),
-       MUX(1, 99, 4, N, N, N, N),
-       MUX(1, 140, 1, N, N, N, N),
-       MUX(1, 139, 1, N, N, N, N),
 };
 
-static struct atlas7_grp_mux uart4_grp1_mux = {
-       .pad_mux_count = ARRAY_SIZE(uart4_grp1_pad_mux),
-       .pad_mux_list = uart4_grp1_pad_mux,
+static struct atlas7_grp_mux uart4_cts_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart4_cts_grp1_pad_mux),
+       .pad_mux_list = uart4_cts_grp1_pad_mux,
 };
 
-static struct atlas7_pad_mux uart4_grp2_pad_mux[] = {
+static struct atlas7_pad_mux uart4_cts_grp2_pad_mux[] = {
        MUX(1, 117, 2, 0xa08, 1, 0xa88, 1),
-       MUX(1, 116, 2, N, N, N, N),
-       MUX(1, 140, 1, N, N, N, N),
-       MUX(1, 139, 1, N, N, N, N),
 };
 
-static struct atlas7_grp_mux uart4_grp2_mux = {
-       .pad_mux_count = ARRAY_SIZE(uart4_grp2_pad_mux),
-       .pad_mux_list = uart4_grp2_pad_mux,
+static struct atlas7_grp_mux uart4_cts_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart4_cts_grp2_pad_mux),
+       .pad_mux_list = uart4_cts_grp2_pad_mux,
 };
 
-static struct atlas7_pad_mux uart4_nopause_grp_pad_mux[] = {
-       MUX(1, 140, 1, N, N, N, N),
-       MUX(1, 139, 1, N, N, N, N),
+static struct atlas7_pad_mux uart4_rts_grp0_pad_mux[] = {
+       MUX(1, 123, 4, N, N, N, N),
 };
 
-static struct atlas7_grp_mux uart4_nopause_grp_mux = {
-       .pad_mux_count = ARRAY_SIZE(uart4_nopause_grp_pad_mux),
-       .pad_mux_list = uart4_nopause_grp_pad_mux,
+static struct atlas7_grp_mux uart4_rts_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart4_rts_grp0_pad_mux),
+       .pad_mux_list = uart4_rts_grp0_pad_mux,
 };
 
-static struct atlas7_pad_mux usb0_drvvbus_grp_pad_mux[] = {
+static struct atlas7_pad_mux uart4_rts_grp1_pad_mux[] = {
+       MUX(1, 99, 4, N, N, N, N),
+};
+
+static struct atlas7_grp_mux uart4_rts_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart4_rts_grp1_pad_mux),
+       .pad_mux_list = uart4_rts_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux uart4_rts_grp2_pad_mux[] = {
+       MUX(1, 116, 2, N, N, N, N),
+};
+
+static struct atlas7_grp_mux uart4_rts_grp2_mux = {
+       .pad_mux_count = ARRAY_SIZE(uart4_rts_grp2_pad_mux),
+       .pad_mux_list = uart4_rts_grp2_pad_mux,
+};
+
+static struct atlas7_pad_mux usb0_drvvbus_grp0_pad_mux[] = {
        MUX(1, 51, 2, N, N, N, N),
 };
 
-static struct atlas7_grp_mux usb0_drvvbus_grp_mux = {
-       .pad_mux_count = ARRAY_SIZE(usb0_drvvbus_grp_pad_mux),
-       .pad_mux_list = usb0_drvvbus_grp_pad_mux,
+static struct atlas7_grp_mux usb0_drvvbus_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(usb0_drvvbus_grp0_pad_mux),
+       .pad_mux_list = usb0_drvvbus_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux usb0_drvvbus_grp1_pad_mux[] = {
+       MUX(1, 162, 7, N, N, N, N),
 };
 
-static struct atlas7_pad_mux usb1_drvvbus_grp_pad_mux[] = {
+static struct atlas7_grp_mux usb0_drvvbus_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(usb0_drvvbus_grp1_pad_mux),
+       .pad_mux_list = usb0_drvvbus_grp1_pad_mux,
+};
+
+static struct atlas7_pad_mux usb1_drvvbus_grp0_pad_mux[] = {
        MUX(1, 134, 2, N, N, N, N),
 };
 
-static struct atlas7_grp_mux usb1_drvvbus_grp_mux = {
-       .pad_mux_count = ARRAY_SIZE(usb1_drvvbus_grp_pad_mux),
-       .pad_mux_list = usb1_drvvbus_grp_pad_mux,
+static struct atlas7_grp_mux usb1_drvvbus_grp0_mux = {
+       .pad_mux_count = ARRAY_SIZE(usb1_drvvbus_grp0_pad_mux),
+       .pad_mux_list = usb1_drvvbus_grp0_pad_mux,
+};
+
+static struct atlas7_pad_mux usb1_drvvbus_grp1_pad_mux[] = {
+       MUX(1, 163, 2, N, N, N, N),
+};
+
+static struct atlas7_grp_mux usb1_drvvbus_grp1_mux = {
+       .pad_mux_count = ARRAY_SIZE(usb1_drvvbus_grp1_pad_mux),
+       .pad_mux_list = usb1_drvvbus_grp1_pad_mux,
 };
 
 static struct atlas7_pad_mux visbus_dout_grp_pad_mux[] = {
@@ -3252,11 +4325,20 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
        FUNCTION("sdio_i2s_gpio", sdio_i2s_gpio_grp, &sdio_i2s_gpio_grp_mux),
        FUNCTION("sp_rgmii_gpio", sp_rgmii_gpio_grp, &sp_rgmii_gpio_grp_mux),
        FUNCTION("lvds_gpio", lvds_gpio_grp, &lvds_gpio_grp_mux),
-       FUNCTION("uart_nand_gpio",
-                       uart_nand_gpio_grp,
-                       &uart_nand_gpio_grp_mux),
+       FUNCTION("jtag_uart_nand_gpio",
+                       jtag_uart_nand_gpio_grp,
+                       &jtag_uart_nand_gpio_grp_mux),
        FUNCTION("rtc_gpio", rtc_gpio_grp, &rtc_gpio_grp_mux),
        FUNCTION("audio_ac97", audio_ac97_grp, &audio_ac97_grp_mux),
+       FUNCTION("audio_digmic_m0",
+                       audio_digmic_grp0,
+                       &audio_digmic_grp0_mux),
+       FUNCTION("audio_digmic_m1",
+                       audio_digmic_grp1,
+                       &audio_digmic_grp1_mux),
+       FUNCTION("audio_digmic_m2",
+                       audio_digmic_grp2,
+                       &audio_digmic_grp2_mux),
        FUNCTION("audio_func_dbg",
                        audio_func_dbg_grp,
                        &audio_func_dbg_grp_mux),
@@ -3265,16 +4347,119 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
        FUNCTION("audio_i2s_extclk",
                        audio_i2s_extclk_grp,
                        &audio_i2s_extclk_grp_mux),
-       FUNCTION("audio_uart0", audio_uart0_grp, &audio_uart0_grp_mux),
-       FUNCTION("audio_uart1", audio_uart1_grp, &audio_uart1_grp_mux),
-       FUNCTION("audio_uart2_m0", audio_uart2_grp0, &audio_uart2_grp0_mux),
-       FUNCTION("audio_uart2_m1", audio_uart2_grp1, &audio_uart2_grp1_mux),
-       FUNCTION("c_can_trnsvr", c_can_trnsvr_grp, &c_can_trnsvr_grp_mux),
-       FUNCTION("c0_can_m0", c0_can_grp0, &c0_can_grp0_mux),
-       FUNCTION("c0_can_m1", c0_can_grp1, &c0_can_grp1_mux),
-       FUNCTION("c1_can_m0", c1_can_grp0, &c1_can_grp0_mux),
-       FUNCTION("c1_can_m1", c1_can_grp1, &c1_can_grp1_mux),
-       FUNCTION("c1_can_m2", c1_can_grp2, &c1_can_grp2_mux),
+       FUNCTION("audio_spdif_out_m0",
+                       audio_spdif_out_grp0,
+                       &audio_spdif_out_grp0_mux),
+       FUNCTION("audio_spdif_out_m1",
+                       audio_spdif_out_grp1,
+                       &audio_spdif_out_grp1_mux),
+       FUNCTION("audio_spdif_out_m2",
+                       audio_spdif_out_grp2,
+                       &audio_spdif_out_grp2_mux),
+       FUNCTION("audio_uart0_basic",
+                       audio_uart0_basic_grp,
+                       &audio_uart0_basic_grp_mux),
+       FUNCTION("audio_uart0_urfs_m0",
+                       audio_uart0_urfs_grp0,
+                       &audio_uart0_urfs_grp0_mux),
+       FUNCTION("audio_uart0_urfs_m1",
+                       audio_uart0_urfs_grp1,
+                       &audio_uart0_urfs_grp1_mux),
+       FUNCTION("audio_uart0_urfs_m2",
+                       audio_uart0_urfs_grp2,
+                       &audio_uart0_urfs_grp2_mux),
+       FUNCTION("audio_uart0_urfs_m3",
+                       audio_uart0_urfs_grp3,
+                       &audio_uart0_urfs_grp3_mux),
+       FUNCTION("audio_uart1_basic",
+                       audio_uart1_basic_grp,
+                       &audio_uart1_basic_grp_mux),
+       FUNCTION("audio_uart1_urfs_m0",
+                       audio_uart1_urfs_grp0,
+                       &audio_uart1_urfs_grp0_mux),
+       FUNCTION("audio_uart1_urfs_m1",
+                       audio_uart1_urfs_grp1,
+                       &audio_uart1_urfs_grp1_mux),
+       FUNCTION("audio_uart1_urfs_m2",
+                       audio_uart1_urfs_grp2,
+                       &audio_uart1_urfs_grp2_mux),
+       FUNCTION("audio_uart2_urfs_m0",
+                       audio_uart2_urfs_grp0,
+                       &audio_uart2_urfs_grp0_mux),
+       FUNCTION("audio_uart2_urfs_m1",
+                       audio_uart2_urfs_grp1,
+                       &audio_uart2_urfs_grp1_mux),
+       FUNCTION("audio_uart2_urfs_m2",
+                       audio_uart2_urfs_grp2,
+                       &audio_uart2_urfs_grp2_mux),
+       FUNCTION("audio_uart2_urxd_m0",
+                       audio_uart2_urxd_grp0,
+                       &audio_uart2_urxd_grp0_mux),
+       FUNCTION("audio_uart2_urxd_m1",
+                       audio_uart2_urxd_grp1,
+                       &audio_uart2_urxd_grp1_mux),
+       FUNCTION("audio_uart2_urxd_m2",
+                       audio_uart2_urxd_grp2,
+                       &audio_uart2_urxd_grp2_mux),
+       FUNCTION("audio_uart2_usclk_m0",
+                       audio_uart2_usclk_grp0,
+                       &audio_uart2_usclk_grp0_mux),
+       FUNCTION("audio_uart2_usclk_m1",
+                       audio_uart2_usclk_grp1,
+                       &audio_uart2_usclk_grp1_mux),
+       FUNCTION("audio_uart2_usclk_m2",
+                       audio_uart2_usclk_grp2,
+                       &audio_uart2_usclk_grp2_mux),
+       FUNCTION("audio_uart2_utfs_m0",
+                       audio_uart2_utfs_grp0,
+                       &audio_uart2_utfs_grp0_mux),
+       FUNCTION("audio_uart2_utfs_m1",
+                       audio_uart2_utfs_grp1,
+                       &audio_uart2_utfs_grp1_mux),
+       FUNCTION("audio_uart2_utfs_m2",
+                       audio_uart2_utfs_grp2,
+                       &audio_uart2_utfs_grp2_mux),
+       FUNCTION("audio_uart2_utxd_m0",
+                       audio_uart2_utxd_grp0,
+                       &audio_uart2_utxd_grp0_mux),
+       FUNCTION("audio_uart2_utxd_m1",
+                       audio_uart2_utxd_grp1,
+                       &audio_uart2_utxd_grp1_mux),
+       FUNCTION("audio_uart2_utxd_m2",
+                       audio_uart2_utxd_grp2,
+                       &audio_uart2_utxd_grp2_mux),
+       FUNCTION("c_can_trnsvr_en_m0",
+                       c_can_trnsvr_en_grp0,
+                       &c_can_trnsvr_en_grp0_mux),
+       FUNCTION("c_can_trnsvr_en_m1",
+                       c_can_trnsvr_en_grp1,
+                       &c_can_trnsvr_en_grp1_mux),
+       FUNCTION("c_can_trnsvr_intr",
+                       c_can_trnsvr_intr_grp,
+                       &c_can_trnsvr_intr_grp_mux),
+       FUNCTION("c_can_trnsvr_stb_n",
+                       c_can_trnsvr_stb_n_grp,
+                       &c_can_trnsvr_stb_n_grp_mux),
+       FUNCTION("c0_can_rxd_trnsv0",
+                       c0_can_rxd_trnsv0_grp,
+                       &c0_can_rxd_trnsv0_grp_mux),
+       FUNCTION("c0_can_rxd_trnsv1",
+                       c0_can_rxd_trnsv1_grp,
+                       &c0_can_rxd_trnsv1_grp_mux),
+       FUNCTION("c0_can_txd_trnsv0",
+                       c0_can_txd_trnsv0_grp,
+                       &c0_can_txd_trnsv0_grp_mux),
+       FUNCTION("c0_can_txd_trnsv1",
+                       c0_can_txd_trnsv1_grp,
+                       &c0_can_txd_trnsv1_grp_mux),
+       FUNCTION("c1_can_rxd_m0", c1_can_rxd_grp0, &c1_can_rxd_grp0_mux),
+       FUNCTION("c1_can_rxd_m1", c1_can_rxd_grp1, &c1_can_rxd_grp1_mux),
+       FUNCTION("c1_can_rxd_m2", c1_can_rxd_grp2, &c1_can_rxd_grp2_mux),
+       FUNCTION("c1_can_rxd_m3", c1_can_rxd_grp3, &c1_can_rxd_grp3_mux),
+       FUNCTION("c1_can_txd_m0", c1_can_txd_grp0, &c1_can_txd_grp0_mux),
+       FUNCTION("c1_can_txd_m1", c1_can_txd_grp1, &c1_can_txd_grp1_mux),
+       FUNCTION("c1_can_txd_m2", c1_can_txd_grp2, &c1_can_txd_grp2_mux),
+       FUNCTION("c1_can_txd_m3", c1_can_txd_grp3, &c1_can_txd_grp3_mux),
        FUNCTION("ca_audio_lpc", ca_audio_lpc_grp, &ca_audio_lpc_grp_mux),
        FUNCTION("ca_bt_lpc", ca_bt_lpc_grp, &ca_bt_lpc_grp_mux),
        FUNCTION("ca_coex", ca_coex_grp, &ca_coex_grp_mux),
@@ -3377,7 +4562,35 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
                        &gn_trg_shutdown_grp3_mux),
        FUNCTION("i2c0", i2c0_grp, &i2c0_grp_mux),
        FUNCTION("i2c1", i2c1_grp, &i2c1_grp_mux),
-       FUNCTION("jtag_m0", jtag_grp0, &jtag_grp0_mux),
+       FUNCTION("i2s0", i2s0_grp, &i2s0_grp_mux),
+       FUNCTION("i2s1_basic", i2s1_basic_grp, &i2s1_basic_grp_mux),
+       FUNCTION("i2s1_rxd0_m0", i2s1_rxd0_grp0, &i2s1_rxd0_grp0_mux),
+       FUNCTION("i2s1_rxd0_m1", i2s1_rxd0_grp1, &i2s1_rxd0_grp1_mux),
+       FUNCTION("i2s1_rxd0_m2", i2s1_rxd0_grp2, &i2s1_rxd0_grp2_mux),
+       FUNCTION("i2s1_rxd0_m3", i2s1_rxd0_grp3, &i2s1_rxd0_grp3_mux),
+       FUNCTION("i2s1_rxd0_m4", i2s1_rxd0_grp4, &i2s1_rxd0_grp4_mux),
+       FUNCTION("i2s1_rxd1_m0", i2s1_rxd1_grp0, &i2s1_rxd1_grp0_mux),
+       FUNCTION("i2s1_rxd1_m1", i2s1_rxd1_grp1, &i2s1_rxd1_grp1_mux),
+       FUNCTION("i2s1_rxd1_m2", i2s1_rxd1_grp2, &i2s1_rxd1_grp2_mux),
+       FUNCTION("i2s1_rxd1_m3", i2s1_rxd1_grp3, &i2s1_rxd1_grp3_mux),
+       FUNCTION("i2s1_rxd1_m4", i2s1_rxd1_grp4, &i2s1_rxd1_grp4_mux),
+       FUNCTION("jtag_jt_dbg_nsrst",
+                       jtag_jt_dbg_nsrst_grp,
+                       &jtag_jt_dbg_nsrst_grp_mux),
+       FUNCTION("jtag_ntrst_m0", jtag_ntrst_grp0, &jtag_ntrst_grp0_mux),
+       FUNCTION("jtag_ntrst_m1", jtag_ntrst_grp1, &jtag_ntrst_grp1_mux),
+       FUNCTION("jtag_swdiotms_m0",
+                       jtag_swdiotms_grp0,
+                       &jtag_swdiotms_grp0_mux),
+       FUNCTION("jtag_swdiotms_m1",
+                       jtag_swdiotms_grp1,
+                       &jtag_swdiotms_grp1_mux),
+       FUNCTION("jtag_tck_m0", jtag_tck_grp0, &jtag_tck_grp0_mux),
+       FUNCTION("jtag_tck_m1", jtag_tck_grp1, &jtag_tck_grp1_mux),
+       FUNCTION("jtag_tdi_m0", jtag_tdi_grp0, &jtag_tdi_grp0_mux),
+       FUNCTION("jtag_tdi_m1", jtag_tdi_grp1, &jtag_tdi_grp1_mux),
+       FUNCTION("jtag_tdo_m0", jtag_tdo_grp0, &jtag_tdo_grp0_mux),
+       FUNCTION("jtag_tdo_m1", jtag_tdo_grp1, &jtag_tdo_grp1_mux),
        FUNCTION("ks_kas_spi_m0", ks_kas_spi_grp0, &ks_kas_spi_grp0_mux),
        FUNCTION("ld_ldd", ld_ldd_grp, &ld_ldd_grp_mux),
        FUNCTION("ld_ldd_16bit", ld_ldd_16bit_grp, &ld_ldd_16bit_grp_mux),
@@ -3414,18 +4627,27 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
        FUNCTION("pw_cko0_m0", pw_cko0_grp0, &pw_cko0_grp0_mux),
        FUNCTION("pw_cko0_m1", pw_cko0_grp1, &pw_cko0_grp1_mux),
        FUNCTION("pw_cko0_m2", pw_cko0_grp2, &pw_cko0_grp2_mux),
+       FUNCTION("pw_cko0_m3", pw_cko0_grp3, &pw_cko0_grp3_mux),
        FUNCTION("pw_cko1_m0", pw_cko1_grp0, &pw_cko1_grp0_mux),
        FUNCTION("pw_cko1_m1", pw_cko1_grp1, &pw_cko1_grp1_mux),
+       FUNCTION("pw_cko1_m2", pw_cko1_grp2, &pw_cko1_grp2_mux),
        FUNCTION("pw_i2s01_clk_m0",
                        pw_i2s01_clk_grp0,
                        &pw_i2s01_clk_grp0_mux),
        FUNCTION("pw_i2s01_clk_m1",
                        pw_i2s01_clk_grp1,
                        &pw_i2s01_clk_grp1_mux),
-       FUNCTION("pw_pwm0", pw_pwm0_grp, &pw_pwm0_grp_mux),
-       FUNCTION("pw_pwm1", pw_pwm1_grp, &pw_pwm1_grp_mux),
+       FUNCTION("pw_i2s01_clk_m2",
+                       pw_i2s01_clk_grp2,
+                       &pw_i2s01_clk_grp2_mux),
+       FUNCTION("pw_pwm0_m0", pw_pwm0_grp0, &pw_pwm0_grp0_mux),
+       FUNCTION("pw_pwm0_m1", pw_pwm0_grp1, &pw_pwm0_grp1_mux),
+       FUNCTION("pw_pwm1_m0", pw_pwm1_grp0, &pw_pwm1_grp0_mux),
+       FUNCTION("pw_pwm1_m1", pw_pwm1_grp1, &pw_pwm1_grp1_mux),
+       FUNCTION("pw_pwm1_m2", pw_pwm1_grp2, &pw_pwm1_grp2_mux),
        FUNCTION("pw_pwm2_m0", pw_pwm2_grp0, &pw_pwm2_grp0_mux),
        FUNCTION("pw_pwm2_m1", pw_pwm2_grp1, &pw_pwm2_grp1_mux),
+       FUNCTION("pw_pwm2_m2", pw_pwm2_grp2, &pw_pwm2_grp2_mux),
        FUNCTION("pw_pwm3_m0", pw_pwm3_grp0, &pw_pwm3_grp0_mux),
        FUNCTION("pw_pwm3_m1", pw_pwm3_grp1, &pw_pwm3_grp1_mux),
        FUNCTION("pw_pwm_cpu_vol_m0",
@@ -3434,6 +4656,9 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
        FUNCTION("pw_pwm_cpu_vol_m1",
                        pw_pwm_cpu_vol_grp1,
                        &pw_pwm_cpu_vol_grp1_mux),
+       FUNCTION("pw_pwm_cpu_vol_m2",
+                       pw_pwm_cpu_vol_grp2,
+                       &pw_pwm_cpu_vol_grp2_mux),
        FUNCTION("pw_backlight_m0",
                        pw_backlight_grp0,
                        &pw_backlight_grp0_mux),
@@ -3456,8 +4681,11 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
        FUNCTION("sd1", sd1_grp, &sd1_grp_mux),
        FUNCTION("sd1_4bit_m0", sd1_4bit_grp0, &sd1_4bit_grp0_mux),
        FUNCTION("sd1_4bit_m1", sd1_4bit_grp1, &sd1_4bit_grp1_mux),
-       FUNCTION("sd2_m0", sd2_grp0, &sd2_grp0_mux),
-       FUNCTION("sd2_no_cdb_m0", sd2_no_cdb_grp0, &sd2_no_cdb_grp0_mux),
+       FUNCTION("sd2_basic", sd2_basic_grp, &sd2_basic_grp_mux),
+       FUNCTION("sd2_cdb_m0", sd2_cdb_grp0, &sd2_cdb_grp0_mux),
+       FUNCTION("sd2_cdb_m1", sd2_cdb_grp1, &sd2_cdb_grp1_mux),
+       FUNCTION("sd2_wpb_m0", sd2_wpb_grp0, &sd2_wpb_grp0_mux),
+       FUNCTION("sd2_wpb_m1", sd2_wpb_grp1, &sd2_wpb_grp1_mux),
        FUNCTION("sd3", sd3_grp, &sd3_grp_mux),
        FUNCTION("sd5", sd5_grp, &sd5_grp_mux),
        FUNCTION("sd6_m0", sd6_grp0, &sd6_grp0_mux),
@@ -3471,23 +4699,47 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
        FUNCTION("uart0", uart0_grp, &uart0_grp_mux),
        FUNCTION("uart0_nopause", uart0_nopause_grp, &uart0_nopause_grp_mux),
        FUNCTION("uart1", uart1_grp, &uart1_grp_mux),
-       FUNCTION("uart2", uart2_grp, &uart2_grp_mux),
-       FUNCTION("uart3_m0", uart3_grp0, &uart3_grp0_mux),
-       FUNCTION("uart3_m1", uart3_grp1, &uart3_grp1_mux),
-       FUNCTION("uart3_m2", uart3_grp2, &uart3_grp2_mux),
-       FUNCTION("uart3_m3", uart3_grp3, &uart3_grp3_mux),
-       FUNCTION("uart3_nopause_m0",
-                       uart3_nopause_grp0,
-                       &uart3_nopause_grp0_mux),
-       FUNCTION("uart3_nopause_m1",
-                       uart3_nopause_grp1,
-                       &uart3_nopause_grp1_mux),
-       FUNCTION("uart4_m0", uart4_grp0, &uart4_grp0_mux),
-       FUNCTION("uart4_m1", uart4_grp1, &uart4_grp1_mux),
-       FUNCTION("uart4_m2", uart4_grp2, &uart4_grp2_mux),
-       FUNCTION("uart4_nopause", uart4_nopause_grp, &uart4_nopause_grp_mux),
-       FUNCTION("usb0_drvvbus", usb0_drvvbus_grp, &usb0_drvvbus_grp_mux),
-       FUNCTION("usb1_drvvbus", usb1_drvvbus_grp, &usb1_drvvbus_grp_mux),
+       FUNCTION("uart2_cts_m0", uart2_cts_grp0, &uart2_cts_grp0_mux),
+       FUNCTION("uart2_cts_m1", uart2_cts_grp1, &uart2_cts_grp1_mux),
+       FUNCTION("uart2_rts_m0", uart2_rts_grp0, &uart2_rts_grp0_mux),
+       FUNCTION("uart2_rts_m1", uart2_rts_grp1, &uart2_rts_grp1_mux),
+       FUNCTION("uart2_rxd_m0", uart2_rxd_grp0, &uart2_rxd_grp0_mux),
+       FUNCTION("uart2_rxd_m1", uart2_rxd_grp1, &uart2_rxd_grp1_mux),
+       FUNCTION("uart2_rxd_m2", uart2_rxd_grp2, &uart2_rxd_grp2_mux),
+       FUNCTION("uart2_txd_m0", uart2_txd_grp0, &uart2_txd_grp0_mux),
+       FUNCTION("uart2_txd_m1", uart2_txd_grp1, &uart2_txd_grp1_mux),
+       FUNCTION("uart2_txd_m2", uart2_txd_grp2, &uart2_txd_grp2_mux),
+       FUNCTION("uart3_cts_m0", uart3_cts_grp0, &uart3_cts_grp0_mux),
+       FUNCTION("uart3_cts_m1", uart3_cts_grp1, &uart3_cts_grp1_mux),
+       FUNCTION("uart3_cts_m2", uart3_cts_grp2, &uart3_cts_grp2_mux),
+       FUNCTION("uart3_rts_m0", uart3_rts_grp0, &uart3_rts_grp0_mux),
+       FUNCTION("uart3_rts_m1", uart3_rts_grp1, &uart3_rts_grp1_mux),
+       FUNCTION("uart3_rts_m2", uart3_rts_grp2, &uart3_rts_grp2_mux),
+       FUNCTION("uart3_rxd_m0", uart3_rxd_grp0, &uart3_rxd_grp0_mux),
+       FUNCTION("uart3_rxd_m1", uart3_rxd_grp1, &uart3_rxd_grp1_mux),
+       FUNCTION("uart3_rxd_m2", uart3_rxd_grp2, &uart3_rxd_grp2_mux),
+       FUNCTION("uart3_txd_m0", uart3_txd_grp0, &uart3_txd_grp0_mux),
+       FUNCTION("uart3_txd_m1", uart3_txd_grp1, &uart3_txd_grp1_mux),
+       FUNCTION("uart3_txd_m2", uart3_txd_grp2, &uart3_txd_grp2_mux),
+       FUNCTION("uart4_basic", uart4_basic_grp, &uart4_basic_grp_mux),
+       FUNCTION("uart4_cts_m0", uart4_cts_grp0, &uart4_cts_grp0_mux),
+       FUNCTION("uart4_cts_m1", uart4_cts_grp1, &uart4_cts_grp1_mux),
+       FUNCTION("uart4_cts_m2", uart4_cts_grp2, &uart4_cts_grp2_mux),
+       FUNCTION("uart4_rts_m0", uart4_rts_grp0, &uart4_rts_grp0_mux),
+       FUNCTION("uart4_rts_m1", uart4_rts_grp1, &uart4_rts_grp1_mux),
+       FUNCTION("uart4_rts_m2", uart4_rts_grp2, &uart4_rts_grp2_mux),
+       FUNCTION("usb0_drvvbus_m0",
+                       usb0_drvvbus_grp0,
+                       &usb0_drvvbus_grp0_mux),
+       FUNCTION("usb0_drvvbus_m1",
+                       usb0_drvvbus_grp1,
+                       &usb0_drvvbus_grp1_mux),
+       FUNCTION("usb1_drvvbus_m0",
+                       usb1_drvvbus_grp0,
+                       &usb1_drvvbus_grp0_mux),
+       FUNCTION("usb1_drvvbus_m1",
+                       usb1_drvvbus_grp1,
+                       &usb1_drvvbus_grp1_mux),
        FUNCTION("visbus_dout", visbus_dout_grp, &visbus_dout_grp_mux),
        FUNCTION("vi_vip1", vi_vip1_grp, &vi_vip1_grp_mux),
        FUNCTION("vi_vip1_ext", vi_vip1_ext_grp, &vi_vip1_ext_grp_mux),
index ae27872ff3a61ae70108aec4e886e18c9d281460..e68fd951129a4df5557c4007776fb5aa5891bf2c 100644
@@ -42,6 +42,10 @@ config PINCTRL_SUN8I_A33
        def_bool MACH_SUN8I
        select PINCTRL_SUNXI_COMMON
 
+config PINCTRL_SUN8I_A83T
+       def_bool MACH_SUN8I
+       select PINCTRL_SUNXI_COMMON
+
 config PINCTRL_SUN8I_A23_R
        def_bool MACH_SUN8I
        depends on RESET_CONTROLLER
index 227a1213947c817726e18b66a6e8c0df04783670..e080290345107d60a52dea9ec054949879f2a5f2 100644
@@ -12,4 +12,5 @@ obj-$(CONFIG_PINCTRL_SUN7I_A20)               += pinctrl-sun7i-a20.o
 obj-$(CONFIG_PINCTRL_SUN8I_A23)                += pinctrl-sun8i-a23.o
 obj-$(CONFIG_PINCTRL_SUN8I_A23_R)      += pinctrl-sun8i-a23-r.o
 obj-$(CONFIG_PINCTRL_SUN8I_A33)                += pinctrl-sun8i-a33.o
+obj-$(CONFIG_PINCTRL_SUN8I_A83T)       += pinctrl-sun8i-a83t.o
 obj-$(CONFIG_PINCTRL_SUN9I_A80)                += pinctrl-sun9i-a80.o
index 63676617bc5997218729a56c15c8597f8c2499b9..f9a3f8f446f76afe28177d5f9dcb8b8376b0310b 100644
@@ -653,7 +653,7 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x2, "spi1"),          /* CS1 */
-                 SUNXI_FUNCTION(0x3, "uart3"),         /* PWM1 */
+                 SUNXI_FUNCTION(0x3, "pwm"),           /* PWM1 */
                  SUNXI_FUNCTION(0x5, "uart2"),         /* CTS */
                  SUNXI_FUNCTION_IRQ(0x6, 13)),         /* EINT13 */
 };
index 9596b0a3df6be7f1b4cdfb238ae4d29f94538d0e..d4bc4f0e8be02814473b67bdcc724fa7676fe430 100644
@@ -47,45 +47,57 @@ static const struct sunxi_desc_pin sun6i_a31_r_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 5),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x2, 0, 0),   /* PL_EINT0 */
                  SUNXI_FUNCTION(0x3, "s_jtag")),       /* MS */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 6),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x2, 0, 1),   /* PL_EINT1 */
                  SUNXI_FUNCTION(0x3, "s_jtag")),       /* CK */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 7),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x2, 0, 2),   /* PL_EINT2 */
                  SUNXI_FUNCTION(0x3, "s_jtag")),       /* DO */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 8),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x2, 0, 3),   /* PL_EINT3 */
                  SUNXI_FUNCTION(0x3, "s_jtag")),       /* DI */
        /* Hole */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 0),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
-                 SUNXI_FUNCTION(0x1, "gpio_out")),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 0)),  /* PM_EINT0 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 1),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
-                 SUNXI_FUNCTION(0x1, "gpio_out")),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 1)),  /* PM_EINT1 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 2),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 2),   /* PM_EINT2 */
                  SUNXI_FUNCTION(0x3, "1wire")),
        SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 3),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
-                 SUNXI_FUNCTION(0x1, "gpio_out")),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 3)),  /* PM_EINT3 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 4),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
-                 SUNXI_FUNCTION(0x1, "gpio_out")),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 4)),  /* PM_EINT4 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 5),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
-                 SUNXI_FUNCTION(0x1, "gpio_out")),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 5)),  /* PM_EINT5 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 6),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
-                 SUNXI_FUNCTION(0x1, "gpio_out")),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 6)),  /* PM_EINT6 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 7),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 7),   /* PM_EINT7 */
                  SUNXI_FUNCTION(0x3, "rtc")),          /* CLKO */
 };
 
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
new file mode 100644
index 0000000..90b973e
--- /dev/null
@@ -0,0 +1,603 @@
+/*
+ * Allwinner a83t SoCs pinctrl driver.
+ *
+ * Copyright (C) 2015 Vishnu Patekar <vishnupatekar0510@gmail.com>
+ *
+ * Based on pinctrl-sun8i-a23.c, which is:
+ * Copyright (C) 2014 Chen-Yu Tsai <wens@csie.org>
+ * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "uart2"),         /* TX */
+                 SUNXI_FUNCTION(0x3, "jtag"),          /* MS0 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)),  /* PB_EINT0 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "uart2"),         /* RX */
+                 SUNXI_FUNCTION(0x3, "jtag"),          /* CK0 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)),  /* PB_EINT1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "uart2"),         /* RTS */
+                 SUNXI_FUNCTION(0x3, "jtag"),          /* DO0 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)),  /* PB_EINT2 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "uart2"),         /* CTS */
+                 SUNXI_FUNCTION(0x3, "jtag"),          /* DI0 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)),  /* PB_EINT3 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2s0"),          /* LRCK */
+                 SUNXI_FUNCTION(0x3, "tdm"),           /* LRCK */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)),  /* PB_EINT4 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2s0"),          /* BCLK */
+                 SUNXI_FUNCTION(0x3, "tdm"),           /* BCLK */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)),  /* PB_EINT5 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2s0"),          /* DOUT */
+                 SUNXI_FUNCTION(0x3, "tdm"),           /* DOUT */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)),  /* PB_EINT6 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2s0"),          /* DIN */
+                 SUNXI_FUNCTION(0x3, "tdm"),           /* DIN */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)),  /* PB_EINT7 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2s0"),          /* MCLK */
+                 SUNXI_FUNCTION(0x3, "tdm"),           /* MCLK */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)),  /* PB_EINT8 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "uart0"),         /* TX */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)),  /* PB_EINT9 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "uart0"),         /* RX */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PB_EINT10 */
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* WE */
+                 SUNXI_FUNCTION(0x3, "spi0")),         /* MOSI */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* ALE */
+                 SUNXI_FUNCTION(0x3, "spi0")),         /* MISO */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* CLE */
+                 SUNXI_FUNCTION(0x3, "spi0")),         /* CLK */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* CE1 */
+                 SUNXI_FUNCTION(0x3, "spi0")),         /* CS */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0")),        /* CE0 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* RE */
+                 SUNXI_FUNCTION(0x3, "mmc2")),         /* CLK */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* RB0 */
+                 SUNXI_FUNCTION(0x3, "mmc2")),         /* CMD */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0")),        /* RB1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* DQ0 */
+                 SUNXI_FUNCTION(0x3, "mmc2")),         /* D0 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* DQ1 */
+                 SUNXI_FUNCTION(0x3, "mmc2")),         /* D1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* DQ2 */
+                 SUNXI_FUNCTION(0x3, "mmc2")),         /* D2 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* DQ3 */
+                 SUNXI_FUNCTION(0x3, "mmc2")),         /* D3 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* DQ4 */
+                 SUNXI_FUNCTION(0x3, "mmc2")),         /* D4 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* DQ5 */
+                 SUNXI_FUNCTION(0x3, "mmc2")),         /* D5 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand"),          /* DQ6 */
+                 SUNXI_FUNCTION(0x3, "mmc2")),         /* D6 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand"),          /* DQ7 */
+                 SUNXI_FUNCTION(0x3, "mmc2")),         /* D7 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand"),          /* DQS */
+                 SUNXI_FUNCTION(0x3, "mmc2")),         /* RST */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand")),         /* CE2 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "nand")),         /* CE3 */
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D2 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* RGMII / MII RXD3 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D3 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* RGMII / MII RXD2 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D4 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* RGMII / MII RXD1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D5 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* RGMII / MII RXD0 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D6 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* RGMII / MII RXCK */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D7 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* RGMII / MII RXDV */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D10 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* RGMII / MII RXERR */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D11 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* RGMII / MII TXD3 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D12 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* RGMII / MII TXD2 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D13 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* RGMII / MII TXD1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D14 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* RGMII / MII TXD0 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D15 */
+                 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII-NULL / MII-CRS */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D18 */
+                 SUNXI_FUNCTION(0x3, "lvds0"),         /* VP0 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* GTXCK / ETXCK */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D19 */
+                 SUNXI_FUNCTION(0x3, "lvds0"),         /* VN0 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* GTXCTL / ETXEL */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D20 */
+                 SUNXI_FUNCTION(0x3, "lvds0"),         /* VP1 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* GNULL / ETXERR */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D21 */
+                 SUNXI_FUNCTION(0x3, "lvds0"),         /* VN1 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* GCLKIN / ECOL */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D22 */
+                 SUNXI_FUNCTION(0x3, "lvds0"),         /* VP2 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* GMDC */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* D23 */
+                 SUNXI_FUNCTION(0x3, "lvds0"),         /* VN2 */
+                 SUNXI_FUNCTION(0x4, "gmac")),         /* GMDIO */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* CLK */
+                 SUNXI_FUNCTION(0x3, "lvds0")),        /* VPC */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* DE */
+                 SUNXI_FUNCTION(0x3, "lvds0")),        /* VNC */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* HSYNC */
+                 SUNXI_FUNCTION(0x3, "lvds0")),        /* VP3 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "lcd0"),          /* VSYNC */
+                 SUNXI_FUNCTION(0x3, "lvds0")),        /* VN3 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 28),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "pwm")),          /* PWM */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 29),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* PCLK */
+                 SUNXI_FUNCTION(0x4, "ccir")),         /* CLK */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* MCLK */
+                 SUNXI_FUNCTION(0x4, "ccir")),         /* DE */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* HSYNC */
+                 SUNXI_FUNCTION(0x4, "ccir")),         /* HSYNC */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* VSYNC */
+                 SUNXI_FUNCTION(0x4, "ccir")),         /* VSYNC */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi")),          /* D0 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi")),          /* D1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* D2 */
+                 SUNXI_FUNCTION(0x4, "ccir")),         /* D0 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* D3 */
+                 SUNXI_FUNCTION(0x4, "ccir")),         /* D1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* D4 */
+                 SUNXI_FUNCTION(0x4, "ccir")),         /* D2 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* D5 */
+                 SUNXI_FUNCTION(0x4, "ccir")),         /* D3 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* D6 */
+                 SUNXI_FUNCTION(0x3, "uart4"),         /* TX */
+                 SUNXI_FUNCTION(0x4, "ccir")),         /* D4 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* D7 */
+                 SUNXI_FUNCTION(0x3, "uart4"),         /* RX */
+                 SUNXI_FUNCTION(0x4, "ccir")),         /* D5 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* D8 */
+                 SUNXI_FUNCTION(0x3, "uart4"),         /* RTS */
+                 SUNXI_FUNCTION(0x4, "ccir")),         /* D6 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* D9 */
+                 SUNXI_FUNCTION(0x3, "uart4"),         /* CTS */
+                 SUNXI_FUNCTION(0x4, "ccir")),         /* D7 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* SCK */
+                 SUNXI_FUNCTION(0x3, "i2c2")),         /* SCK */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "csi"),           /* SDA */
+                 SUNXI_FUNCTION(0x3, "i2c2")),         /* SDA */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x3, "owa")),          /* DOUT */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "mmc0"),          /* D1 */
+                 SUNXI_FUNCTION(0x3, "jtag")),         /* MS1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "mmc0"),          /* D0 */
+                 SUNXI_FUNCTION(0x3, "jtag")),         /* DI1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "mmc0"),          /* CLK */
+                 SUNXI_FUNCTION(0x3, "uart0")),        /* TX */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "mmc0"),          /* CMD */
+                 SUNXI_FUNCTION(0x3, "jtag")),         /* DO1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "mmc0"),          /* D3 */
+                 SUNXI_FUNCTION(0x3, "uart0")),        /* RX */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "mmc0"),          /* D2 */
+                 SUNXI_FUNCTION(0x3, "jtag")),         /* CK1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "mmc1"),          /* CLK */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)),  /* PG_EINT0 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "mmc1"),          /* CMD */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)),  /* PG_EINT1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "mmc1"),          /* D0 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)),  /* PG_EINT2 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "mmc1"),          /* D1 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)),  /* PG_EINT3 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "mmc1"),          /* D2 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)),  /* PG_EINT4 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "mmc1"),          /* D3 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)),  /* PG_EINT5 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* TX */
+                 SUNXI_FUNCTION(0x3, "spi1"),          /* CS */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)),  /* PG_EINT6 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* RX */
+                 SUNXI_FUNCTION(0x3, "spi1"),          /* CLK */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 7)),  /* PG_EINT7 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* RTS */
+                 SUNXI_FUNCTION(0x3, "spi1"),          /* MOSI */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 8)),  /* PG_EINT8 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* CTS */
+                 SUNXI_FUNCTION(0x3, "spi1"),          /* MISO */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 9)),  /* PG_EINT9 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2s1"),          /* BCLK */
+                 SUNXI_FUNCTION(0x3, "uart3"),         /* TX */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 10)), /* PG_EINT10 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2s1"),          /* LRCK */
+                 SUNXI_FUNCTION(0x3, "uart3"),         /* RX */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 11)), /* PG_EINT11 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2s1"),          /* DOUT */
+                 SUNXI_FUNCTION(0x3, "uart3"),         /* RTS */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 12)), /* PG_EINT12 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2s1"),          /* DIN */
+                 SUNXI_FUNCTION(0x3, "uart3"),         /* CTS */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 13)), /* PG_EINT13 */
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2c0"),          /* SCK */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)),  /* PH_EINT0 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 1),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2c0"),          /* SDA */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)),  /* PH_EINT1 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 2),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2c1"),          /* SCK */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)),  /* PH_EINT2 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 3),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2c1"),          /* SDA */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)),  /* PH_EINT3 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 4),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2c2"),          /* SCK */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)),  /* PH_EINT4 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 5),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "i2c2"),          /* SDA */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)),  /* PH_EINT5 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 6),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "hdmi"),          /* HSCL */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)),  /* PH_EINT6 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 7),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "hdmi"),          /* HSDA */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)),  /* PH_EINT7 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 8),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION(0x2, "hdmi"),          /* HCEC */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)),  /* PH_EINT8 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)),  /* PH_EINT9 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)), /* PH_EINT10 */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */
+};
+
+static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {
+       .pins = sun8i_a83t_pins,
+       .npins = ARRAY_SIZE(sun8i_a83t_pins),
+       .irq_banks = 3,
+};
+
+static int sun8i_a83t_pinctrl_probe(struct platform_device *pdev)
+{
+       return sunxi_pinctrl_init(pdev,
+                                 &sun8i_a83t_pinctrl_data);
+}
+
+static const struct of_device_id sun8i_a83t_pinctrl_match[] = {
+       { .compatible = "allwinner,sun8i-a83t-pinctrl", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, sun8i_a83t_pinctrl_match);
+
+static struct platform_driver sun8i_a83t_pinctrl_driver = {
+       .probe  = sun8i_a83t_pinctrl_probe,
+       .driver = {
+               .name           = "sun8i-a83t-pinctrl",
+               .of_match_table = sun8i_a83t_pinctrl_match,
+       },
+};
+module_platform_driver(sun8i_a83t_pinctrl_driver);
+
+MODULE_AUTHOR("Vishnu Patekar <vishnupatekar0510@gmail.com>");
+MODULE_DESCRIPTION("Allwinner A83T pinctrl driver");
+MODULE_LICENSE("GPL");
index 38e0c7bdd2ac456362a7e2b845c0250be5e60411..dead97daca35fe2e59a0e81f7898b4e66f27b0cb 100644 (file)
@@ -446,16 +446,6 @@ static const struct pinmux_ops sunxi_pmx_ops = {
        .gpio_set_direction     = sunxi_pmx_gpio_set_direction,
 };
 
-static int sunxi_pinctrl_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void sunxi_pinctrl_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
                                        unsigned offset)
 {
@@ -716,6 +706,7 @@ static int sunxi_pinctrl_irq_of_xlate(struct irq_domain *d,
                                      unsigned long *out_hwirq,
                                      unsigned int *out_type)
 {
+       struct sunxi_pinctrl *pctl = d->host_data;
        struct sunxi_desc_function *desc;
        int pin, base;
 
@@ -723,10 +714,9 @@ static int sunxi_pinctrl_irq_of_xlate(struct irq_domain *d,
                return -EINVAL;
 
        base = PINS_PER_BANK * intspec[0];
-       pin = base + intspec[1];
+       pin = pctl->desc->pin_base + base + intspec[1];
 
-       desc = sunxi_pinctrl_desc_find_function_by_pin(d->host_data,
-                                                      pin, "irq");
+       desc = sunxi_pinctrl_desc_find_function_by_pin(pctl, pin, "irq");
        if (!desc)
                return -EINVAL;
 
@@ -956,8 +946,8 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
 
        last_pin = pctl->desc->pins[pctl->desc->npins - 1].pin.number;
        pctl->chip->owner = THIS_MODULE;
-       pctl->chip->request = sunxi_pinctrl_gpio_request,
-       pctl->chip->free = sunxi_pinctrl_gpio_free,
+       pctl->chip->request = gpiochip_generic_request,
+       pctl->chip->free = gpiochip_generic_free,
        pctl->chip->direction_input = sunxi_pinctrl_gpio_direction_input,
        pctl->chip->direction_output = sunxi_pinctrl_gpio_direction_output,
        pctl->chip->get = sunxi_pinctrl_gpio_get,
@@ -1029,7 +1019,7 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
                irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip,
                                         handle_edge_irq);
                irq_set_chip_data(irqno, pctl);
-       };
+       }
 
        for (i = 0; i < pctl->desc->irq_banks; i++) {
                /* Mask and clear all IRQs before registering a handler */
index eab23ef9ddbf9a2b9a23ec1a826258eecd2fb065..ad907072e09fe191e92096e6fd3de6e88f0993e2 100644 (file)
@@ -1,32 +1,32 @@
 if ARCH_UNIPHIER
 
-config PINCTRL_UNIPHIER_CORE
+config PINCTRL_UNIPHIER
        bool
        select PINMUX
        select GENERIC_PINCONF
 
 config PINCTRL_UNIPHIER_PH1_LD4
        tristate "UniPhier PH1-LD4 SoC pinctrl driver"
-       select PINCTRL_UNIPHIER_CORE
+       select PINCTRL_UNIPHIER
 
 config PINCTRL_UNIPHIER_PH1_PRO4
        tristate "UniPhier PH1-Pro4 SoC pinctrl driver"
-       select PINCTRL_UNIPHIER_CORE
+       select PINCTRL_UNIPHIER
 
 config PINCTRL_UNIPHIER_PH1_SLD8
        tristate "UniPhier PH1-sLD8 SoC pinctrl driver"
-       select PINCTRL_UNIPHIER_CORE
+       select PINCTRL_UNIPHIER
 
 config PINCTRL_UNIPHIER_PH1_PRO5
        tristate "UniPhier PH1-Pro5 SoC pinctrl driver"
-       select PINCTRL_UNIPHIER_CORE
+       select PINCTRL_UNIPHIER
 
 config PINCTRL_UNIPHIER_PROXSTREAM2
        tristate "UniPhier ProXstream2 SoC pinctrl driver"
-       select PINCTRL_UNIPHIER_CORE
+       select PINCTRL_UNIPHIER
 
 config PINCTRL_UNIPHIER_PH1_LD6B
        tristate "UniPhier PH1-LD6b SoC pinctrl driver"
-       select PINCTRL_UNIPHIER_CORE
+       select PINCTRL_UNIPHIER
 
 endif
index e215b1097297a585298d855c2121af9222c0e1e9..e7ce9670306cb15399f7aee571cb63fd7c310c98 100644 (file)
@@ -1,4 +1,4 @@
-obj-$(CONFIG_PINCTRL_UNIPHIER_CORE)            += pinctrl-uniphier-core.o
+obj-y                                          += pinctrl-uniphier-core.o
 
 obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD4)         += pinctrl-ph1-ld4.o
 obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO4)                += pinctrl-ph1-pro4.o
index 7beb87e8f499af55ce6afc267bbef91f9fe806bd..a7056dccfa5385a9854e475cc538a5642636f8dd 100644 (file)
@@ -537,6 +537,8 @@ static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 0};
 static const unsigned nand_cs1_pins[] = {22, 23};
 static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned sd_pins[] = {44, 45, 46, 47, 48, 49, 50, 51, 52};
+static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
 static const unsigned uart0_pins[] = {85, 88};
 static const unsigned uart0_muxvals[] = {1, 1};
 static const unsigned uart1_pins[] = {155, 156};
@@ -619,6 +621,7 @@ static const struct uniphier_pinctrl_group ph1_ld4_groups[] = {
        UNIPHIER_PINCTRL_GROUP(i2c3),
        UNIPHIER_PINCTRL_GROUP(nand),
        UNIPHIER_PINCTRL_GROUP(nand_cs1),
+       UNIPHIER_PINCTRL_GROUP(sd),
        UNIPHIER_PINCTRL_GROUP(uart0),
        UNIPHIER_PINCTRL_GROUP(uart1),
        UNIPHIER_PINCTRL_GROUP(uart1b),
@@ -776,6 +779,7 @@ static const char * const i2c1_groups[] = {"i2c1"};
 static const char * const i2c2_groups[] = {"i2c2"};
 static const char * const i2c3_groups[] = {"i2c3"};
 static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const sd_groups[] = {"sd"};
 static const char * const uart0_groups[] = {"uart0"};
 static const char * const uart1_groups[] = {"uart1", "uart1b"};
 static const char * const uart2_groups[] = {"uart2"};
@@ -831,6 +835,7 @@ static const struct uniphier_pinmux_function ph1_ld4_functions[] = {
        UNIPHIER_PINMUX_FUNCTION(i2c2),
        UNIPHIER_PINMUX_FUNCTION(i2c3),
        UNIPHIER_PINMUX_FUNCTION(nand),
+       UNIPHIER_PINMUX_FUNCTION(sd),
        UNIPHIER_PINMUX_FUNCTION(uart0),
        UNIPHIER_PINMUX_FUNCTION(uart1),
        UNIPHIER_PINMUX_FUNCTION(uart2),
index 9720e697fbc1f94f019eac5533452f94576e297d..1824831bb4da6d7fc5d0d4a01b5a909130954ad7 100644 (file)
@@ -761,6 +761,8 @@ static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 0};
 static const unsigned nand_cs1_pins[] = {37, 38};
 static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned sd_pins[] = {47, 48, 49, 50, 51, 52, 53, 54, 55};
+static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
 static const unsigned uart0_pins[] = {135, 136};
 static const unsigned uart0_muxvals[] = {3, 3};
 static const unsigned uart0b_pins[] = {11, 12};
@@ -866,6 +868,7 @@ static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = {
        UNIPHIER_PINCTRL_GROUP(i2c3),
        UNIPHIER_PINCTRL_GROUP(nand),
        UNIPHIER_PINCTRL_GROUP(nand_cs1),
+       UNIPHIER_PINCTRL_GROUP(sd),
        UNIPHIER_PINCTRL_GROUP(uart0),
        UNIPHIER_PINCTRL_GROUP(uart0b),
        UNIPHIER_PINCTRL_GROUP(uart1),
@@ -1136,6 +1139,7 @@ static const char * const i2c1_groups[] = {"i2c1"};
 static const char * const i2c2_groups[] = {"i2c2"};
 static const char * const i2c3_groups[] = {"i2c3"};
 static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const sd_groups[] = {"sd"};
 static const char * const uart0_groups[] = {"uart0", "uart0b"};
 static const char * const uart1_groups[] = {"uart1", "uart1b"};
 static const char * const uart2_groups[] = {"uart2", "uart2b"};
@@ -1219,6 +1223,7 @@ static const struct uniphier_pinmux_function ph1_ld6b_functions[] = {
        UNIPHIER_PINMUX_FUNCTION(i2c2),
        UNIPHIER_PINMUX_FUNCTION(i2c3),
        UNIPHIER_PINMUX_FUNCTION(nand),
+       UNIPHIER_PINMUX_FUNCTION(sd),
        UNIPHIER_PINMUX_FUNCTION(uart0),
        UNIPHIER_PINMUX_FUNCTION(uart1),
        UNIPHIER_PINMUX_FUNCTION(uart2),
index 96921e40da5f9bc50f8f88b576569a5b18530256..ec8e92dfaf8c71394a658ec519856e72bc57f3ce 100644 (file)
@@ -1031,6 +1031,11 @@ static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 0};
 static const unsigned nand_cs1_pins[] = {131, 132};
 static const unsigned nand_cs1_muxvals[] = {1, 1};
+static const unsigned sd_pins[] = {150, 151, 152, 153, 154, 155, 156, 157, 158};
+static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned sd1_pins[] = {319, 320, 321, 322, 323, 324, 325, 326,
+                                   327};
+static const unsigned sd1_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
 static const unsigned uart0_pins[] = {127, 128};
 static const unsigned uart0_muxvals[] = {0, 0};
 static const unsigned uart1_pins[] = {129, 130};
@@ -1140,6 +1145,8 @@ static const struct uniphier_pinctrl_group ph1_pro4_groups[] = {
        UNIPHIER_PINCTRL_GROUP(i2c6),
        UNIPHIER_PINCTRL_GROUP(nand),
        UNIPHIER_PINCTRL_GROUP(nand_cs1),
+       UNIPHIER_PINCTRL_GROUP(sd),
+       UNIPHIER_PINCTRL_GROUP(sd1),
        UNIPHIER_PINCTRL_GROUP(uart0),
        UNIPHIER_PINCTRL_GROUP(uart1),
        UNIPHIER_PINCTRL_GROUP(uart2),
@@ -1412,6 +1419,8 @@ static const char * const i2c2_groups[] = {"i2c2"};
 static const char * const i2c3_groups[] = {"i2c3"};
 static const char * const i2c6_groups[] = {"i2c6"};
 static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const sd_groups[] = {"sd"};
+static const char * const sd1_groups[] = {"sd1"};
 static const char * const uart0_groups[] = {"uart0"};
 static const char * const uart1_groups[] = {"uart1"};
 static const char * const uart2_groups[] = {"uart2"};
@@ -1498,6 +1507,8 @@ static const struct uniphier_pinmux_function ph1_pro4_functions[] = {
        UNIPHIER_PINMUX_FUNCTION(i2c3),
        UNIPHIER_PINMUX_FUNCTION(i2c6),
        UNIPHIER_PINMUX_FUNCTION(nand),
+       UNIPHIER_PINMUX_FUNCTION(sd),
+       UNIPHIER_PINMUX_FUNCTION(sd1),
        UNIPHIER_PINMUX_FUNCTION(uart0),
        UNIPHIER_PINMUX_FUNCTION(uart1),
        UNIPHIER_PINMUX_FUNCTION(uart2),
index 9af455978058cde2231a6398b3057041eec357bf..e3d648eae85afe0e9f2a1022ada182b182557e53 100644 (file)
@@ -818,6 +818,8 @@ static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 0};
 static const unsigned nand_cs1_pins[] = {26, 27};
 static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned sd_pins[] = {250, 251, 252, 253, 254, 255, 256, 257, 258};
+static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
 static const unsigned uart0_pins[] = {47, 48};
 static const unsigned uart0_muxvals[] = {0, 0};
 static const unsigned uart0b_pins[] = {227, 228};
@@ -930,6 +932,7 @@ static const struct uniphier_pinctrl_group ph1_pro5_groups[] = {
        UNIPHIER_PINCTRL_GROUP(i2c5b),
        UNIPHIER_PINCTRL_GROUP(i2c5c),
        UNIPHIER_PINCTRL_GROUP(i2c6),
+       UNIPHIER_PINCTRL_GROUP(sd),
        UNIPHIER_PINCTRL_GROUP(uart0),
        UNIPHIER_PINCTRL_GROUP(uart0b),
        UNIPHIER_PINCTRL_GROUP(uart1),
@@ -1209,6 +1212,7 @@ static const char * const i2c3_groups[] = {"i2c3"};
 static const char * const i2c5_groups[] = {"i2c5", "i2c5b", "i2c5c"};
 static const char * const i2c6_groups[] = {"i2c6"};
 static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const sd_groups[] = {"sd"};
 static const char * const uart0_groups[] = {"uart0", "uart0b"};
 static const char * const uart1_groups[] = {"uart1"};
 static const char * const uart2_groups[] = {"uart2"};
@@ -1296,6 +1300,7 @@ static const struct uniphier_pinmux_function ph1_pro5_functions[] = {
        UNIPHIER_PINMUX_FUNCTION(i2c5),
        UNIPHIER_PINMUX_FUNCTION(i2c6),
        UNIPHIER_PINMUX_FUNCTION(nand),
+       UNIPHIER_PINMUX_FUNCTION(sd),
        UNIPHIER_PINMUX_FUNCTION(uart0),
        UNIPHIER_PINMUX_FUNCTION(uart1),
        UNIPHIER_PINMUX_FUNCTION(uart2),
index 7e9dae54fcb22e1df91d8a9fc6e01556de2e14f1..c3700a33a5da406541c3e3415ae075bedebd8cf2 100644 (file)
 #define DRIVER_NAME "ph1-sld8-pinctrl"
 
 static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
-       UNIPHIER_PINCTRL_PIN(0, "PCA00", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(0, "PCA00", 0,
                             15, UNIPHIER_PIN_DRV_4_8,
                             15, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(1, "PCA01", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(1, "PCA01", 0,
                             16, UNIPHIER_PIN_DRV_4_8,
                             16, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(2, "PCA02", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(2, "PCA02", 0,
                             17, UNIPHIER_PIN_DRV_4_8,
                             17, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(3, "PCA03", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(3, "PCA03", 0,
                             18, UNIPHIER_PIN_DRV_4_8,
                             18, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(4, "PCA04", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(4, "PCA04", 0,
                             19, UNIPHIER_PIN_DRV_4_8,
                             19, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(5, "PCA05", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(5, "PCA05", 0,
                             20, UNIPHIER_PIN_DRV_4_8,
                             20, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(6, "PCA06", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(6, "PCA06", 0,
                             21, UNIPHIER_PIN_DRV_4_8,
                             21, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(7, "PCA07", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(7, "PCA07", 0,
                             22, UNIPHIER_PIN_DRV_4_8,
                             22, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(8, "PCA08", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(8, "PCA08", 0,
                             23, UNIPHIER_PIN_DRV_4_8,
                             23, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(9, "PCA09", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(9, "PCA09", 0,
                             24, UNIPHIER_PIN_DRV_4_8,
                             24, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(10, "PCA10", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(10, "PCA10", 0,
                             25, UNIPHIER_PIN_DRV_4_8,
                             25, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(11, "PCA11", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(11, "PCA11", 0,
                             26, UNIPHIER_PIN_DRV_4_8,
                             26, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(12, "PCA12", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(12, "PCA12", 0,
                             27, UNIPHIER_PIN_DRV_4_8,
                             27, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(13, "PCA13", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(13, "PCA13", 0,
                             28, UNIPHIER_PIN_DRV_4_8,
                             28, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(14, "PCA14", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(14, "PCA14", 0,
                             29, UNIPHIER_PIN_DRV_4_8,
                             29, UNIPHIER_PIN_PULL_DOWN),
        UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE,
@@ -118,199 +118,199 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
        UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE,
                             36, UNIPHIER_PIN_DRV_8_12_16_20,
                             128, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(32, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(32, "SDCLK", 8,
                             40, UNIPHIER_PIN_DRV_8_12_16_20,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(33, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(33, "SDCMD", 8,
                             44, UNIPHIER_PIN_DRV_8_12_16_20,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(34, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(34, "SDDAT0", 8,
                             48, UNIPHIER_PIN_DRV_8_12_16_20,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(35, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(35, "SDDAT1", 8,
                             52, UNIPHIER_PIN_DRV_8_12_16_20,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(36, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(36, "SDDAT2", 8,
                             56, UNIPHIER_PIN_DRV_8_12_16_20,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(37, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(37, "SDDAT3", 8,
                             60, UNIPHIER_PIN_DRV_8_12_16_20,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(38, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(38, "SDCD", 8,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             129, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(39, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(39, "SDWP", 8,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             130, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(40, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(40, "SDVOLC", 9,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             131, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", 0,
                             37, UNIPHIER_PIN_DRV_4_8,
                             37, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(42, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(42, "USB0OD", 0,
                             38, UNIPHIER_PIN_DRV_4_8,
                             38, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", 0,
                             39, UNIPHIER_PIN_DRV_4_8,
                             39, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(44, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(44, "USB1OD", 0,
                             40, UNIPHIER_PIN_DRV_4_8,
                             40, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(45, "PCRESET", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(45, "PCRESET", 0,
                             41, UNIPHIER_PIN_DRV_4_8,
                             41, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(46, "PCREG", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(46, "PCREG", 0,
                             42, UNIPHIER_PIN_DRV_4_8,
                             42, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(47, "PCCE2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(47, "PCCE2", 0,
                             43, UNIPHIER_PIN_DRV_4_8,
                             43, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(48, "PCVS1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(48, "PCVS1", 0,
                             44, UNIPHIER_PIN_DRV_4_8,
                             44, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(49, "PCCD2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(49, "PCCD2", 0,
                             45, UNIPHIER_PIN_DRV_4_8,
                             45, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(50, "PCCD1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(50, "PCCD1", 0,
                             46, UNIPHIER_PIN_DRV_4_8,
                             46, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(51, "PCREADY", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(51, "PCREADY", 0,
                             47, UNIPHIER_PIN_DRV_4_8,
                             47, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(52, "PCDOE", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(52, "PCDOE", 0,
                             48, UNIPHIER_PIN_DRV_4_8,
                             48, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(53, "PCCE1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(53, "PCCE1", 0,
                             49, UNIPHIER_PIN_DRV_4_8,
                             49, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(54, "PCWE", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(54, "PCWE", 0,
                             50, UNIPHIER_PIN_DRV_4_8,
                             50, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(55, "PCOE", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(55, "PCOE", 0,
                             51, UNIPHIER_PIN_DRV_4_8,
                             51, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(56, "PCWAIT", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(56, "PCWAIT", 0,
                             52, UNIPHIER_PIN_DRV_4_8,
                             52, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(57, "PCIOWR", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(57, "PCIOWR", 0,
                             53, UNIPHIER_PIN_DRV_4_8,
                             53, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(58, "PCIORD", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(58, "PCIORD", 0,
                             54, UNIPHIER_PIN_DRV_4_8,
                             54, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", 0,
                             55, UNIPHIER_PIN_DRV_4_8,
                             55, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", 0,
                             56, UNIPHIER_PIN_DRV_4_8,
                             56, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", 0,
                             57, UNIPHIER_PIN_DRV_4_8,
                             57, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", 0,
                             58, UNIPHIER_PIN_DRV_4_8,
                             58, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", 0,
                             59, UNIPHIER_PIN_DRV_4_8,
                             59, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", 0,
                             60, UNIPHIER_PIN_DRV_4_8,
                             60, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", 0,
                             61, UNIPHIER_PIN_DRV_4_8,
                             61, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", 0,
                             62, UNIPHIER_PIN_DRV_4_8,
                             62, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", 0,
                             63, UNIPHIER_PIN_DRV_4_8,
                             63, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", 0,
                             64, UNIPHIER_PIN_DRV_4_8,
                             64, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", 0,
                             65, UNIPHIER_PIN_DRV_4_8,
                             65, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", 0,
                             66, UNIPHIER_PIN_DRV_4_8,
                             66, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", 0,
                             67, UNIPHIER_PIN_DRV_4_8,
                             67, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", 0,
                             68, UNIPHIER_PIN_DRV_4_8,
                             68, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", 0,
                             69, UNIPHIER_PIN_DRV_4_8,
                             69, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", 0,
                             70, UNIPHIER_PIN_DRV_4_8,
                             70, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", 0,
                             71, UNIPHIER_PIN_DRV_4_8,
                             71, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", 0,
                             72, UNIPHIER_PIN_DRV_4_8,
                             72, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", 0,
                             73, UNIPHIER_PIN_DRV_4_8,
                             73, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", 0,
                             74, UNIPHIER_PIN_DRV_4_8,
                             74, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", 0,
                             75, UNIPHIER_PIN_DRV_4_8,
                             75, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", 0,
                             76, UNIPHIER_PIN_DRV_4_8,
                             76, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", 0,
                             77, UNIPHIER_PIN_DRV_4_8,
                             77, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", 0,
                             78, UNIPHIER_PIN_DRV_4_8,
                             78, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", 0,
                             79, UNIPHIER_PIN_DRV_4_8,
                             79, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", 0,
                             80, UNIPHIER_PIN_DRV_4_8,
                             80, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", 0,
                             81, UNIPHIER_PIN_DRV_4_8,
                             81, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", 0,
                             82, UNIPHIER_PIN_DRV_4_8,
                             82, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", 0,
                             83, UNIPHIER_PIN_DRV_4_8,
                             83, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", 0,
                             84, UNIPHIER_PIN_DRV_4_8,
                             84, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", 0,
                             85, UNIPHIER_PIN_DRV_4_8,
                             85, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", 0,
                             86, UNIPHIER_PIN_DRV_4_8,
                             86, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", 0,
                             87, UNIPHIER_PIN_DRV_4_8,
                             87, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(92, "AGCI", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(92, "AGCI", 3,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             132, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(93, "AGCR", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(93, "AGCR", 4,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             133, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(94, "AGCBS", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(94, "AGCBS", 5,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             134, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(95, "IECOUT", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(95, "IECOUT", 0,
                             88, UNIPHIER_PIN_DRV_4_8,
                             88, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(96, "ASMCK", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(96, "ASMCK", 0,
                             89, UNIPHIER_PIN_DRV_4_8,
                             89, UNIPHIER_PIN_PULL_DOWN),
        UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
@@ -325,31 +325,31 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
        UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE,
                             93, UNIPHIER_PIN_DRV_4_8,
                             93, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(101, "ARCOUT", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0,
                             94, UNIPHIER_PIN_DRV_4_8,
                             94, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(102, "SDA0", 10,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(103, "SCL0", 10,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(104, "SDA1", 11,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(105, "SCL1", 11,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", 12,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", 12,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", 13,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", 13,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
        UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
@@ -358,76 +358,76 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
        UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
                             96, UNIPHIER_PIN_DRV_4_8,
                             96, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(112, "SBO1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(112, "SBO1", 0,
                             97, UNIPHIER_PIN_DRV_4_8,
                             97, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(113, "SBI1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(113, "SBI1", 0,
                             98, UNIPHIER_PIN_DRV_4_8,
                             98, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(114, "TXD1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(114, "TXD1", 0,
                             99, UNIPHIER_PIN_DRV_4_8,
                             99, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(115, "RXD1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(115, "RXD1", 0,
                             100, UNIPHIER_PIN_DRV_4_8,
                             100, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(116, "HIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(116, "HIN", 1,
                             -1, UNIPHIER_PIN_DRV_FIXED_5,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(117, "VIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(117, "VIN", 2,
                             -1, UNIPHIER_PIN_DRV_FIXED_5,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(118, "TCON0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(118, "TCON0", 0,
                             101, UNIPHIER_PIN_DRV_4_8,
                             101, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(119, "TCON1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(119, "TCON1", 0,
                             102, UNIPHIER_PIN_DRV_4_8,
                             102, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(120, "TCON2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(120, "TCON2", 0,
                             103, UNIPHIER_PIN_DRV_4_8,
                             103, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(121, "TCON3", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(121, "TCON3", 0,
                             104, UNIPHIER_PIN_DRV_4_8,
                             104, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(122, "TCON4", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(122, "TCON4", 0,
                             105, UNIPHIER_PIN_DRV_4_8,
                             105, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(123, "TCON5", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(123, "TCON5", 0,
                             106, UNIPHIER_PIN_DRV_4_8,
                             106, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(124, "TCON6", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(124, "TCON6", 0,
                             107, UNIPHIER_PIN_DRV_4_8,
                             107, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(125, "TCON7", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(125, "TCON7", 0,
                             108, UNIPHIER_PIN_DRV_4_8,
                             108, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(126, "TCON8", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(126, "TCON8", 0,
                             109, UNIPHIER_PIN_DRV_4_8,
                             109, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(127, "PWMA", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(127, "PWMA", 0,
                             110, UNIPHIER_PIN_DRV_4_8,
                             110, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(128, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(128, "XIRQ0", 0,
                             111, UNIPHIER_PIN_DRV_4_8,
                             111, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(129, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(129, "XIRQ1", 0,
                             112, UNIPHIER_PIN_DRV_4_8,
                             112, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(130, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(130, "XIRQ2", 0,
                             113, UNIPHIER_PIN_DRV_4_8,
                             113, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(131, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(131, "XIRQ3", 0,
                             114, UNIPHIER_PIN_DRV_4_8,
                             114, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(132, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(132, "XIRQ4", 0,
                             115, UNIPHIER_PIN_DRV_4_8,
                             115, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(133, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(133, "XIRQ5", 0,
                             116, UNIPHIER_PIN_DRV_4_8,
                             116, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(134, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(134, "XIRQ6", 0,
                             117, UNIPHIER_PIN_DRV_4_8,
                             117, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(135, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(135, "XIRQ7", 0,
                             118, UNIPHIER_PIN_DRV_4_8,
                             118, UNIPHIER_PIN_PULL_DOWN),
 };
@@ -450,6 +450,8 @@ static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 0};
 static const unsigned nand_cs1_pins[] = {22, 23};
 static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned sd_pins[] = {32, 33, 34, 35, 36, 37, 38, 39, 40};
+static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
 static const unsigned uart0_pins[] = {70, 71};
 static const unsigned uart0_muxvals[] = {3, 3};
 static const unsigned uart1_pins[] = {114, 115};
@@ -536,6 +538,7 @@ static const struct uniphier_pinctrl_group ph1_sld8_groups[] = {
        UNIPHIER_PINCTRL_GROUP(i2c3),
        UNIPHIER_PINCTRL_GROUP(nand),
        UNIPHIER_PINCTRL_GROUP(nand_cs1),
+       UNIPHIER_PINCTRL_GROUP(sd),
        UNIPHIER_PINCTRL_GROUP(uart0),
        UNIPHIER_PINCTRL_GROUP(uart1),
        UNIPHIER_PINCTRL_GROUP(uart2),
@@ -684,6 +687,7 @@ static const char * const i2c1_groups[] = {"i2c1"};
 static const char * const i2c2_groups[] = {"i2c2"};
 static const char * const i2c3_groups[] = {"i2c3"};
 static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const sd_groups[] = {"sd"};
 static const char * const uart0_groups[] = {"uart0"};
 static const char * const uart1_groups[] = {"uart1"};
 static const char * const uart2_groups[] = {"uart2"};
@@ -739,6 +743,7 @@ static const struct uniphier_pinmux_function ph1_sld8_functions[] = {
        UNIPHIER_PINMUX_FUNCTION(i2c2),
        UNIPHIER_PINMUX_FUNCTION(i2c3),
        UNIPHIER_PINMUX_FUNCTION(nand),
+       UNIPHIER_PINMUX_FUNCTION(sd),
        UNIPHIER_PINMUX_FUNCTION(uart0),
        UNIPHIER_PINMUX_FUNCTION(uart1),
        UNIPHIER_PINMUX_FUNCTION(uart2),
index 3f036e236ad90273f8dbd74c67ccdc76330d0cbb..bc00d7591c59b019ba77d55ae1fcd5c46f2e95d9 100644 (file)
@@ -751,6 +751,8 @@ static const unsigned nand_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
                                        8, 8};
 static const unsigned nand_cs1_pins[] = {37, 38};
 static const unsigned nand_cs1_muxvals[] = {8, 8};
+static const unsigned sd_pins[] = {47, 48, 49, 50, 51, 52, 53, 54, 55};
+static const unsigned sd_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8};
 static const unsigned uart0_pins[] = {217, 218};
 static const unsigned uart0_muxvals[] = {8, 8};
 static const unsigned uart0b_pins[] = {179, 180};
@@ -857,6 +859,7 @@ static const struct uniphier_pinctrl_group proxstream2_groups[] = {
        UNIPHIER_PINCTRL_GROUP(i2c6),
        UNIPHIER_PINCTRL_GROUP(nand),
        UNIPHIER_PINCTRL_GROUP(nand_cs1),
+       UNIPHIER_PINCTRL_GROUP(sd),
        UNIPHIER_PINCTRL_GROUP(uart0),
        UNIPHIER_PINCTRL_GROUP(uart0b),
        UNIPHIER_PINCTRL_GROUP(uart1),
@@ -1128,6 +1131,7 @@ static const char * const i2c3_groups[] = {"i2c3"};
 static const char * const i2c5_groups[] = {"i2c5"};
 static const char * const i2c6_groups[] = {"i2c6"};
 static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const sd_groups[] = {"sd"};
 static const char * const uart0_groups[] = {"uart0", "uart0b"};
 static const char * const uart1_groups[] = {"uart1"};
 static const char * const uart2_groups[] = {"uart2"};
@@ -1213,6 +1217,7 @@ static const struct uniphier_pinmux_function proxstream2_functions[] = {
        UNIPHIER_PINMUX_FUNCTION(i2c5),
        UNIPHIER_PINMUX_FUNCTION(i2c6),
        UNIPHIER_PINMUX_FUNCTION(nand),
+       UNIPHIER_PINMUX_FUNCTION(sd),
        UNIPHIER_PINMUX_FUNCTION(uart0),
        UNIPHIER_PINMUX_FUNCTION(uart1),
        UNIPHIER_PINMUX_FUNCTION(uart2),
index 918f3b643f1b00e15db2332a0417543621b11a60..589872cc8adbbd93bfd94a4b32c58f461b12b890 100644 (file)
@@ -539,6 +539,12 @@ static int uniphier_pmx_set_one_mux(struct pinctrl_dev *pctldev, unsigned pin,
        unsigned reg, reg_end, shift, mask;
        int ret;
 
+       /* some pins need input-enabling */
+       ret = uniphier_conf_pin_input_enable(pctldev,
+                                            &pctldev->desc->pins[pin], 1);
+       if (ret)
+               return ret;
+
        reg = UNIPHIER_PINCTRL_PINMUX_BASE + pin * mux_bits / 32 * reg_stride;
        reg_end = reg + reg_stride;
        shift = pin * mux_bits % 32;
@@ -563,9 +569,7 @@ static int uniphier_pmx_set_one_mux(struct pinctrl_dev *pctldev, unsigned pin,
                        return ret;
        }
 
-       /* some pins need input-enabling */
-       return uniphier_conf_pin_input_enable(pctldev,
-                                             &pctldev->desc->pins[pin], 1);
+       return 0;
 }
 
 static int uniphier_pmx_set_mux(struct pinctrl_dev *pctldev,
index c15316b003c578b430106f1a6f4a3c76b3b90d9b..fb22d3f62480d9f88f04cb4f93e7f0de45e5bf66 100644 (file)
@@ -486,16 +486,6 @@ static struct pinctrl_desc wmt_desc = {
        .confops = &wmt_pinconf_ops,
 };
 
-static int wmt_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void wmt_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_free_gpio(chip->base + offset);
-}
-
 static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
 {
        struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
@@ -560,8 +550,8 @@ static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
 static struct gpio_chip wmt_gpio_chip = {
        .label = "gpio-wmt",
        .owner = THIS_MODULE,
-       .request = wmt_gpio_request,
-       .free = wmt_gpio_free,
+       .request = gpiochip_generic_request,
+       .free = gpiochip_generic_free,
        .get_direction = wmt_gpio_get_direction,
        .direction_input = wmt_gpio_direction_input,
        .direction_output = wmt_gpio_direction_output,
index cdad4d95b20eed060e6bc961da9bb27ab23865fb..805c749ac1ad035975a36c00231a83395e55e102 100644 (file)
@@ -179,8 +179,8 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
        /* check event type */
        BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
 
-       dev_dbg(pps->dev, "PPS event at %ld.%09ld\n",
-                       ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
+       dev_dbg(pps->dev, "PPS event at %lld.%09ld\n",
+                       (s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
 
        timespec_to_pps_ktime(&ts_real, ts->ts_real);
 
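For context on the pps hunk above: casting tv_sec to a fixed 64-bit type keeps the dev_dbg() format string correct whether time_t is 32 or 64 bits wide. A minimal userspace sketch of the same idea, using an ordinary struct timespec rather than anything kernel-specific:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_REALTIME, &ts);
            /* cast tv_sec so the format is right on 32- and 64-bit time_t */
            printf("PPS-style event at %lld.%09ld\n",
                   (long long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }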
index 01bf3476a79183714f62f67efcf5d8b17b70d497..a9567af7cec02c5a13102be118010e7bb7b1c888 100644 (file)
@@ -192,9 +192,9 @@ static const struct regulator_desc axp22x_regulators[] = {
        AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
                 AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
        AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20,
-                AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
+                AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
        AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
-                AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
+                AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
        /* secondary switchable output of DCDC1 */
        AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100,
                    AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
index 7849187d91aea909fdd9d0ce5bbabb35fc2e5736..8a34f6acc801531ce8eb16882fed2b04ed4c874c 100644 (file)
@@ -1403,6 +1403,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
                        return 0;
                }
 
+               /* Did the lookup explicitly defer for us? */
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+
                if (have_full_constraints()) {
                        r = dummy_regulator_rdev;
                } else {
index add419d6ff34996ed4aab8a145aee637ee987dbf..a56a7b243e91fae96b05cae0118d96e9d284dd7b 100644 (file)
@@ -212,6 +212,17 @@ static const struct file_operations twa_fops = {
        .llseek         = noop_llseek,
 };
 
+/*
+ * The controllers use an inline buffer instead of a mapped SGL for small,
+ * single entry buffers.  Note that we treat a zero-length transfer like
+ * a mapped SGL.
+ */
+static bool twa_command_mapped(struct scsi_cmnd *cmd)
+{
+       return scsi_sg_count(cmd) != 1 ||
+               scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
+}
+
 /* This function will complete an aen request from the isr */
 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
 {
@@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
                                }
 
                                /* Now complete the io */
-                               scsi_dma_unmap(cmd);
+                               if (twa_command_mapped(cmd))
+                                       scsi_dma_unmap(cmd);
                                cmd->scsi_done(cmd);
                                tw_dev->state[request_id] = TW_S_COMPLETED;
                                twa_free_request_id(tw_dev, request_id);
@@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
                                struct scsi_cmnd *cmd = tw_dev->srb[i];
 
                                cmd->result = (DID_RESET << 16);
-                               scsi_dma_unmap(cmd);
+                               if (twa_command_mapped(cmd))
+                                       scsi_dma_unmap(cmd);
                                cmd->scsi_done(cmd);
                        }
                }
@@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
        retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
        switch (retval) {
        case SCSI_MLQUEUE_HOST_BUSY:
-               scsi_dma_unmap(SCpnt);
+               if (twa_command_mapped(SCpnt))
+                       scsi_dma_unmap(SCpnt);
                twa_free_request_id(tw_dev, request_id);
                break;
        case 1:
                SCpnt->result = (DID_ERROR << 16);
-               scsi_dma_unmap(SCpnt);
+               if (twa_command_mapped(SCpnt))
+                       scsi_dma_unmap(SCpnt);
                done(SCpnt);
                tw_dev->state[request_id] = TW_S_COMPLETED;
                twa_free_request_id(tw_dev, request_id);
@@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
                /* Map sglist from scsi layer to cmd packet */
 
                if (scsi_sg_count(srb)) {
-                       if ((scsi_sg_count(srb) == 1) &&
-                           (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+                       if (!twa_command_mapped(srb)) {
                                if (srb->sc_data_direction == DMA_TO_DEVICE ||
                                    srb->sc_data_direction == DMA_BIDIRECTIONAL)
                                        scsi_sg_copy_to_buffer(srb,
@@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
 {
        struct scsi_cmnd *cmd = tw_dev->srb[request_id];
 
-       if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+       if (!twa_command_mapped(cmd) &&
            (cmd->sc_data_direction == DMA_FROM_DEVICE ||
             cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
                if (scsi_sg_count(cmd) == 1) {
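The 3w-9xxx hunks above centralize one rule in twa_command_mapped(): only a single-entry scatter list shorter than the minimum SGL length goes through the controller's inline buffer; everything else, including zero-length transfers, stays DMA-mapped, and only mapped commands may later be unmapped. A standalone sketch of that predicate, with an assumed cutoff standing in for TW_MIN_SGL_LENGTH:

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_SGL_LENGTH 512      /* assumed cutoff, for illustration only */

    static bool command_mapped(int sg_count, unsigned int bufflen)
    {
            return sg_count != 1 || bufflen >= MIN_SGL_LENGTH;
    }

    int main(void)
    {
            printf("1 segment,   64 bytes -> %s\n",
                   command_mapped(1, 64) ? "mapped" : "inline copy");
            printf("1 segment, 1024 bytes -> %s\n",
                   command_mapped(1, 1024) ? "mapped" : "inline copy");
            printf("4 segments,  64 bytes -> %s\n",
                   command_mapped(4, 64) ? "mapped" : "inline copy");
            return 0;
    }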
index 33c74d3436c947a7f11ca22498206f6efa97fcc2..6bffd91b973a475d614500a077be0034dbf6786f 100644 (file)
@@ -976,13 +976,13 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
        wake_up(&conn->ehwait);
 }
 
-static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
 {
         struct iscsi_nopout hdr;
        struct iscsi_task *task;
 
        if (!rhdr && conn->ping_task)
-               return;
+               return -EINVAL;
 
        memset(&hdr, 0, sizeof(struct iscsi_nopout));
        hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
@@ -996,13 +996,16 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
                hdr.ttt = RESERVED_ITT;
 
        task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
-       if (!task)
+       if (!task) {
                iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
-       else if (!rhdr) {
+               return -EIO;
+       } else if (!rhdr) {
                /* only track our nops */
                conn->ping_task = task;
                conn->last_ping = jiffies;
        }
+
+       return 0;
 }
 
 static int iscsi_nop_out_rsp(struct iscsi_task *task,
@@ -2092,8 +2095,10 @@ static void iscsi_check_transport_timeouts(unsigned long data)
        if (time_before_eq(last_recv + recv_timeout, jiffies)) {
                /* send a ping to try to provoke some traffic */
                ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
-               iscsi_send_nopout(conn, NULL);
-               next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
+               if (iscsi_send_nopout(conn, NULL))
+                       next_timeout = jiffies + (1 * HZ);
+               else
+                       next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
        } else
                next_timeout = last_recv + recv_timeout;
 
index 454536c49315dd6f367051bfcb37ea59fb23e2bd..9c780740fb829db69d411c48717415f2ac10cb0e 100644 (file)
@@ -887,6 +887,8 @@ static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
 static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
                          struct mvs_slot_info *slot, u32 slot_idx)
 {
+       if (!slot)
+               return;
        if (!slot->task)
                return;
        if (!sas_protocol_ata(task->task_proto))
index edb044a7b56d348a269634212155edce3a89f9b8..e7649ed3f6677e69fe2e3644da24d99de2aa3181 100644 (file)
@@ -111,7 +111,7 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name)
 
        dh = __scsi_dh_lookup(name);
        if (!dh) {
-               request_module(name);
+               request_module("scsi_dh_%s", name);
                dh = __scsi_dh_lookup(name);
        }
 
@@ -226,16 +226,20 @@ int scsi_dh_add_device(struct scsi_device *sdev)
 
        drv = scsi_dh_find_driver(sdev);
        if (drv)
-               devinfo = scsi_dh_lookup(drv);
+               devinfo = __scsi_dh_lookup(drv);
        if (devinfo)
                err = scsi_dh_handler_attach(sdev, devinfo);
        return err;
 }
 
-void scsi_dh_remove_device(struct scsi_device *sdev)
+void scsi_dh_release_device(struct scsi_device *sdev)
 {
        if (sdev->handler)
                scsi_dh_handler_detach(sdev);
+}
+
+void scsi_dh_remove_device(struct scsi_device *sdev)
+{
        device_remove_file(&sdev->sdev_gendev, &scsi_dh_state_attr);
 }
 
index cbfc5990052b6b2733ae1c8a81467d3a0e9e70f4..126a48c6431e5a5d9798aed3472916b06ef476c8 100644 (file)
@@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req)
 static void scsi_mq_done(struct scsi_cmnd *cmd)
 {
        trace_scsi_dispatch_cmd_done(cmd);
-       blk_mq_complete_request(cmd->request);
+       blk_mq_complete_request(cmd->request, cmd->request->errors);
 }
 
 static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
index 644bb7339b55bd89068e2a1c34a096dab682cb62..4d01cdb1b348306807663467bf56c254f16b94df 100644 (file)
@@ -173,9 +173,11 @@ extern struct async_domain scsi_sd_probe_domain;
 /* scsi_dh.c */
 #ifdef CONFIG_SCSI_DH
 int scsi_dh_add_device(struct scsi_device *sdev);
+void scsi_dh_release_device(struct scsi_device *sdev);
 void scsi_dh_remove_device(struct scsi_device *sdev);
 #else
 static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; }
+static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
 static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
 #endif
 
index b333389f248ffec291958014a39829156a188bd0..dff8fafb741c1bff625131e73e7fe4425375ced2 100644 (file)
@@ -399,6 +399,8 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 
        sdev = container_of(work, struct scsi_device, ew.work);
 
+       scsi_dh_release_device(sdev);
+
        parent = sdev->sdev_gendev.parent;
 
        spin_lock_irqsave(sdev->host->host_lock, flags);
index 3cf9faa6cc3fe871174ec1b2777472b0ac4c6883..a85d863d4a442f2f30633db5de0ff469ee9c6348 100644 (file)
@@ -992,11 +992,12 @@ static int davinci_spi_probe(struct platform_device *pdev)
                goto free_master;
        }
 
-       dspi->irq = platform_get_irq(pdev, 0);
-       if (dspi->irq <= 0) {
+       ret = platform_get_irq(pdev, 0);
+       if (ret == 0)
                ret = -EINVAL;
+       if (ret < 0)
                goto free_master;
-       }
+       dspi->irq = ret;
 
        ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
                                dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
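The spi-davinci hunk rearranges the probe so the raw return value is checked before being stored: platform_get_irq() may return a positive IRQ number, zero (not usable here), or a negative errno, and a negative value should be propagated unchanged. A sketch of the same pattern, with a stubbed getter standing in for platform_get_irq():

    #include <errno.h>
    #include <stdio.h>

    /* stand-in for platform_get_irq(): >0 = IRQ, 0 = invalid, <0 = errno */
    static int get_irq(int fake_result)
    {
            return fake_result;
    }

    static int probe(int fake_result, int *irq_out)
    {
            int ret = get_irq(fake_result);

            if (ret == 0)           /* zero is not a usable IRQ here */
                    ret = -EINVAL;
            if (ret < 0)
                    return ret;     /* keep the original error code */
            *irq_out = ret;
            return 0;
    }

    int main(void)
    {
            int irq = -1;
            int ret = probe(5, &irq);

            printf("get_irq()=5  -> ret=%d irq=%d\n", ret, irq);
            printf("get_irq()=0  -> ret=%d\n", probe(0, &irq));
            printf("get_irq()=-6 -> ret=%d\n", probe(-ENXIO, &irq));
            return 0;
    }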
index 4a3cf9ba152f6b8060e3c7709550dfdc7ec9fe17..fb36810ae89a40a1aae2729e5767191c82840c50 100644 (file)
@@ -657,7 +657,7 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
                "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
                intspec[0], intspec[1], intspec[2]);
 
-       if (d->of_node != controller)
+       if (irq_domain_get_of_node(d) != controller)
                return -EINVAL;
        if (intsize != 4)
                return -EINVAL;
index 23685e74917e2a5c3f3ab6dc6542f48d5ecbb2a9..bd2c69f85949ba6ce40b80c588a2e6a4036964a2 100644 (file)
@@ -116,7 +116,7 @@ static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
        if (ret)
                goto error_ret;
 
-       for (i = 0; i < num_read; i++)
+       for (i = 0; i < num_read / sizeof(u16); i++)
                *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i);
 
        if (copy_to_user(buf, rx, num_read))
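The sca3000 fix above corrects an off-by-factor bound: num_read counts bytes, while the endian-swap loop walks 16-bit samples, so the loop has to run num_read / sizeof(u16) times. A userspace sketch with made-up sample data:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t rx[] = { 0x12, 0x34, 0x56, 0x78 }; /* big-endian samples */
            size_t num_read = sizeof(rx);              /* bytes, not samples */
            uint16_t out[2];

            for (size_t i = 0; i < num_read / sizeof(uint16_t); i++)
                    out[i] = (uint16_t)(rx[2 * i] << 8 | rx[2 * i + 1]);

            printf("%zu bytes -> %zu samples: 0x%04x 0x%04x\n",
                   num_read, num_read / sizeof(uint16_t), out[0], out[1]);
            return 0;
    }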
index 3f7715c9968b83b25d6d1a4f6099c29b051db5fd..47fc00a3f63bc9e5106f0c77354c7cf8e309f36e 100644 (file)
@@ -915,11 +915,12 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
        case IIO_CHAN_INFO_OFFSET:
                if (chan->type == IIO_TEMP) {
                        /* The calculated value from the ADC is in Kelvin, we
-                        * want Celsius for hwmon so the offset is
-                        * -272.15 * scale
+                        * want Celsius for hwmon so the offset is -273.15
+                        * The offset is applied before scaling so it is
+                        * actually -273.15 * 4 / 1.012 = -1079.644268
                         */
-                       *val = -1075;
-                       *val2 = 691699;
+                       *val = -1079;
+                       *val2 = 644268;
 
                        return IIO_VAL_INT_PLUS_MICRO;
                }
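The corrected values in the mxs-lradc hunk follow directly from the comment: the -273.15 Kelvin-to-Celsius offset is applied before the 4 / 1.012 scale, and IIO splits the result into an integer part and a micro part. A worked check of the arithmetic:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            double offset = -273.15 * 4.0 / 1.012;       /* ~ -1079.644269 */
            int val = (int)offset;                       /* -1079 */
            int val2 = (int)(fabs(offset - val) * 1e6);  /* 644268 */

            printf("offset = %.6f -> val = %d, val2 = %d\n", offset, val, val2);
            return 0;
    }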
index 769b61193d87ef29c7868465c50e9b8ab87ee045..a9bc6e23fc2582f39c5a753638979fba15451e61 100644 (file)
@@ -224,7 +224,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 
                prefetchw(&page->flags);
                ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
-                                           GFP_KERNEL);
+                                           GFP_NOFS);
                if (ret == 0) {
                        unlock_page(page);
                } else {
index 4299cf45f947ded9433fa045c1cb54bc957a02c4..5e1f16c36b49adfd45dbd2221435fd9bcda57daa 100644 (file)
@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void)
        __this_cpu_write(reporting_keystroke, true);
        input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
        input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
+       input_sync(virt_keyboard);
        __this_cpu_write(reporting_keystroke, false);
 
        /* reenable preemption */
index 98af3b1f2d2a13efea585a11429bc00a48451582..aa5ab6c80ed4ffefc12aaca9c1337e403371d478 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/workqueue.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
-#include <asm/cmpxchg.h>
+#include <linux/atomic.h>
 
 #include "speakup.h"
 
index 7ff96270c933b1ad577ee3dbb9cedf92d8f07a89..e570ff084add5b0596cbac05d07bca560d350b01 100644 (file)
@@ -144,6 +144,16 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
                switch_on_temp = 0;
 
        temperature_threshold = control_temp - switch_on_temp;
+       /*
+        * estimate_pid_constants() tries to find appropriate default
+        * values for thermal zones that don't provide them. If a
+        * system integrator has configured a thermal zone with two
+        * passive trip points at the same temperature, that person
+        * hasn't put any effort into setting up the thermal zone
+        * properly, so just give up.
+        */
+       if (!temperature_threshold)
+               return;
 
        if (!tz->tzp->k_po || force)
                tz->tzp->k_po = int_to_frac(sustainable_power) /
index 0bae8cc6c23a0be622b2addf1479830e838cb243..ca920b0ecf8f8688763426ba2d6b253a3620a1b6 100644 (file)
@@ -932,7 +932,7 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
 
        if (data->soc == SOC_ARCH_EXYNOS5260)
                emul_con = EXYNOS5260_EMUL_CON;
-       if (data->soc == SOC_ARCH_EXYNOS5433)
+       else if (data->soc == SOC_ARCH_EXYNOS5433)
                emul_con = EXYNOS5433_TMU_EMUL_CON;
        else if (data->soc == SOC_ARCH_EXYNOS7)
                emul_con = EXYNOS7_TMU_REG_EMUL_CON;
index 20932cc9c8f71681038bf5e505fec87d9c402280..b09023b071696c2a5d25e003dcff798b42235602 100644 (file)
@@ -343,8 +343,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
                spin_lock_irqsave(&tty->ctrl_lock, flags);
                tty->ctrl_status |= TIOCPKT_FLUSHREAD;
                spin_unlock_irqrestore(&tty->ctrl_lock, flags);
-               if (waitqueue_active(&tty->link->read_wait))
-                       wake_up_interruptible(&tty->link->read_wait);
+               wake_up_interruptible(&tty->link->read_wait);
        }
 }
 
@@ -1382,8 +1381,7 @@ handle_newline:
                        put_tty_queue(c, ldata);
                        smp_store_release(&ldata->canon_head, ldata->read_head);
                        kill_fasync(&tty->fasync, SIGIO, POLL_IN);
-                       if (waitqueue_active(&tty->read_wait))
-                               wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+                       wake_up_interruptible_poll(&tty->read_wait, POLLIN);
                        return 0;
                }
        }
@@ -1667,8 +1665,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
 
        if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) {
                kill_fasync(&tty->fasync, SIGIO, POLL_IN);
-               if (waitqueue_active(&tty->read_wait))
-                       wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+               wake_up_interruptible_poll(&tty->read_wait, POLLIN);
        }
 }
 
@@ -1887,10 +1884,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
        }
 
        /* The termios change make the tty ready for I/O */
-       if (waitqueue_active(&tty->write_wait))
-               wake_up_interruptible(&tty->write_wait);
-       if (waitqueue_active(&tty->read_wait))
-               wake_up_interruptible(&tty->read_wait);
+       wake_up_interruptible(&tty->write_wait);
+       wake_up_interruptible(&tty->read_wait);
 }
 
 /**
index 21d01a491405a2c52cd1ec02f3198fa4c6b8f10c..e508939daea3f3128a2ada1e696a5f751d212077 100644 (file)
@@ -80,10 +80,6 @@ int serial8250_tx_dma(struct uart_8250_port *p)
                return 0;
 
        dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
-       if (dma->tx_size < p->port.fifosize) {
-               ret = -EINVAL;
-               goto err;
-       }
 
        desc = dmaengine_prep_slave_single(dma->txchan,
                                           dma->tx_addr + xmit->tail,
index b1e0ba3e525b069d9649dff9d7cd4a661f2c2014..0bbf34035d6a51edb267d2f53c66fc13d7b54260 100644 (file)
@@ -261,6 +261,14 @@ configured less than Maximum supported fifo bytes */
                                  UART_FCR7_64BYTE,
                .flags          = UART_CAP_FIFO,
        },
+       [PORT_RT2880] = {
+               .name           = "Palmchip BK-3103",
+               .fifo_size      = 16,
+               .tx_loadsz      = 16,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .rxtrig_bytes   = {1, 4, 8, 14},
+               .flags          = UART_CAP_FIFO,
+       },
 };
 
 /* Uart divisor latch read */
index 5ca5cf3e9359cf17f9a3aaebbff028ecada3a910..538ea03bc101a2994324d2ce33b8f7b237c12c78 100644 (file)
@@ -2786,7 +2786,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
        ret = atmel_init_gpios(port, &pdev->dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to initialize GPIOs.");
-               goto err;
+               goto err_clear_bit;
        }
 
        ret = atmel_init_port(port, pdev);
index fe3d41cc841632134fd907b1fb7af08f0e9d6e81..d0388a071ba1d474025a74fec8cfb80f5a1ed4a0 100644 (file)
@@ -1631,12 +1631,12 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
        int locked = 1;
        int retval;
 
-       retval = clk_prepare_enable(sport->clk_per);
+       retval = clk_enable(sport->clk_per);
        if (retval)
                return;
-       retval = clk_prepare_enable(sport->clk_ipg);
+       retval = clk_enable(sport->clk_ipg);
        if (retval) {
-               clk_disable_unprepare(sport->clk_per);
+               clk_disable(sport->clk_per);
                return;
        }
 
@@ -1675,8 +1675,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
        if (locked)
                spin_unlock_irqrestore(&sport->port.lock, flags);
 
-       clk_disable_unprepare(sport->clk_ipg);
-       clk_disable_unprepare(sport->clk_per);
+       clk_disable(sport->clk_ipg);
+       clk_disable(sport->clk_per);
 }
 
 /*
@@ -1777,7 +1777,15 @@ imx_console_setup(struct console *co, char *options)
 
        retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
 
-       clk_disable_unprepare(sport->clk_ipg);
+       clk_disable(sport->clk_ipg);
+       if (retval) {
+               clk_unprepare(sport->clk_ipg);
+               goto error_console;
+       }
+
+       retval = clk_prepare(sport->clk_per);
+       if (retval)
+               clk_disable_unprepare(sport->clk_ipg);
 
 error_console:
        return retval;
index 5a3fa89138801ea63907ec102fbb589b36d7201c..a660ab181cca7357c59c7256303628eb8bb929a9 100644 (file)
@@ -242,7 +242,10 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
        atomic_inc(&buf->priority);
 
        mutex_lock(&buf->lock);
-       while ((next = buf->head->next) != NULL) {
+       /* paired w/ release in __tty_buffer_request_room; ensures there are
+        * no pending memory accesses to the freed buffer
+        */
+       while ((next = smp_load_acquire(&buf->head->next)) != NULL) {
                tty_buffer_free(port, buf->head);
                buf->head = next;
        }
@@ -290,7 +293,10 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
                if (n != NULL) {
                        n->flags = flags;
                        buf->tail = n;
-                       b->commit = b->used;
+                       /* paired w/ acquire in flush_to_ldisc(); ensures
+                        * flush_to_ldisc() sees buffer data.
+                        */
+                       smp_store_release(&b->commit, b->used);
                        /* paired w/ acquire in flush_to_ldisc(); ensures the
                         * latest commit value can be read before the head is
                         * advanced to the next buffer
@@ -393,7 +399,10 @@ void tty_schedule_flip(struct tty_port *port)
 {
        struct tty_bufhead *buf = &port->buf;
 
-       buf->tail->commit = buf->tail->used;
+       /* paired w/ acquire in flush_to_ldisc(); ensures
+        * flush_to_ldisc() sees buffer data.
+        */
+       smp_store_release(&buf->tail->commit, buf->tail->used);
        schedule_work(&buf->work);
 }
 EXPORT_SYMBOL(tty_schedule_flip);
@@ -467,7 +476,7 @@ static void flush_to_ldisc(struct work_struct *work)
        struct tty_struct *tty;
        struct tty_ldisc *disc;
 
-       tty = port->itty;
+       tty = READ_ONCE(port->itty);
        if (tty == NULL)
                return;
 
@@ -491,7 +500,10 @@ static void flush_to_ldisc(struct work_struct *work)
                 * is advancing to the next buffer
                 */
                next = smp_load_acquire(&head->next);
-               count = head->commit - head->read;
+               /* paired w/ release in __tty_buffer_request_room() or in
+                * tty_buffer_flush(); ensures we see the committed buffer data
+                */
+               count = smp_load_acquire(&head->commit) - head->read;
                if (!count) {
                        if (next == NULL) {
                                check_other_closed(tty);
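The tty_buffer hunks above document a release/acquire pairing: the writer publishes the commit index only after the buffer bytes are written, and the reader loads the index with acquire semantics before touching the bytes, so it can never observe an advanced commit without the data behind it. A self-contained C11 sketch of that handoff pattern, shown single-threaded purely to illustrate the API pairing rather than the tty code itself:

    #include <stdatomic.h>
    #include <stdio.h>

    static char buffer[64];
    static atomic_size_t commit;    /* number of valid bytes in buffer[] */

    static void producer_add(char c)
    {
            size_t used = atomic_load_explicit(&commit, memory_order_relaxed);

            buffer[used] = c;                            /* 1: write the data */
            atomic_store_explicit(&commit, used + 1,
                                  memory_order_release); /* 2: publish it */
    }

    static void consumer_drain(void)
    {
            size_t avail = atomic_load_explicit(&commit, memory_order_acquire);

            for (size_t i = 0; i < avail; i++)  /* safe: data is visible */
                    putchar(buffer[i]);
            putchar('\n');
    }

    int main(void)
    {
            producer_add('h');
            producer_add('i');
            consumer_drain();
            return 0;
    }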
index 02785d844354be01b9774ad10e70ab398297c6da..2eefaa6e3e3a4af9a5ab2b03cf03f9e75a04ca1d 100644 (file)
@@ -2128,8 +2128,24 @@ retry_open:
        if (!noctty &&
            current->signal->leader &&
            !current->signal->tty &&
-           tty->session == NULL)
-               __proc_set_tty(tty);
+           tty->session == NULL) {
+               /*
+                * Don't let a process that only has write access to the tty
+                * obtain the privileges associated with having a tty as
+                * controlling terminal (being able to reopen it with full
+                * access through /dev/tty, being able to perform pushback).
+                * Many distributions set the group of all ttys to "tty" and
+                * grant write-only access to all terminals for setgid tty
+                * binaries, which should not imply full privileges on all ttys.
+                *
+                * This could theoretically break old code that performs open()
+                * on a write-only file descriptor. In that case, it might be
+                * necessary to also permit this if
+                * inode_permission(inode, MAY_READ) == 0.
+                */
+               if (filp->f_mode & FMODE_READ)
+                       __proc_set_tty(tty);
+       }
        spin_unlock_irq(&current->sighand->siglock);
        read_unlock(&tasklist_lock);
        tty_unlock(tty);
@@ -2418,7 +2434,7 @@ static int fionbio(struct file *file, int __user *p)
  *             Takes ->siglock() when updating signal->tty
  */
 
-static int tiocsctty(struct tty_struct *tty, int arg)
+static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
 {
        int ret = 0;
 
@@ -2452,6 +2468,13 @@ static int tiocsctty(struct tty_struct *tty, int arg)
                        goto unlock;
                }
        }
+
+       /* See the comment in tty_open(). */
+       if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) {
+               ret = -EPERM;
+               goto unlock;
+       }
+
        proc_set_tty(tty);
 unlock:
        read_unlock(&tasklist_lock);
@@ -2844,7 +2867,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                no_tty();
                return 0;
        case TIOCSCTTY:
-               return tiocsctty(tty, arg);
+               return tiocsctty(tty, file, arg);
        case TIOCGPGRP:
                return tiocgpgrp(tty, real_tty, p);
        case TIOCSPGRP:
@@ -3151,13 +3174,18 @@ struct class *tty_class;
 static int tty_cdev_add(struct tty_driver *driver, dev_t dev,
                unsigned int index, unsigned int count)
 {
+       int err;
+
        /* init here, since reused cdevs cause crashes */
        driver->cdevs[index] = cdev_alloc();
        if (!driver->cdevs[index])
                return -ENOMEM;
-       cdev_init(driver->cdevs[index], &tty_fops);
+       driver->cdevs[index]->ops = &tty_fops;
        driver->cdevs[index]->owner = driver->owner;
-       return cdev_add(driver->cdevs[index], dev, count);
+       err = cdev_add(driver->cdevs[index], dev, count);
+       if (err)
+               kobject_put(&driver->cdevs[index]->kobj);
+       return err;
 }
 
 /**
index d85abfed84ccaa2327820f1b35cabac11422d647..f5a381945db2886a77e23a8fcf40ba9a34bb7fe7 100644 (file)
@@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
 
+       /* Logitech ConferenceCam CC3000e */
+       { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
+       { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
+
+       /* Logitech PTZ Pro Camera */
+       { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+
        /* Logitech Quickcam Fusion */
        { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Philips PSC805 audio device */
        { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Plantronic Audio 655 DSP */
+       { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Plantronic Audio 648 USB */
+       { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Artisman Watchdog Dongle */
        { USB_DEVICE(0x04b4, 0x0526), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
index d1b81539d6320b3baed7c5bc9bc4cdde7440aee9..d6199507f86140b15439463f97f234aa7955d6fe 100644 (file)
@@ -159,8 +159,10 @@ static int ep_bd_list_alloc(struct bdc_ep *ep)
                bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool,
                                                        GFP_ATOMIC,
                                                        &dma);
-               if (!bd_table->start_bd)
+               if (!bd_table->start_bd) {
+                       kfree(bd_table);
                        goto fail;
+               }
 
                bd_table->dma = dma;
 
index c79d33676672daca6047ebead9d8afb70a49007d..c47d3e48058659230e8d48ed342f9c163b08732d 100644 (file)
@@ -147,6 +147,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
                pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+               xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
                (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
index 43291f93afeb59a90a3b39cbb045a26b3bd1ad5b..97ffe39972735109f1ca1f86e003fe2550e4f936 100644 (file)
@@ -2191,6 +2191,10 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                }
        /* Fast path - was this the last TRB in the TD for this URB? */
        } else if (event_trb == td->last_trb) {
+               if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
+                       return finish_td(xhci, td, event_trb, event, ep,
+                                        status, false);
+
                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -
@@ -2242,6 +2246,12 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                        td->urb->actual_length +=
                                TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+               if (trb_comp_code == COMP_SHORT_TX) {
+                       xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
+                       td->urb_length_set = true;
+                       return 0;
+               }
        }
 
        return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -2274,6 +2284,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
        u32 trb_comp_code;
        int ret = 0;
        int td_num = 0;
+       bool handling_skipped_tds = false;
 
        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
        xdev = xhci->devs[slot_id];
@@ -2410,6 +2421,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                ep->skip = true;
                xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
                goto cleanup;
+       case COMP_PING_ERR:
+               ep->skip = true;
+               xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
+               goto cleanup;
        default:
                if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
                        status = 0;
@@ -2546,13 +2561,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                                 ep, &status);
 
 cleanup:
+
+
+               handling_skipped_tds = ep->skip &&
+                       trb_comp_code != COMP_MISSED_INT &&
+                       trb_comp_code != COMP_PING_ERR;
+
                /*
-                * Do not update event ring dequeue pointer if ep->skip is set.
-                * Will roll back to continue process missed tds.
+                * Do not update event ring dequeue pointer if we're in a loop
+                * processing missed tds.
                 */
-               if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
+               if (!handling_skipped_tds)
                        inc_deq(xhci, xhci->event_ring);
-               }
 
                if (ret) {
                        urb = td->urb;
@@ -2587,7 +2607,7 @@ cleanup:
         * Process them as short transfer until reach the td pointed by
         * the event.
         */
-       } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
+       } while (handling_skipped_tds);
 
        return 0;
 }
index 3ad5d19e4d04ede93fb8bc34debdbb9fcf2f4704..23c794813e6a923bff5b0a3719abbea860ee416d 100644 (file)
@@ -472,7 +472,7 @@ static int chaoskey_rng_read(struct hwrng *rng, void *data,
        if (this_time > max)
                this_time = max;
 
-       memcpy(data, dev->buf, this_time);
+       memcpy(data, dev->buf + dev->used, this_time);
 
        dev->used += this_time;
 
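The chaoskey fix above is a cursor bug: each read must copy from buf + used, otherwise every call hands back the first bytes of the buffer again. A small userspace sketch of the corrected behaviour, with made-up buffer contents:

    #include <stdio.h>
    #include <string.h>

    static const char buf[] = "0123456789";
    static size_t used;

    static size_t read_chunk(char *out, size_t max)
    {
            size_t avail = sizeof(buf) - 1 - used;
            size_t n = avail < max ? avail : max;

            memcpy(out, buf + used, n);     /* note the + used offset */
            used += n;
            return n;
    }

    int main(void)
    {
            char out[4];
            size_t n;

            while ((n = read_chunk(out, sizeof(out))) > 0)
                    printf("read %zu bytes: %.*s\n", n, (int)n, out);
            return 0;
    }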
index 70f2b8a2e6cfe5d62d6509c507670aee8c981dd7..1bd9232ff76fcb774e6b1e9e1fb8acdb62ef267d 100644 (file)
@@ -391,9 +391,20 @@ static int omap2430_musb_init(struct musb *musb)
        }
        musb->isr = omap2430_musb_interrupt;
 
+       /*
+        * Enable runtime PM for musb parent (this driver). We can't
+        * do it earlier as struct musb is not yet allocated and we
+        * need to touch the musb registers for runtime PM.
+        */
+       pm_runtime_enable(glue->dev);
+       status = pm_runtime_get_sync(glue->dev);
+       if (status < 0)
+               goto err1;
+
        status = pm_runtime_get_sync(dev);
        if (status < 0) {
                dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
+               pm_runtime_put_sync(glue->dev);
                goto err1;
        }
 
@@ -426,6 +437,7 @@ static int omap2430_musb_init(struct musb *musb)
        phy_power_on(musb->phy);
 
        pm_runtime_put_noidle(musb->controller);
+       pm_runtime_put_noidle(glue->dev);
        return 0;
 
 err1:
@@ -626,7 +638,11 @@ static int omap2430_probe(struct platform_device *pdev)
                goto err2;
        }
 
-       pm_runtime_enable(&pdev->dev);
+       /*
+        * Note that we cannot enable PM runtime yet for this
+        * driver as we need struct musb initialized first.
+        * See omap2430_musb_init above.
+        */
 
        ret = platform_device_add(musb);
        if (ret) {
@@ -675,11 +691,12 @@ static int omap2430_runtime_resume(struct device *dev)
        struct omap2430_glue            *glue = dev_get_drvdata(dev);
        struct musb                     *musb = glue_to_musb(glue);
 
-       if (musb) {
-               omap2430_low_level_init(musb);
-               musb_writel(musb->mregs, OTG_INTERFSEL,
-                               musb->context.otg_interfsel);
-       }
+       if (!musb)
+               return -EPROBE_DEFER;
+
+       omap2430_low_level_init(musb);
+       musb_writel(musb->mregs, OTG_INTERFSEL,
+                   musb->context.otg_interfsel);
 
        return 0;
 }
index 7b98e1d9194cb3571452c7143603f4e9d9966244..d82fa36c346503985867cc55b0e40dfd724ebf12 100644 (file)
@@ -476,6 +476,11 @@ static const struct of_device_id usbhs_of_match[] = {
                .compatible = "renesas,usbhs-r8a7794",
                .data = (void *)USBHS_TYPE_RCAR_GEN2,
        },
+       {
+               /* Gen3 is compatible with Gen2 */
+               .compatible = "renesas,usbhs-r8a7795",
+               .data = (void *)USBHS_TYPE_RCAR_GEN2,
+       },
        { },
 };
 MODULE_DEVICE_TABLE(of, usbhs_of_match);
@@ -493,7 +498,7 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
                return NULL;
 
        dparam = &info->driver_param;
-       dparam->type = of_id ? (u32)of_id->data : 0;
+       dparam->type = of_id ? (uintptr_t)of_id->data : 0;
        if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp))
                dparam->buswait_bwait = tmp;
        gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0,
index 8fc15c0ba3399441959e2570bfb4ff131fa80948..277160bc6f2587325e75da7a36099c61ff37da6a 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
 #include <linux/phy/phy.h>
-#include <linux/platform_data/gpio-rcar.h>
 #include <linux/usb/phy.h>
 #include "common.h"
 #include "rcar2.h"
index 4772862b71a744091efef2216d9deaeca54567d1..d3f767448a72c75f468afe6b922e9306c35e5011 100644 (file)
@@ -183,10 +183,17 @@ static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
        return vq->acked_features & (1ULL << bit);
 }
 
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
 static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
 {
        return vq->is_le;
 }
+#else
+static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
+{
+       return virtio_legacy_is_little_endian() || vq->is_le;
+}
+#endif
 
 /* Memory accessors */
 static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
index 1aaf89300621abc811f57f549c25b9a540a21d99..92f394927f241bef338a293170e4a09b9f45c647 100644 (file)
@@ -1093,6 +1093,7 @@ static void fbcon_init(struct vc_data *vc, int init)
                con_copy_unimap(vc, svc);
 
        ops = info->fbcon_par;
+       ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
        p->con_rotate = initial_rotation;
        set_blitting_type(vc, info);
 
index 0e5fde1d3ffbe5a152035f33063afa98bf84f33e..9f9a7bef1ff6d46d80fe8cb6dcfeea5a3e26729d 100644 (file)
@@ -752,7 +752,7 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev,
        if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) {
                dev_err(dev, "Invalid waveform\n");
                err = -EINVAL;
-               goto err_failed;
+               goto err_fw;
        }
 
        mutex_lock(&(par->io_lock));
@@ -762,13 +762,15 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev,
        mutex_unlock(&(par->io_lock));
        if (err < 0) {
                dev_err(dev, "Failed to store broadsheet waveform\n");
-               goto err_failed;
+               goto err_fw;
        }
 
        dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size);
 
-       return len;
+       err = len;
 
+err_fw:
+       release_firmware(fw_entry);
 err_failed:
        return err;
 }
index 4bfff349b1fbb56f0b99e206f21fc2986ad4db0d..95d293b7445a83473bc2131cf657f5de14ef52b3 100644 (file)
@@ -114,6 +114,20 @@ static int efifb_setup(char *options)
        return 0;
 }
 
+static inline bool fb_base_is_valid(void)
+{
+       if (screen_info.lfb_base)
+               return true;
+
+       if (!(screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE))
+               return false;
+
+       if (screen_info.ext_lfb_base)
+               return true;
+
+       return false;
+}
+
 static int efifb_probe(struct platform_device *dev)
 {
        struct fb_info *info;
@@ -141,7 +155,7 @@ static int efifb_probe(struct platform_device *dev)
                screen_info.lfb_depth = 32;
        if (!screen_info.pages)
                screen_info.pages = 1;
-       if (!screen_info.lfb_base) {
+       if (!fb_base_is_valid()) {
                printk(KERN_DEBUG "efifb: invalid framebuffer address\n");
                return -ENODEV;
        }
@@ -160,6 +174,14 @@ static int efifb_probe(struct platform_device *dev)
        }
 
        efifb_fix.smem_start = screen_info.lfb_base;
+
+       if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) {
+               u64 ext_lfb_base;
+
+               ext_lfb_base = (u64)(unsigned long)screen_info.ext_lfb_base << 32;
+               efifb_fix.smem_start |= ext_lfb_base;
+       }
+
        efifb_defined.bits_per_pixel = screen_info.lfb_depth;
        efifb_defined.xres = screen_info.lfb_width;
        efifb_defined.yres = screen_info.lfb_height;
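The efifb change assembles a 64-bit framebuffer address from two halves when the firmware advertises VIDEO_CAPABILITY_64BIT_BASE: lfb_base carries the low 32 bits and ext_lfb_base the upper 32. A minimal sketch of the combination; the addresses are examples only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t lfb_base = 0x80000000;     /* low 32 bits  */
            uint32_t ext_lfb_base = 0x4;        /* high 32 bits */
            uint64_t smem_start = lfb_base;

            smem_start |= (uint64_t)ext_lfb_base << 32;
            printf("framebuffer at 0x%llx\n", (unsigned long long)smem_start);
            return 0;
    }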
index 7fa2e6f9e322d1e2223116474800b515684abfc2..b335c1ae8625106efff818d696ebad532ade7f17 100644 (file)
@@ -1628,9 +1628,16 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
 static int fsl_diu_resume(struct platform_device *ofdev)
 {
        struct fsl_diu_data *data;
+       unsigned int i;
 
        data = dev_get_drvdata(&ofdev->dev);
-       enable_lcdc(data->fsl_diu_info);
+
+       fsl_diu_enable_interrupts(data);
+       update_lcdc(data->fsl_diu_info);
+       for (i = 0; i < NUM_AOIS; i++) {
+               if (data->mfb[i].count)
+                       fsl_diu_enable_panel(&data->fsl_diu_info[i]);
+       }
 
        return 0;
 }
index 9b8bebdf8f86e1209f0ca2f6f9779e8c64fa2e43..f9ec5c0484fabbd8d6f2cc5b5e5897c003e07b10 100644 (file)
@@ -831,6 +831,7 @@ static struct of_device_id of_platform_mb862xx_tbl[] = {
        { .compatible = "fujitsu,coral", },
        { /* end */ }
 };
+MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl);
 
 static struct platform_driver of_platform_mb862xxfb_driver = {
        .driver = {
index a8ce920fa797d335d2dbfbbc1c9d8f93a4378959..d811e6dcaef727588cdc65695673a4f7144f0f30 100644 (file)
@@ -294,7 +294,7 @@ static int dvic_probe_of(struct platform_device *pdev)
 
        adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
        if (adapter_node) {
-               adapter = of_find_i2c_adapter_by_node(adapter_node);
+               adapter = of_get_i2c_adapter_by_node(adapter_node);
                if (adapter == NULL) {
                        dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
                        omap_dss_put_device(ddata->in);
index 90cbc4c3406c719909f3495cb97533face292d3c..c581231c74a53bb837dcc24da190202ed56cb648 100644 (file)
@@ -898,6 +898,7 @@ static const struct of_device_id acx565akm_of_match[] = {
        { .compatible = "omapdss,sony,acx565akm", },
        {},
 };
+MODULE_DEVICE_TABLE(of, acx565akm_of_match);
 
 static struct spi_driver acx565akm_driver = {
        .driver = {
index 7ed9a227f5eaf006ed5c2a9759ee9db299d114e3..01b43e9ce941acb8751c0c2e8294e19db7ce927c 100644 (file)
@@ -226,7 +226,7 @@ static void blade_image_blit(struct tridentfb_par *par, const char *data,
        writemmr(par, DST1, point(x, y));
        writemmr(par, DST2, point(x + w - 1, y + h - 1));
 
-       memcpy(par->io_virt + 0x10000, data, 4 * size);
+       iowrite32_rep(par->io_virt + 0x10000, data, size);
 }
 
 static void blade_copy_rect(struct tridentfb_par *par,
@@ -673,8 +673,14 @@ static int get_nativex(struct tridentfb_par *par)
 static inline void set_lwidth(struct tridentfb_par *par, int width)
 {
        write3X4(par, VGA_CRTC_OFFSET, width & 0xFF);
-       write3X4(par, AddColReg,
-                (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
+       /* chips older than TGUI9660 have only 1 width bit in AddColReg */
+       /* touching the other one breaks I2C/DDC */
+       if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320)
+               write3X4(par, AddColReg,
+                    (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4));
+       else
+               write3X4(par, AddColReg,
+                    (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
 }
 
 /* For resolutions smaller than FP resolution stretch */
index 32d8275e4c88485b2b522f56733e90ba614fc7b2..8a1076beecd33aa29891849f5feaa36b42027036 100644 (file)
@@ -210,6 +210,7 @@ struct display_timings *of_get_display_timings(struct device_node *np)
                         */
                        pr_err("%s: error in timing %d\n",
                                of_node_full_name(np), disp->num_timings + 1);
+                       kfree(dt);
                        goto timingfail;
                }
 
index c68edc16aa54c5e65347588e32c883ffdaf71f63..79e1aa1b0959f1ed8b2e2404fc3a044683c68f0d 100644 (file)
@@ -817,8 +817,9 @@ config ITCO_WDT
        tristate "Intel TCO Timer/Watchdog"
        depends on (X86 || IA64) && PCI
        select WATCHDOG_CORE
+       depends on I2C || I2C=n
        select LPC_ICH if !EXPERT
-       select I2C_I801 if !EXPERT
+       select I2C_I801 if !EXPERT && I2C
        ---help---
          Hardware driver for the intel TCO timer based watchdog devices.
          These drivers are included in the Intel 82801 I/O Controller
index 66c3e656a616619e02c8c523f19913e3274459ff..8a5ce5b5a0b6f9cc684382ddfaa6ed96b4dadb7d 100644 (file)
 #define PM_RSTC_WRCFG_FULL_RESET       0x00000020
 #define PM_RSTC_RESET                  0x00000102
 
+/*
+ * The Raspberry Pi firmware uses the RSTS register to know which partition
+ * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10.
+ * Partition 63 is a special partition used by the firmware to indicate halt.
+ */
+#define PM_RSTS_RASPBERRYPI_HALT       0x555
+
 #define SECS_TO_WDOG_TICKS(x) ((x) << 16)
 #define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
 
@@ -151,8 +158,7 @@ static void bcm2835_power_off(void)
         * hard reset.
         */
        val = readl_relaxed(wdt->base + PM_RSTS);
-       val &= PM_RSTC_WRCFG_CLR;
-       val |= PM_PASSWORD | PM_RSTS_HADWRH_SET;
+       val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT;
        writel_relaxed(val, wdt->base + PM_RSTS);
 
        /* Continue with normal reset mechanism */
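The new PM_RSTS_RASPBERRYPI_HALT value follows from the encoding described in the comment: the boot partition number is spread across the even bits of PM_RSTS (bits 0, 2, 4, 6, 8, 10), so partition 63 encodes to 0x555, which the firmware treats as halt. A short sketch of that spreading:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t spread_partition(unsigned int part)
    {
            uint32_t val = 0;

            for (unsigned int bit = 0; bit < 6; bit++)
                    if (part & (1u << bit))
                            val |= 1u << (2 * bit); /* bit i -> bit 2*i */
            return val;
    }

    int main(void)
    {
            printf("partition  2 -> 0x%03x\n", spread_partition(2));
            printf("partition 63 -> 0x%03x\n", spread_partition(63)); /* 0x555 */
            return 0;
    }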
index cc1bdfc2ff71c31b65dc455c382fb3455815fc2a..006e2348022cbc7015831819fa0f2ae0f689ebfc 100644 (file)
@@ -303,6 +303,7 @@ static const struct of_device_id gef_wdt_ids[] = {
        },
        {},
 };
+MODULE_DEVICE_TABLE(of, gef_wdt_ids);
 
 static struct platform_driver gef_wdt_driver = {
        .driver = {
index 69013007dc4701826518c0babd6d94d258719892..098fa9c34d6d8232b86ed51c2e2f7bea9a50c982 100644 (file)
@@ -253,6 +253,7 @@ static const struct of_device_id a21_wdt_ids[] = {
        { .compatible = "men,a021-wdt" },
        { },
 };
+MODULE_DEVICE_TABLE(of, a21_wdt_ids);
 
 static struct platform_driver a21_wdt_driver = {
        .probe = a21_wdt_probe,
index 2789da2c05156d02931779cd21322a24e5f133e2..60b0605bd7e60eb7ed1f8d67f4b01022a70997b3 100644 (file)
@@ -168,6 +168,7 @@ static const struct of_device_id moxart_watchdog_match[] = {
        { .compatible = "moxa,moxart-watchdog" },
        { },
 };
+MODULE_DEVICE_TABLE(of, moxart_watchdog_match);
 
 static struct platform_driver moxart_wdt_driver = {
        .probe      = moxart_wdt_probe,
index ecbc63d3143e78d53a9ab0e3dd2091686e51d620..9a2ec79e8cfb6c4ad26a5578e39f62f8fa80226b 100644 (file)
@@ -1828,7 +1828,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
        int found = 0;
        struct extent_buffer *eb;
        struct btrfs_inode_extref *extref;
-       struct extent_buffer *leaf;
        u32 item_size;
        u32 cur_offset;
        unsigned long ptr;
@@ -1856,9 +1855,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
                btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
                btrfs_release_path(path);
 
-               leaf = path->nodes[0];
-               item_size = btrfs_item_size_nr(leaf, slot);
-               ptr = btrfs_item_ptr_offset(leaf, slot);
+               item_size = btrfs_item_size_nr(eb, slot);
+               ptr = btrfs_item_ptr_offset(eb, slot);
                cur_offset = 0;
 
                while (cur_offset < item_size) {
@@ -1872,7 +1870,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
                        if (ret)
                                break;
 
-                       cur_offset += btrfs_inode_extref_name_len(leaf, extref);
+                       cur_offset += btrfs_inode_extref_name_len(eb, extref);
                        cur_offset += sizeof(*extref);
                }
                btrfs_tree_read_unlock_blocking(eb);
index 295795aebe0b42330cc1147e02340eb2c59f1d7b..1e60d00d4ea7c42104614ede9e203a1f56e6408a 100644 (file)
@@ -2847,6 +2847,8 @@ int open_ctree(struct super_block *sb,
            !extent_buffer_uptodate(chunk_root->node)) {
                printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
                       sb->s_id);
+               if (!IS_ERR(chunk_root->node))
+                       free_extent_buffer(chunk_root->node);
                chunk_root->node = NULL;
                goto fail_tree_roots;
        }
@@ -2885,6 +2887,8 @@ retry_root_backup:
            !extent_buffer_uptodate(tree_root->node)) {
                printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
                       sb->s_id);
+               if (!IS_ERR(tree_root->node))
+                       free_extent_buffer(tree_root->node);
                tree_root->node = NULL;
                goto recovery_tree_root;
        }
index 8d052209f473be1d0959b6e65bded71b92c05584..2513a7f533342c827c5c5e1150de6a3d196879ac 100644 (file)
@@ -112,11 +112,11 @@ static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh,
        u32 generation;
 
        if (fh_type == FILEID_BTRFS_WITH_PARENT) {
-               if (fh_len !=  BTRFS_FID_SIZE_CONNECTABLE)
+               if (fh_len <  BTRFS_FID_SIZE_CONNECTABLE)
                        return NULL;
                root_objectid = fid->root_objectid;
        } else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) {
-               if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT)
+               if (fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT)
                        return NULL;
                root_objectid = fid->parent_root_objectid;
        } else
@@ -136,11 +136,11 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
        u32 generation;
 
        if ((fh_type != FILEID_BTRFS_WITH_PARENT ||
-            fh_len != BTRFS_FID_SIZE_CONNECTABLE) &&
+            fh_len < BTRFS_FID_SIZE_CONNECTABLE) &&
            (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT ||
-            fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) &&
+            fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) &&
            (fh_type != FILEID_BTRFS_WITHOUT_PARENT ||
-            fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE))
+            fh_len < BTRFS_FID_SIZE_NON_CONNECTABLE))
                return NULL;
 
        objectid = fid->objectid;
index 9f960420133307b5d9b26c7b07bd37d64bec89cf..601d7d45d164a7e91477748a900bbef8cf67d0b0 100644 (file)
@@ -2828,6 +2828,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_head *head;
        int ret;
        int run_all = count == (unsigned long)-1;
+       bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
 
        /* We'll clean this up in btrfs_cleanup_transaction */
        if (trans->aborted)
@@ -2844,6 +2845,7 @@ again:
 #ifdef SCRAMBLE_DELAYED_REFS
        delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
 #endif
+       trans->can_flush_pending_bgs = false;
        ret = __btrfs_run_delayed_refs(trans, root, count);
        if (ret < 0) {
                btrfs_abort_transaction(trans, root, ret);
@@ -2893,6 +2895,7 @@ again:
        }
 out:
        assert_qgroups_uptodate(trans);
+       trans->can_flush_pending_bgs = can_flush_pending_bgs;
        return 0;
 }
 
@@ -4306,7 +4309,8 @@ out:
         * the block groups that were made dirty during the lifetime of the
         * transaction.
         */
-       if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
+       if (trans->can_flush_pending_bgs &&
+           trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
                btrfs_create_pending_block_groups(trans, trans->root);
                btrfs_trans_release_chunk_metadata(trans);
        }
@@ -9560,7 +9564,9 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
        struct btrfs_block_group_item item;
        struct btrfs_key key;
        int ret = 0;
+       bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
 
+       trans->can_flush_pending_bgs = false;
        list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
                if (ret)
                        goto next;
@@ -9581,6 +9587,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 next:
                list_del_init(&block_group->bg_list);
        }
+       trans->can_flush_pending_bgs = can_flush_pending_bgs;
 }
 
 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
index e2357e31609a2e8469b38c7e95b66f6dd68fcd93..3915c9473e9445d4aeada81c8fb96af7fb521f2c 100644 (file)
@@ -3132,12 +3132,12 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
                                             get_extent_t *get_extent,
                                             struct extent_map **em_cached,
                                             struct bio **bio, int mirror_num,
-                                            unsigned long *bio_flags, int rw)
+                                            unsigned long *bio_flags, int rw,
+                                            u64 *prev_em_start)
 {
        struct inode *inode;
        struct btrfs_ordered_extent *ordered;
        int index;
-       u64 prev_em_start = (u64)-1;
 
        inode = pages[0]->mapping->host;
        while (1) {
@@ -3153,7 +3153,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 
        for (index = 0; index < nr_pages; index++) {
                __do_readpage(tree, pages[index], get_extent, em_cached, bio,
-                             mirror_num, bio_flags, rw, &prev_em_start);
+                             mirror_num, bio_flags, rw, prev_em_start);
                page_cache_release(pages[index]);
        }
 }
@@ -3163,7 +3163,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
                               int nr_pages, get_extent_t *get_extent,
                               struct extent_map **em_cached,
                               struct bio **bio, int mirror_num,
-                              unsigned long *bio_flags, int rw)
+                              unsigned long *bio_flags, int rw,
+                              u64 *prev_em_start)
 {
        u64 start = 0;
        u64 end = 0;
@@ -3184,7 +3185,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
                                                  index - first_index, start,
                                                  end, get_extent, em_cached,
                                                  bio, mirror_num, bio_flags,
-                                                 rw);
+                                                 rw, prev_em_start);
                        start = page_start;
                        end = start + PAGE_CACHE_SIZE - 1;
                        first_index = index;
@@ -3195,7 +3196,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
                __do_contiguous_readpages(tree, &pages[first_index],
                                          index - first_index, start,
                                          end, get_extent, em_cached, bio,
-                                         mirror_num, bio_flags, rw);
+                                         mirror_num, bio_flags, rw,
+                                         prev_em_start);
 }
 
 static int __extent_read_full_page(struct extent_io_tree *tree,
@@ -4207,6 +4209,7 @@ int extent_readpages(struct extent_io_tree *tree,
        struct page *page;
        struct extent_map *em_cached = NULL;
        int nr = 0;
+       u64 prev_em_start = (u64)-1;
 
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                page = list_entry(pages->prev, struct page, lru);
@@ -4223,12 +4226,12 @@ int extent_readpages(struct extent_io_tree *tree,
                if (nr < ARRAY_SIZE(pagepool))
                        continue;
                __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-                                  &bio, 0, &bio_flags, READ);
+                                  &bio, 0, &bio_flags, READ, &prev_em_start);
                nr = 0;
        }
        if (nr)
                __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-                                  &bio, 0, &bio_flags, READ);
+                                  &bio, 0, &bio_flags, READ, &prev_em_start);
 
        if (em_cached)
                free_extent_map(em_cached);
index b823fac91c9289bc67d3bb5191f4ce96e38294ac..8c6f247ba81d4e84c6a7d3d00d0348c974c1b049 100644 (file)
@@ -2584,7 +2584,7 @@ static long btrfs_fallocate(struct file *file, int mode,
                                        alloc_start);
                if (ret)
                        goto out;
-       } else {
+       } else if (offset + len > inode->i_size) {
                /*
                 * If we are fallocating from the end of the file onward we
                 * need to zero out the end of the page if i_size lands in the
index 0adf5422fce9d4b9fc62c2bbf319429b38aaec7c..8d20f3b1cab0abfa7b5f2bc4b8646c094a9353a5 100644 (file)
@@ -4639,6 +4639,11 @@ locked:
                bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
        }
 
+       if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
+               ret = -EINVAL;
+               goto out_bctl;
+       }
+
 do_balance:
        /*
         * Ownership of bctl and mutually_exclusive_operation_running
@@ -4650,12 +4655,15 @@ do_balance:
        need_unlock = false;
 
        ret = btrfs_balance(bctl, bargs);
+       bctl = NULL;
 
        if (arg) {
                if (copy_to_user(arg, bargs, sizeof(*bargs)))
                        ret = -EFAULT;
        }
 
+out_bctl:
+       kfree(bctl);
 out_bargs:
        kfree(bargs);
 out_unlock:
index aa72bfd28f7dcbd88c73452aafd2a3d9e7f42e00..a739b825bdd364cfa9cbf16edc9f978a68feb95f 100644 (file)
@@ -1920,10 +1920,12 @@ static int did_overwrite_ref(struct send_ctx *sctx,
        /*
         * We know that it is or will be overwritten. Check this now.
         * The current inode being processed might have been the one that caused
-        * inode 'ino' to be orphanized, therefore ow_inode can actually be the
-        * same as sctx->send_progress.
+        * inode 'ino' to be orphanized, therefore check if ow_inode matches
+        * the current inode being processed.
         */
-       if (ow_inode <= sctx->send_progress)
+       if ((ow_inode < sctx->send_progress) ||
+           (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
+            gen == sctx->cur_inode_gen))
                ret = 1;
        else
                ret = 0;
index 74bc3338418be39badb2eb73160c20b3e2240c74..a5b06442f0bf9d1630f201da3e0eb5c0422e8cc9 100644 (file)
@@ -557,6 +557,7 @@ again:
        h->delayed_ref_elem.seq = 0;
        h->type = type;
        h->allocating_chunk = false;
+       h->can_flush_pending_bgs = true;
        h->reloc_reserved = false;
        h->sync = false;
        INIT_LIST_HEAD(&h->qgroup_ref_list);
index 87964bf8892d50f1da01abb725a1d3f6286279f9..a994bb097ee59c12bb0d5599f10c8a64b1954f43 100644 (file)
@@ -118,6 +118,7 @@ struct btrfs_trans_handle {
        short aborted;
        short adding_csums;
        bool allocating_chunk;
+       bool can_flush_pending_bgs;
        bool reloc_reserved;
        bool sync;
        unsigned int type;
index 2ca784a14e84bc2a00d0c3d1ec1a15290128edfc..595279a8b99fd461e24cb24df3805fa8401f3dd6 100644 (file)
@@ -376,6 +376,14 @@ struct map_lookup {
 #define BTRFS_BALANCE_ARGS_VRANGE      (1ULL << 4)
 #define BTRFS_BALANCE_ARGS_LIMIT       (1ULL << 5)
 
+#define BTRFS_BALANCE_ARGS_MASK                        \
+       (BTRFS_BALANCE_ARGS_PROFILES |          \
+        BTRFS_BALANCE_ARGS_USAGE |             \
+        BTRFS_BALANCE_ARGS_DEVID |             \
+        BTRFS_BALANCE_ARGS_DRANGE |            \
+        BTRFS_BALANCE_ARGS_VRANGE |            \
+        BTRFS_BALANCE_ARGS_LIMIT)
+
 /*
  * Profile changing flags.  When SOFT is set we won't relocate chunk if
  * it already has the target profile (even though it may be
index 27aea110e92365e1e91610579369215cc54644ea..c3cc1609025fa3a966c2d5b10f32626214a9e4ef 100644 (file)
@@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.07"
+#define CIFS_VERSION   "2.08"
 #endif                         /* _CIFSFS_H */
index e2a6af1508af2aef789d0caab21fedfa91d49c60..62203c387db45a23b05c1cadcc0946843ea5332f 100644 (file)
@@ -3380,6 +3380,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
        struct page *page, *tpage;
        unsigned int expected_index;
        int rc;
+       gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 
        INIT_LIST_HEAD(tmplist);
 
@@ -3392,7 +3393,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
         */
        __set_page_locked(page);
        rc = add_to_page_cache_locked(page, mapping,
-                                     page->index, GFP_KERNEL);
+                                     page->index, gfp);
 
        /* give up if we can't stick it in the cache */
        if (rc) {
@@ -3418,8 +3419,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
                        break;
 
                __set_page_locked(page);
-               if (add_to_page_cache_locked(page, mapping, page->index,
-                                                               GFP_KERNEL)) {
+               if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
                        __clear_page_locked(page);
                        break;
                }
index f621b44cb8009fe87bf631e0a96c941fe63d3408..6b66dd5d15408676ab6510f7ce415164fe5c0571 100644 (file)
@@ -2034,7 +2034,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
        struct tcon_link *tlink = NULL;
        struct cifs_tcon *tcon = NULL;
        struct TCP_Server_Info *server;
-       struct cifs_io_parms io_parms;
 
        /*
         * To avoid spurious oplock breaks from server, in the case of
@@ -2056,18 +2055,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
                        rc = -ENOSYS;
                cifsFileInfo_put(open_file);
                cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc);
-               if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
-                       unsigned int bytes_written;
-
-                       io_parms.netfid = open_file->fid.netfid;
-                       io_parms.pid = open_file->pid;
-                       io_parms.tcon = tcon;
-                       io_parms.offset = 0;
-                       io_parms.length = attrs->ia_size;
-                       rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
-                                         NULL, NULL, 1);
-                       cifs_dbg(FYI, "Wrt seteof rc %d\n", rc);
-               }
        } else
                rc = -EINVAL;
 
@@ -2093,28 +2080,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
        else
                rc = -ENOSYS;
        cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
-       if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
-               __u16 netfid;
-               int oplock = 0;
 
-               rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN,
-                                  GENERIC_WRITE, CREATE_NOT_DIR, &netfid,
-                                  &oplock, NULL, cifs_sb->local_nls,
-                                  cifs_remap(cifs_sb));
-               if (rc == 0) {
-                       unsigned int bytes_written;
-
-                       io_parms.netfid = netfid;
-                       io_parms.pid = current->tgid;
-                       io_parms.tcon = tcon;
-                       io_parms.offset = 0;
-                       io_parms.length = attrs->ia_size;
-                       rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL,
-                                         NULL,  1);
-                       cifs_dbg(FYI, "wrt seteof rc %d\n", rc);
-                       CIFSSMBClose(xid, tcon, netfid);
-               }
-       }
        if (tlink)
                cifs_put_tlink(tlink);
 
index ce83e2edbe0a22ae9858ec5a04caa4e2b6ad59d2..597a417ba94d3bb910f52e3f14119a197ff2d090 100644 (file)
@@ -922,7 +922,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        if (tcon && tcon->bad_network_name)
                return -ENOENT;
 
-       if ((tcon->seal) &&
+       if ((tcon && tcon->seal) &&
            ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
                cifs_dbg(VFS, "encryption requested but no server support");
                return -EOPNOTSUPP;
index 7ae6df7ea1d2d04962ef4554a6a2fa1efb977006..a86d3cc2b38941b0e39f23be84e4986d8852ae42 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -285,6 +285,7 @@ static int copy_user_bh(struct page *to, struct buffer_head *bh,
 static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
                        struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       struct address_space *mapping = inode->i_mapping;
        sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        void __pmem *addr;
@@ -292,6 +293,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
        pgoff_t size;
        int error;
 
+       i_mmap_lock_read(mapping);
+
        /*
         * Check truncate didn't happen while we were allocating a block.
         * If it did, this block may or may not be still allocated to the
@@ -321,6 +324,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
        error = vm_insert_mixed(vma, vaddr, pfn);
 
  out:
+       i_mmap_unlock_read(mapping);
+
        return error;
 }
 
@@ -382,17 +387,15 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                         * from a read fault and we've raced with a truncate
                         */
                        error = -EIO;
-                       goto unlock;
+                       goto unlock_page;
                }
-       } else {
-               i_mmap_lock_write(mapping);
        }
 
        error = get_block(inode, block, &bh, 0);
        if (!error && (bh.b_size < PAGE_SIZE))
                error = -EIO;           /* fs corruption? */
        if (error)
-               goto unlock;
+               goto unlock_page;
 
        if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
                if (vmf->flags & FAULT_FLAG_WRITE) {
@@ -403,9 +406,8 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        if (!error && (bh.b_size < PAGE_SIZE))
                                error = -EIO;
                        if (error)
-                               goto unlock;
+                               goto unlock_page;
                } else {
-                       i_mmap_unlock_write(mapping);
                        return dax_load_hole(mapping, page, vmf);
                }
        }
@@ -417,15 +419,17 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                else
                        clear_user_highpage(new_page, vaddr);
                if (error)
-                       goto unlock;
+                       goto unlock_page;
                vmf->page = page;
                if (!page) {
+                       i_mmap_lock_read(mapping);
                        /* Check we didn't race with truncate */
                        size = (i_size_read(inode) + PAGE_SIZE - 1) >>
                                                                PAGE_SHIFT;
                        if (vmf->pgoff >= size) {
+                               i_mmap_unlock_read(mapping);
                                error = -EIO;
-                               goto unlock;
+                               goto out;
                        }
                }
                return VM_FAULT_LOCKED;
@@ -461,8 +465,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
        }
 
-       if (!page)
-               i_mmap_unlock_write(mapping);
  out:
        if (error == -ENOMEM)
                return VM_FAULT_OOM | major;
@@ -471,14 +473,11 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                return VM_FAULT_SIGBUS | major;
        return VM_FAULT_NOPAGE | major;
 
- unlock:
+ unlock_page:
        if (page) {
                unlock_page(page);
                page_cache_release(page);
-       } else {
-               i_mmap_unlock_write(mapping);
        }
-
        goto out;
 }
 EXPORT_SYMBOL(__dax_fault);
@@ -556,10 +555,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
 
        bh.b_size = PMD_SIZE;
-       i_mmap_lock_write(mapping);
        length = get_block(inode, block, &bh, write);
        if (length)
                return VM_FAULT_SIGBUS;
+       i_mmap_lock_read(mapping);
 
        /*
         * If the filesystem isn't willing to tell us the length of a hole,
@@ -569,24 +568,14 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
                goto fallback;
 
-       if (buffer_unwritten(&bh) || buffer_new(&bh)) {
-               int i;
-               for (i = 0; i < PTRS_PER_PMD; i++)
-                       clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
-               wmb_pmem();
-               count_vm_event(PGMAJFAULT);
-               mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
-               result |= VM_FAULT_MAJOR;
-       }
-
        /*
         * If we allocated new storage, make sure no process has any
         * zero pages covering this hole
         */
        if (buffer_new(&bh)) {
-               i_mmap_unlock_write(mapping);
+               i_mmap_unlock_read(mapping);
                unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
-               i_mmap_lock_write(mapping);
+               i_mmap_lock_read(mapping);
        }
 
        /*
@@ -633,15 +622,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
                        goto fallback;
 
+               if (buffer_unwritten(&bh) || buffer_new(&bh)) {
+                       int i;
+                       for (i = 0; i < PTRS_PER_PMD; i++)
+                               clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
+                       wmb_pmem();
+                       count_vm_event(PGMAJFAULT);
+                       mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+                       result |= VM_FAULT_MAJOR;
+               }
+
                result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
        }
 
  out:
+       i_mmap_unlock_read(mapping);
+
        if (buffer_unwritten(&bh))
                complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
 
-       i_mmap_unlock_write(mapping);
-
        return result;
 
  fallback:
index 47728da7702cdf69d2977af996dea42170a8b07d..b46e9fc641960aeba81b48d61b6e933d724b5205 100644 (file)
@@ -63,7 +63,7 @@ config EXT4_FS
          If unsure, say N.
 
 config EXT4_USE_FOR_EXT2
-       bool "Use ext4 for ext2/ext3 file systems"
+       bool "Use ext4 for ext2 file systems"
        depends on EXT4_FS
        depends on EXT2_FS=n
        default y
index e26803fb210d3bf1134f500b85f43802b2ce7e58..560af043770462df6170acd4d523f9d0385884a3 100644 (file)
@@ -165,8 +165,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
                if (pages) {
                        page = list_entry(pages->prev, struct page, lru);
                        list_del(&page->lru);
-                       if (add_to_page_cache_lru(page, mapping,
-                                                 page->index, GFP_KERNEL))
+                       if (add_to_page_cache_lru(page, mapping, page->index,
+                                       GFP_KERNEL & mapping_gfp_mask(mapping)))
                                goto next_page;
                }
 
index 6c672ad329e9a8c6bf3b7925f9e344ea1c5991c4..c6986dce03348b3e0da89335c7853267e9efa8e8 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -56,6 +56,9 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
        __free_fdtable(container_of(rcu, struct fdtable, rcu));
 }
 
+#define BITBIT_NR(nr)  BITS_TO_LONGS(BITS_TO_LONGS(nr))
+#define BITBIT_SIZE(nr)        (BITBIT_NR(nr) * sizeof(long))
+
 /*
  * Expand the fdset in the files_struct.  Called with the files spinlock
  * held for write.
@@ -77,6 +80,11 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
        memset((char *)(nfdt->open_fds) + cpy, 0, set);
        memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
        memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
+
+       cpy = BITBIT_SIZE(ofdt->max_fds);
+       set = BITBIT_SIZE(nfdt->max_fds) - cpy;
+       memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
+       memset(cpy+(char *)nfdt->full_fds_bits, 0, set);
 }
 
 static struct fdtable * alloc_fdtable(unsigned int nr)
@@ -115,12 +123,14 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
        fdt->fd = data;
 
        data = alloc_fdmem(max_t(size_t,
-                                2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
+                                2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES));
        if (!data)
                goto out_arr;
        fdt->open_fds = data;
        data += nr / BITS_PER_BYTE;
        fdt->close_on_exec = data;
+       data += nr / BITS_PER_BYTE;
+       fdt->full_fds_bits = data;
 
        return fdt;
 
@@ -226,17 +236,22 @@ static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
 
 static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
 {
-       __clear_bit(fd, fdt->close_on_exec);
+       if (test_bit(fd, fdt->close_on_exec))
+               __clear_bit(fd, fdt->close_on_exec);
 }
 
-static inline void __set_open_fd(int fd, struct fdtable *fdt)
+static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
 {
        __set_bit(fd, fdt->open_fds);
+       fd /= BITS_PER_LONG;
+       if (!~fdt->open_fds[fd])
+               __set_bit(fd, fdt->full_fds_bits);
 }
 
-static inline void __clear_open_fd(int fd, struct fdtable *fdt)
+static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
 {
        __clear_bit(fd, fdt->open_fds);
+       __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
 }
 
 static int count_open_files(struct fdtable *fdt)
@@ -280,6 +295,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
        new_fdt->max_fds = NR_OPEN_DEFAULT;
        new_fdt->close_on_exec = newf->close_on_exec_init;
        new_fdt->open_fds = newf->open_fds_init;
+       new_fdt->full_fds_bits = newf->full_fds_bits_init;
        new_fdt->fd = &newf->fd_array[0];
 
        spin_lock(&oldf->file_lock);
@@ -323,6 +339,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
 
        memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
        memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);
+       memcpy(new_fdt->full_fds_bits, old_fdt->full_fds_bits, BITBIT_SIZE(open_files));
 
        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
@@ -454,10 +471,25 @@ struct files_struct init_files = {
                .fd             = &init_files.fd_array[0],
                .close_on_exec  = init_files.close_on_exec_init,
                .open_fds       = init_files.open_fds_init,
+               .full_fds_bits  = init_files.full_fds_bits_init,
        },
        .file_lock      = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
 };
 
+static unsigned long find_next_fd(struct fdtable *fdt, unsigned long start)
+{
+       unsigned long maxfd = fdt->max_fds;
+       unsigned long maxbit = maxfd / BITS_PER_LONG;
+       unsigned long bitbit = start / BITS_PER_LONG;
+
+       bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
+       if (bitbit > maxfd)
+               return maxfd;
+       if (bitbit > start)
+               start = bitbit;
+       return find_next_zero_bit(fdt->open_fds, maxfd, start);
+}
+
 /*
  * allocate a file descriptor, mark it busy.
  */
@@ -476,7 +508,7 @@ repeat:
                fd = files->next_fd;
 
        if (fd < fdt->max_fds)
-               fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);
+               fd = find_next_fd(fdt, fd);
 
        /*
         * N.B. For clone tasks sharing a files structure, this test
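Aside (illustration only, user-space toy, not the kernel implementation): the new full_fds_bits array is a second-level bitmap with one bit per word of open_fds, set whenever that word has no free descriptors, so find_next_fd() can skip fully populated words instead of scanning them bit by bit. Roughly:

/* Toy model of the two-level fd lookup; signature and names are made up. */
#include <limits.h>

#define WORD_BITS (sizeof(unsigned long) * CHAR_BIT)

static unsigned long toy_find_next_fd(const unsigned long *open_fds,
				      const unsigned long *full_fds_bits,
				      unsigned long maxfd, unsigned long start)
{
	unsigned long word = start / WORD_BITS;
	unsigned long fd;

	/* Level 1: skip open_fds words whose "full" summary bit is set. */
	while ((word + 1) * WORD_BITS <= maxfd &&
	       (full_fds_bits[word / WORD_BITS] >> (word % WORD_BITS)) & 1UL)
		word++;

	if (word * WORD_BITS > start)
		start = word * WORD_BITS;

	/* Level 2: ordinary scan of open_fds for a clear bit. */
	for (fd = start; fd < maxfd; fd++)
		if (!((open_fds[fd / WORD_BITS] >> (fd % WORD_BITS)) & 1UL))
			return fd;

	return maxfd;	/* caller treats maxfd as "table full, expand it" */
}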
index 091a36444972fa0b746118b0cc58ca2a3dcaa97f..7378169e90be6ed485ac48d0cf633c8e37c4c3d2 100644 (file)
@@ -778,19 +778,24 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
                                  struct wb_writeback_work *base_work,
                                  bool skip_if_busy)
 {
-       int next_memcg_id = 0;
-       struct bdi_writeback *wb;
-       struct wb_iter iter;
+       struct bdi_writeback *last_wb = NULL;
+       struct bdi_writeback *wb = list_entry(&bdi->wb_list,
+                                             struct bdi_writeback, bdi_node);
 
        might_sleep();
 restart:
        rcu_read_lock();
-       bdi_for_each_wb(wb, bdi, &iter, next_memcg_id) {
+       list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
                DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
                struct wb_writeback_work fallback_work;
                struct wb_writeback_work *work;
                long nr_pages;
 
+               if (last_wb) {
+                       wb_put(last_wb);
+                       last_wb = NULL;
+               }
+
                /* SYNC_ALL writes out I_DIRTY_TIME too */
                if (!wb_has_dirty_io(wb) &&
                    (base_work->sync_mode == WB_SYNC_NONE ||
@@ -819,12 +824,22 @@ restart:
 
                wb_queue_work(wb, work);
 
-               next_memcg_id = wb->memcg_css->id + 1;
+               /*
+                * Pin @wb so that it stays on @bdi->wb_list.  This allows
+                * continuing iteration from @wb after dropping and
+                * regrabbing rcu read lock.
+                */
+               wb_get(wb);
+               last_wb = wb;
+
                rcu_read_unlock();
                wb_wait_for_completion(bdi, &fallback_work_done);
                goto restart;
        }
        rcu_read_unlock();
+
+       if (last_wb)
+               wb_put(last_wb);
 }
 
 #else  /* CONFIG_CGROUP_WRITEBACK */
@@ -1857,12 +1872,11 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
        rcu_read_lock();
        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                struct bdi_writeback *wb;
-               struct wb_iter iter;
 
                if (!bdi_has_dirty_io(bdi))
                        continue;
 
-               bdi_for_each_wb(wb, bdi, &iter, 0)
+               list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
                        wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages),
                                           false, reason);
        }
@@ -1894,11 +1908,10 @@ static void wakeup_dirtytime_writeback(struct work_struct *w)
        rcu_read_lock();
        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                struct bdi_writeback *wb;
-               struct wb_iter iter;
 
-               bdi_for_each_wb(wb, bdi, &iter, 0)
-                       if (!list_empty(&bdi->wb.b_dirty_time))
-                               wb_wakeup(&bdi->wb);
+               list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
+                       if (!list_empty(&wb->b_dirty_time))
+                               wb_wakeup(wb);
        }
        rcu_read_unlock();
        schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
index 778a4ddef77a21844b08af82058d3b188371dc01..a7c34274f2076bc36e9cb9797f682988e617417f 100644 (file)
@@ -139,7 +139,8 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 static struct bio *
 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                sector_t *last_block_in_bio, struct buffer_head *map_bh,
-               unsigned long *first_logical_block, get_block_t get_block)
+               unsigned long *first_logical_block, get_block_t get_block,
+               gfp_t gfp)
 {
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
@@ -277,8 +278,7 @@ alloc_new:
                                goto out;
                }
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-                               min_t(int, nr_pages, BIO_MAX_PAGES),
-                               GFP_KERNEL);
+                               min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
                if (bio == NULL)
                        goto confused;
        }
@@ -361,6 +361,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
+       gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 
        map_bh.b_state = 0;
        map_bh.b_size = 0;
@@ -370,12 +371,13 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
                prefetchw(&page->flags);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping,
-                                       page->index, GFP_KERNEL)) {
+                                       page->index,
+                                       gfp)) {
                        bio = do_mpage_readpage(bio, page,
                                        nr_pages - page_idx,
                                        &last_block_in_bio, &map_bh,
                                        &first_logical_block,
-                                       get_block);
+                                       get_block, gfp);
                }
                page_cache_release(page);
        }
@@ -395,11 +397,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
+       gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping);
 
        map_bh.b_state = 0;
        map_bh.b_size = 0;
        bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
-                       &map_bh, &first_logical_block, get_block);
+                       &map_bh, &first_logical_block, get_block, gfp);
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
index 726d211db4842715f71e1911f6940c93b19fe57f..33e9495a31293e2c080b5b0bd2e50523a460ceee 100644 (file)
@@ -1558,8 +1558,6 @@ static int lookup_fast(struct nameidata *nd,
                negative = d_is_negative(dentry);
                if (read_seqcount_retry(&dentry->d_seq, seq))
                        return -ECHILD;
-               if (negative)
-                       return -ENOENT;
 
                /*
                 * This sequence count validates that the parent had no
@@ -1580,6 +1578,12 @@ static int lookup_fast(struct nameidata *nd,
                                goto unlazy;
                        }
                }
+               /*
+                * Note: do negative dentry check after revalidation in
+                * case that drops it.
+                */
+               if (negative)
+                       return -ENOENT;
                path->mnt = mnt;
                path->dentry = dentry;
                if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
index f93b9cdb4934d17739bf4c6442d79bbfe32dcf13..5133bb18830e8c8b97e68e8f2c55d617ff92a321 100644 (file)
@@ -1458,12 +1458,18 @@ nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
        if (delegation)
                delegation_flags = delegation->flags;
        rcu_read_unlock();
-       if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+       switch (data->o_arg.claim) {
+       default:
+               break;
+       case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+       case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
                pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
                                   "returning a delegation for "
                                   "OPEN(CLAIM_DELEGATE_CUR)\n",
                                   clp->cl_hostname);
-       } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+               return;
+       }
+       if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
                nfs_inode_set_delegation(state->inode,
                                         data->owner->so_cred,
                                         &data->o_res);
@@ -1771,6 +1777,9 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
        if (IS_ERR(opendata))
                return PTR_ERR(opendata);
        nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
+       write_seqlock(&state->seqlock);
+       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+       write_sequnlock(&state->seqlock);
        clear_bit(NFS_DELEGATED_STATE, &state->flags);
        switch (type & (FMODE_READ|FMODE_WRITE)) {
        case FMODE_READ|FMODE_WRITE:
@@ -1863,6 +1872,8 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
        data->rpc_done = 0;
        data->rpc_status = 0;
        data->timestamp = jiffies;
+       if (data->is_recover)
+               nfs4_set_sequence_privileged(&data->c_arg.seq_args);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
index 5db324635e920a51923b37c3d22c9d3dee2f6682..d854693a15b0e2443779986552d29d9db3f6cdc2 100644 (file)
@@ -1725,7 +1725,8 @@ restart:
                        if (!test_and_clear_bit(ops->owner_flag_bit,
                                                        &sp->so_flags))
                                continue;
-                       atomic_inc(&sp->so_count);
+                       if (!atomic_inc_not_zero(&sp->so_count))
+                               continue;
                        spin_unlock(&clp->cl_lock);
                        rcu_read_unlock();
 
index 28df12e525bac5857c0d41aba62d558db82f526a..671cf68fe56bed7a457fddd4ccdd5913509ff1bd 100644 (file)
@@ -409,7 +409,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
                        __entry->flags = flags;
                        __entry->fmode = (__force unsigned int)ctx->mode;
                        __entry->dev = ctx->dentry->d_sb->s_dev;
-                       if (!IS_ERR(state))
+                       if (!IS_ERR_OR_NULL(state))
                                inode = state->inode;
                        if (inode != NULL) {
                                __entry->fileid = NFS_FILEID(inode);
index 72624dc4a623b894ca0be949c5feab1cec455e02..75ab7622e0cc193bab28f2ba5bb56d37e5f49465 100644 (file)
@@ -569,19 +569,17 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
        if (!nfs_pageio_add_request(pgio, req)) {
                nfs_redirty_request(req);
                ret = pgio->pg_error;
-       }
+       } else
+               nfs_add_stats(page_file_mapping(page)->host,
+                               NFSIOS_WRITEPAGES, 1);
 out:
        return ret;
 }
 
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
-       struct inode *inode = page_file_mapping(page)->host;
        int ret;
 
-       nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
-       nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
-
        nfs_pageio_cond_complete(pgio, page_file_index(page));
        ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
        if (ret == -EAGAIN) {
@@ -597,9 +595,11 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
 {
        struct nfs_pageio_descriptor pgio;
+       struct inode *inode = page_file_mapping(page)->host;
        int err;
 
-       nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
+       nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+       nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
                                false, &nfs_async_write_completion_ops);
        err = nfs_do_writepage(page, wbc, &pgio);
        nfs_pageio_complete(&pgio);
@@ -1223,7 +1223,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
                return 1;
        if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
                       list_empty_careful(&flctx->flc_posix)))
-               return 0;
+               return 1;
 
        /* Check to see if there are whole file write locks */
        ret = 0;
index cdefaa331a0719e88df91ef7c04c32706ae199a1..c29d9421bd5e1f8c890178c6ec961899b319ceef 100644 (file)
@@ -56,14 +56,6 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
        u32 device_generation = 0;
        int error;
 
-       /*
-        * We do not attempt to support I/O smaller than the fs block size,
-        * or not aligned to it.
-        */
-       if (args->lg_minlength < block_size) {
-               dprintk("pnfsd: I/O too small\n");
-               goto out_layoutunavailable;
-       }
        if (seg->offset & (block_size - 1)) {
                dprintk("pnfsd: I/O misaligned\n");
                goto out_layoutunavailable;
index ee5aa4daaea0dbf6c1840649ac505f60313aa8dd..ce38b4ccc9ab6c52c796a148b496bab0d8d46510 100644 (file)
@@ -1658,12 +1658,13 @@ send_response:
                if (ret < 0) {
                        mlog(ML_ERROR, "failed to dispatch assert master work\n");
                        response = DLM_MASTER_RESP_ERROR;
+                       spin_unlock(&res->spinlock);
                        dlm_lockres_put(res);
                } else {
                        dispatched = 1;
                        __dlm_lockres_grab_inflight_worker(dlm, res);
+                       spin_unlock(&res->spinlock);
                }
-               spin_unlock(&res->spinlock);
        } else {
                if (res)
                        dlm_lockres_put(res);
index 3d90ad7ff91fe4748386218807f7131bd61d7d69..58eaa5c0d387051301a089f0d234d4b69bf18d51 100644 (file)
@@ -1723,8 +1723,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
                        } else {
                                dispatched = 1;
                                __dlm_lockres_grab_inflight_worker(dlm, res);
+                               spin_unlock(&res->spinlock);
                        }
-                       spin_unlock(&res->spinlock);
                } else {
                        /* put.. incase we are not the master */
                        spin_unlock(&res->spinlock);
index 84d693d374284b580208fec3b8eb3c57bdd4195c..871fcb67be9741f2aab81f3d6552306dedf4c967 100644 (file)
@@ -81,11 +81,11 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
        if (len == 0)
                return 0;
 
-       old_file = ovl_path_open(old, O_RDONLY);
+       old_file = ovl_path_open(old, O_LARGEFILE | O_RDONLY);
        if (IS_ERR(old_file))
                return PTR_ERR(old_file);
 
-       new_file = ovl_path_open(new, O_WRONLY);
+       new_file = ovl_path_open(new, O_LARGEFILE | O_WRONLY);
        if (IS_ERR(new_file)) {
                error = PTR_ERR(new_file);
                goto out_fput;
@@ -267,7 +267,7 @@ out:
 
 out_cleanup:
        ovl_cleanup(wdir, newdentry);
-       goto out;
+       goto out2;
 }
 
 /*
index d9da5a4e93821ddbee9f2fb449666c207417e523..ec0c2a050043afbb3eff4c7451930dc6d86006ec 100644 (file)
@@ -363,6 +363,9 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
                ovl_path_upper(dentry, &realpath);
        }
 
+       if (realpath.dentry->d_flags & DCACHE_OP_SELECT_INODE)
+               return realpath.dentry->d_op->d_select_inode(realpath.dentry, file_flags);
+
        return d_backing_inode(realpath.dentry);
 }
 
index 79073d68b475d71b0f87902550b3eeef945885b5..e38ee0fed24a2f7a343f294a793b6abea93f6f97 100644 (file)
@@ -544,6 +544,7 @@ static void ovl_put_super(struct super_block *sb)
        mntput(ufs->upper_mnt);
        for (i = 0; i < ufs->numlower; i++)
                mntput(ufs->lower_mnt[i]);
+       kfree(ufs->lower_mnt);
 
        kfree(ufs->config.lowerdir);
        kfree(ufs->config.upperdir);
@@ -1048,6 +1049,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                oe->lowerstack[i].dentry = stack[i].dentry;
                oe->lowerstack[i].mnt = ufs->lower_mnt[i];
        }
+       kfree(stack);
 
        root_dentry->d_fsdata = oe;
 
index f60f0121e3319ec616d0a0151d3e4df146637c88..eed2050db9be9c7795acd2153f976d4742e2fe82 100644 (file)
@@ -375,7 +375,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task, int whole)
 {
-       unsigned long vsize, eip, esp, wchan = ~0UL;
+       unsigned long vsize, eip, esp, wchan = 0;
        int priority, nice;
        int tty_pgrp = -1, tty_nr = 0;
        sigset_t sigign, sigcatch;
@@ -507,7 +507,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL);
        seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL);
        seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL);
-       seq_put_decimal_ull(m, ' ', wchan);
+
+       /*
+        * We used to output the absolute kernel address, but that's an
+        * information leak - so instead we show a 0/1 flag here, to signal
+        * to user-space whether there's a wchan field in /proc/PID/wchan.
+        *
+        * This works with older implementations of procps as well.
+        */
+       if (wchan)
+               seq_puts(m, " 1");
+       else
+               seq_puts(m, " 0");
+
        seq_put_decimal_ull(m, ' ', 0);
        seq_put_decimal_ull(m, ' ', 0);
        seq_put_decimal_ll(m, ' ', task->exit_signal);
index b25eee4cead5398b69889c95c29480ba9862c397..29595af328669543a70115a7613539a48f6e4226 100644 (file)
@@ -430,13 +430,10 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
 
        wchan = get_wchan(task);
 
-       if (lookup_symbol_name(wchan, symname) < 0) {
-               if (!ptrace_may_access(task, PTRACE_MODE_READ))
-                       return 0;
-               seq_printf(m, "%lu", wchan);
-       } else {
+       if (wchan && ptrace_may_access(task, PTRACE_MODE_READ) && !lookup_symbol_name(wchan, symname))
                seq_printf(m, "%s", symname);
-       }
+       else
+               seq_putc(m, '0');
 
        return 0;
 }
index d3ebf2e618535bd2fa7daa0bfe948cb1a9bfc062..9155a5a0d3b9de6114e32d4108e67d7c038f0e20 100644 (file)
@@ -27,7 +27,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 {
        struct sysinfo i;
        unsigned long committed;
-       struct vmalloc_info vmi;
        long cached;
        long available;
        unsigned long pagecache;
@@ -49,8 +48,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
        if (cached < 0)
                cached = 0;
 
-       get_vmalloc_info(&vmi);
-
        for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
                pages[lru] = global_page_state(NR_LRU_BASE + lru);
 
@@ -191,8 +188,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                K(vm_commit_limit()),
                K(committed),
                (unsigned long)VMALLOC_TOTAL >> 10,
-               vmi.used >> 10,
-               vmi.largest_chunk >> 10
+               0ul, // used to be vmalloc 'used'
+               0ul  // used to be vmalloc 'largest_chunk'
 #ifdef CONFIG_MEMORY_FAILURE
                , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
 #endif
index ba1323a94924962299d27cbe67d76ff4e0056bb9..a586467f6ff6c73a4f31234fd96fce0a7b0dc34b 100644 (file)
@@ -70,6 +70,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
        unsigned order;
        void *data;
        int ret;
+       gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
 
        /* make various checks */
        order = get_order(newsize);
@@ -84,7 +85,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 
        /* allocate enough contiguous pages to be able to satisfy the
         * request */
-       pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
+       pages = alloc_pages(gfp, order);
        if (!pages)
                return -ENOMEM;
 
@@ -108,7 +109,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
                struct page *page = pages + loop;
 
                ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
-                                       GFP_KERNEL);
+                                       gfp);
                if (ret < 0)
                        goto add_error;
 
index 96f3448b6eb40c0682ec915a71d5030d5bdf4283..fd65b3f1923ccdb609ee29601604624abf32333d 100644 (file)
@@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode,
 {
        int err;
 
-       mutex_lock(&inode->i_mutex);
        err = security_inode_init_security(inode, dentry, qstr,
                                           &init_xattrs, 0);
-       mutex_unlock(&inode->i_mutex);
-
        if (err) {
                struct ubifs_info *c = dentry->i_sb->s_fs_info;
                ubifs_err(c, "cannot initialize security for inode %lu, error %d",
index a94cbebbc33df5dab756ce6fce0a739a13212784..eb1973bad80b11376c1f9dcbc0f9f2b7c3eff0cb 100644 (file)
@@ -35,7 +35,7 @@ typedef atomic_t atomic_long_t;
 #endif
 
 #define ATOMIC_LONG_READ_OP(mo)                                                \
-static inline long atomic_long_read##mo(atomic_long_t *l)              \
+static inline long atomic_long_read##mo(const atomic_long_t *l)                \
 {                                                                      \
        ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
                                                                        \
@@ -112,19 +112,23 @@ static inline void atomic_long_dec(atomic_long_t *l)
        ATOMIC_LONG_PFX(_dec)(v);
 }
 
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-       ATOMIC_LONG_PFX(_add)(i, v);
+#define ATOMIC_LONG_OP(op)                                             \
+static inline void                                                     \
+atomic_long_##op(long i, atomic_long_t *l)                             \
+{                                                                      \
+       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
+                                                                       \
+       ATOMIC_LONG_PFX(_##op)(i, v);                                   \
 }
 
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+ATOMIC_LONG_OP(add)
+ATOMIC_LONG_OP(sub)
+ATOMIC_LONG_OP(and)
+ATOMIC_LONG_OP(or)
+ATOMIC_LONG_OP(xor)
+ATOMIC_LONG_OP(andnot)
 
-       ATOMIC_LONG_PFX(_sub)(i, v);
-}
+#undef ATOMIC_LONG_OP
 
 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
 {
@@ -154,19 +158,24 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
        return ATOMIC_LONG_PFX(_add_negative)(i, v);
 }
 
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-       return (long)ATOMIC_LONG_PFX(_inc_return)(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-       return (long)ATOMIC_LONG_PFX(_dec_return)(v);
+#define ATOMIC_LONG_INC_DEC_OP(op, mo)                                 \
+static inline long                                                     \
+atomic_long_##op##_return##mo(atomic_long_t *l)                                \
+{                                                                      \
+       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
+                                                                       \
+       return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v);            \
 }
+ATOMIC_LONG_INC_DEC_OP(inc,)
+ATOMIC_LONG_INC_DEC_OP(inc, _relaxed)
+ATOMIC_LONG_INC_DEC_OP(inc, _acquire)
+ATOMIC_LONG_INC_DEC_OP(inc, _release)
+ATOMIC_LONG_INC_DEC_OP(dec,)
+ATOMIC_LONG_INC_DEC_OP(dec, _relaxed)
+ATOMIC_LONG_INC_DEC_OP(dec, _acquire)
+ATOMIC_LONG_INC_DEC_OP(dec, _release)
+
+#undef ATOMIC_LONG_INC_DEC_OP
 
 static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 {
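Aside (sketch, assuming a 64-bit build where ATOMIC_LONG_PFX(x) pastes to atomic64##x): one instance of each new generator expands to roughly the following, which is why the open-coded add/sub and inc_return/dec_return bodies above could be removed:

/* What ATOMIC_LONG_OP(and) generates, approximately: */
static inline void atomic_long_and(long i, atomic_long_t *l)
{
	atomic64_t *v = (atomic64_t *)l;

	atomic64_and(i, v);
}

/* What ATOMIC_LONG_INC_DEC_OP(inc, _acquire) generates, approximately: */
static inline long atomic_long_inc_return_acquire(atomic_long_t *l)
{
	atomic64_t *v = (atomic64_t *)l;

	return (long)atomic64_inc_return_acquire(v);
}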
index d4d7e337fdcb5d7bda73656c1e8b66fee6ff2cc3..74f1a3704d7a1ddf61707b6da03fc2348521e4a1 100644 (file)
@@ -127,7 +127,7 @@ ATOMIC_OP(xor, ^)
  * Atomically reads the value of @v.
  */
 #ifndef atomic_read
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
 #endif
 
 /**
@@ -137,7 +137,7 @@ ATOMIC_OP(xor, ^)
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
 #include <linux/irqflags.h>
 
index d4f9fb4e53dfa7ea2709bc63f5c98f7c72e64cf0..fd694cfd678af712b2c3ca1055331df24a8db8ba 100644 (file)
@@ -20,7 +20,7 @@
 static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-       if (unlikely(atomic_dec_return(count) < 0))
+       if (unlikely(atomic_dec_return_acquire(count) < 0))
                fail_fn(count);
 }
 
@@ -35,7 +35,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count)
 {
-       if (unlikely(atomic_dec_return(count) < 0))
+       if (unlikely(atomic_dec_return_acquire(count) < 0))
                return -1;
        return 0;
 }
@@ -56,7 +56,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-       if (unlikely(atomic_inc_return(count) <= 0))
+       if (unlikely(atomic_inc_return_release(count) <= 0))
                fail_fn(count);
 }
 
@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-       if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+       if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
                return 1;
        return 0;
 }
index f169ec064785beea6c8ce4f1bee1d193a9faf079..a6b4a7bd6ac9770356e066c51f295c6b9c33793f 100644 (file)
@@ -31,7 +31,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
                 * to ensure that any waiting tasks are woken up by the
                 * unlock slow path.
                 */
-               if (likely(atomic_xchg(count, -1) != 1))
+               if (likely(atomic_xchg_acquire(count, -1) != 1))
                        fail_fn(count);
 }
 
@@ -46,7 +46,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count)
 {
-       if (unlikely(atomic_xchg(count, 0) != 1))
+       if (unlikely(atomic_xchg_acquire(count, 0) != 1))
                if (likely(atomic_xchg(count, -1) != 1))
                        return -1;
        return 0;
@@ -67,7 +67,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-       if (unlikely(atomic_xchg(count, 1) != 0))
+       if (unlikely(atomic_xchg_release(count, 1) != 0))
                fail_fn(count);
 }
 
@@ -91,7 +91,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-       int prev = atomic_xchg(count, 0);
+       int prev = atomic_xchg_acquire(count, 0);
 
        if (unlikely(prev < 0)) {
                /*
@@ -105,7 +105,7 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
                 *   owner's unlock path needlessly, but that's not a problem
                 *   in practice. ]
                 */
-               prev = atomic_xchg(count, prev);
+               prev = atomic_xchg_acquire(count, prev);
                if (prev < 0)
                        prev = 0;
        }
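
Taken together, the two mutex fastpath headers above give lock operations ACQUIRE and unlock operations RELEASE semantics. A sketch of the ordering contract, using a hypothetical shared_data variable that is not part of the patch:

static DEFINE_MUTEX(m);
static int shared_data;

static void writer(void)
{
	mutex_lock(&m);		/* fastpath: atomic_xchg_acquire() / atomic_dec_return_acquire() */
	shared_data = 1;	/* cannot be reordered before the acquire above */
	mutex_unlock(&m);	/* fastpath: atomic_xchg_release() / atomic_inc_return_release();
				 * the store to shared_data is ordered before the unlock */
}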
index 29c57b2cb344dfc0bd73034927bfa50848d6709b..3eabbbbfd5780adf2e8c5fc76bf2632d7cbb2dc4 100644 (file)
@@ -30,9 +30,19 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma,
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);
+#else
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+                                       unsigned long address, pmd_t *pmdp,
+                                       pmd_t entry, int dirty)
+{
+       BUILD_BUG();
+       return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -64,12 +74,12 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
        return r;
 }
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+#else
 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address,
                                            pmd_t *pmdp)
 {
-       BUG();
+       BUILD_BUG();
        return 0;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -81,8 +91,21 @@ int ptep_clear_flush_young(struct vm_area_struct *vma,
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
-int pmdp_clear_flush_young(struct vm_area_struct *vma,
-                          unsigned long address, pmd_t *pmdp);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+                                 unsigned long address, pmd_t *pmdp);
+#else
+/*
+ * Despite being relevant only to THP, this API is called from generic rmap
+ * code under PageTransHuge(), hence it needs a dummy implementation for !THP
+ */
+static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
+                                        unsigned long address, pmd_t *pmdp)
+{
+       BUILD_BUG();
+       return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -175,11 +198,11 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
        pmd_t old_pmd = *pmdp;
        set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
 }
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+#else
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pmd_t *pmdp)
 {
-       BUG();
+       BUILD_BUG();
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
@@ -248,7 +271,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 {
-       BUG();
+       BUILD_BUG();
        return 0;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
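
A sketch of the call pattern that makes BUILD_BUG() safe in the !THP stubs above; the function name is hypothetical, but the shape matches the rmap callers. Under !CONFIG_TRANSPARENT_HUGEPAGE, PageTransHuge() is constant 0, so the THP branch is eliminated at compile time and the BUILD_BUG() never reaches the object file, whereas the old BUG() compiled to a runtime trap that could never legitimately fire:

static int clear_young_sketch(struct page *page, struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmd, pte_t *pte)
{
	if (PageTransHuge(page))
		return pmdp_clear_flush_young(vma, address, pmd);	/* compiled out for !THP */
	return ptep_clear_flush_young(vma, address, pte);
}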
index 0bec580a48854f0a49b012b203663ebeddf98257..5d8ffa3e6f8c8a4e3715f3da4294abc860390386 100644 (file)
@@ -24,7 +24,7 @@ static __always_inline void preempt_count_set(int pc)
  * must be macros to avoid header recursion hell
  */
 #define init_task_preempt_count(p) do { \
-       task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
+       task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
 } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
index 4d76f24df5182d9a2736adb987ef7f95866050e6..0abc6b6062fbb1c203bed0f025e05e82ac3eea24 100644 (file)
 
 typedef struct qrwlock {
        atomic_t                cnts;
-       arch_spinlock_t         lock;
+       arch_spinlock_t         wait_lock;
 } arch_rwlock_t;
 
 #define        __ARCH_RW_LOCK_UNLOCKED {               \
        .cnts = ATOMIC_INIT(0),                 \
-       .lock = __ARCH_SPIN_LOCK_UNLOCKED,      \
+       .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
 }
 
 #endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */
index d48bf5a95cc1bd6e2a3f93ebbe6f3022202c1b98..d6d5dc98d7da5509347e56737f3944a37c698490 100644 (file)
@@ -33,7 +33,7 @@
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-       if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
+       if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
                rwsem_down_read_failed(sem);
 }
 
@@ -42,7 +42,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
        long tmp;
 
        while ((tmp = sem->count) >= 0) {
-               if (tmp == cmpxchg(&sem->count, tmp,
+               if (tmp == cmpxchg_acquire(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        return 1;
                }
@@ -57,7 +57,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
        long tmp;
 
-       tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+       tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
                                     (atomic_long_t *)&sem->count);
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
@@ -72,7 +72,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
        long tmp;
 
-       tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+       tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
 }
@@ -84,7 +84,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
        long tmp;
 
-       tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
+       tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
 }
@@ -94,7 +94,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-       if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+       if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
                                 (atomic_long_t *)&sem->count) < 0))
                rwsem_wake(sem);
 }
@@ -114,7 +114,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
        long tmp;
 
-       tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+       /*
+        * When downgrading from exclusive to shared ownership,
+        * anything inside the write-locked region cannot leak
+        * into the read side. In contrast, anything in the
+        * read-locked region is ok to be re-ordered into the
+        * write side. As such, rely on RELEASE semantics.
+        */
+       tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
                                     (atomic_long_t *)&sem->count);
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
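
A usage sketch of the downgrade path whose ordering the new comment describes; the published flag is an illustrative stand-in, not taken from the patch:

static void publish_then_keep_reading(struct rw_semaphore *sem, int *published)
{
	down_write(sem);
	*published = 1;		/* write-side store */
	downgrade_write(sem);	/* RELEASE on sem->count: the store above is
				 * visible to any reader admitted from here on */
	/* still held shared; other readers may now run concurrently */
	up_read(sem);
}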
index 94f9ea8abcae35af8ca36560403fbd25facb7c65..011dde083f231e763e4c96fcc7fb3cb9b6ce23c7 100644 (file)
@@ -1,15 +1,10 @@
 #ifndef _ASM_WORD_AT_A_TIME_H
 #define _ASM_WORD_AT_A_TIME_H
 
-/*
- * This says "generic", but it's actually big-endian only.
- * Little-endian can use more efficient versions of these
- * interfaces, see for example
- *      arch/x86/include/asm/word-at-a-time.h
- * for those.
- */
-
 #include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
 
 struct word_at_a_time {
        const unsigned long high_bits, low_bits;
@@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
 #define zero_bytemask(mask) (~1ul << __fls(mask))
 #endif
 
+#else
+
+/*
+ * The optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+       const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+       return mask*0x0001020304050608ul >> 56;
+}
+
+#else  /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+       /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+       long a = (0x0ff0001+mask) >> 23;
+       /* Fix the 1 for 00 case */
+       return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+       unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+       *bits = mask;
+       return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+       return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+       bits = (bits - 1) & ~bits;
+       return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+       return count_masked_bytes(mask);
+}
+
+#endif /* __BIG_ENDIAN */
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
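
An assumed caller for the little-endian helpers added above, mirroring the pattern used by the dcache and strnlen_user() code; the buffer must be word-aligned and readable up to the terminating word, as those callers guarantee:

static inline size_t wordwise_strlen(const unsigned long *p)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long w, bits;
	size_t len = 0;

	for (;;) {
		w = *p++;
		if (has_zero(w, &bits, &constants)) {
			bits = prep_zero_mask(w, bits, &constants);
			return len + find_zero(create_zero_mask(bits));
		}
		len += sizeof(w);
	}
}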
index 2a747a91fdede982354438e1c48fc1a332d06885..3febb4b9fce9243793fbaf3e8dbb331fe6adf81d 100644 (file)
@@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
 
 #endif
index 499e9f625aeffb2f618458b45121a6a6286e6e3e..0212d139a480909a216da244ffd8aadf6effa630 100644 (file)
 #define MODE_I2C_READ  4
 #define MODE_I2C_STOP  8
 
+/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
+#define DP_MST_PHYSICAL_PORT_0 0
+#define DP_MST_LOGICAL_PORT_0 8
+
 #define DP_LINK_STATUS_SIZE       6
 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                          int lane_count);
index 86d0b25ed0547db2f63606af97d8bf9e966e39db..5340099741aec8c48575b1c1056f23903e150ed6 100644 (file)
@@ -253,6 +253,7 @@ struct drm_dp_remote_dpcd_write {
        u8 *bytes;
 };
 
+#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
 struct drm_dp_remote_i2c_read {
        u8 num_transactions;
        u8 port_number;
@@ -262,7 +263,7 @@ struct drm_dp_remote_i2c_read {
                u8 *bytes;
                u8 no_stop_bit;
                u8 i2c_transaction_delay;
-       } transactions[4];
+       } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
        u8 read_i2c_device_id;
        u8 num_bytes_read;
 };
@@ -374,6 +375,7 @@ struct drm_dp_mst_topology_mgr;
 struct drm_dp_mst_topology_cbs {
        /* create a connector for a port */
        struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
+       void (*register_connector)(struct drm_connector *connector);
        void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_connector *connector);
        void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
index e6b1e0a808aebf8c3f4659026df1c7dd54ebf126..c673d2c87c604ae86fc6bfd523a580308752148a 100644 (file)
@@ -9,7 +9,19 @@
 #ifndef _DT_BINDINGS_GPIO_GPIO_H
 #define _DT_BINDINGS_GPIO_GPIO_H
 
+/* Bit 0 expresses polarity */
 #define GPIO_ACTIVE_HIGH 0
 #define GPIO_ACTIVE_LOW 1
 
+/* Bit 1 expresses single-endedness */
+#define GPIO_PUSH_PULL 0
+#define GPIO_SINGLE_ENDED 2
+
+/*
+ * Open Drain/Collector is the combination of single-ended active low,
+ * Open Source/Emitter is the combination of single-ended active high.
+ */
+#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_ACTIVE_LOW)
+#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_ACTIVE_HIGH)
+
 #endif
diff --git a/include/dt-bindings/leds/leds-netxbig.h b/include/dt-bindings/leds/leds-netxbig.h
new file mode 100644 (file)
index 0000000..92658b0
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * This header provides constants for netxbig LED bindings.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _DT_BINDINGS_LEDS_NETXBIG_H
+#define _DT_BINDINGS_LEDS_NETXBIG_H
+
+#define NETXBIG_LED_OFF                0
+#define NETXBIG_LED_ON         1
+#define NETXBIG_LED_SATA       2
+#define NETXBIG_LED_TIMER1     3
+#define NETXBIG_LED_TIMER2     4
+
+#endif /* _DT_BINDINGS_LEDS_NETXBIG_H */
index 4e14dac282bb6c963440593090bc060afecf8130..6a3538ef72753cd5ef70ac23605cf1484f852774 100644 (file)
@@ -282,7 +282,7 @@ struct vgic_v2_cpu_if {
 };
 
 struct vgic_v3_cpu_if {
-#ifdef CONFIG_ARM_GIC_V3
+#ifdef CONFIG_KVM_ARM_VGIC_V3
        u32             vgic_hcr;
        u32             vgic_vmcr;
        u32             vgic_sre;       /* Restored only, change ignored */
@@ -364,7 +364,7 @@ void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active);
 int vgic_v2_probe(struct device_node *vgic_node,
                  const struct vgic_ops **ops,
                  const struct vgic_params **params);
-#ifdef CONFIG_ARM_GIC_V3
+#ifdef CONFIG_KVM_ARM_VGIC_V3
 int vgic_v3_probe(struct device_node *vgic_node,
                  const struct vgic_ops **ops,
                  const struct vgic_params **params);
index 7235c4851460e6dc79d6d95a53d368b8f06e3525..d863e12bbead6eb6161ffce64b791e73dca7fd27 100644 (file)
@@ -201,6 +201,9 @@ int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity
 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
 int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
 
+void acpi_set_irq_model(enum acpi_irq_model_id model,
+                       struct fwnode_handle *fwnode);
+
 #ifdef CONFIG_X86_IO_APIC
 extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
 #else
@@ -217,6 +220,7 @@ struct pci_dev;
 
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
+bool acpi_isa_irq_available(int irq);
 void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
index 50fc66868402123dfee3d60738d3f16061e2a6a2..9006c4e75cf737a90335eadcd73e14d59b0f753e 100644 (file)
@@ -41,8 +41,6 @@ struct amba_driver {
        int                     (*probe)(struct amba_device *, const struct amba_id *);
        int                     (*remove)(struct amba_device *);
        void                    (*shutdown)(struct amba_device *);
-       int                     (*suspend)(struct amba_device *, pm_message_t);
-       int                     (*resume)(struct amba_device *);
        const struct amba_id    *id_table;
 };
 
index 00a5763e850e94b123880e8f10b53279bc6b78ba..301de78d65f75c194720cc5168359dbce6eab59f 100644 (file)
 #endif
 #endif /* atomic_add_return_relaxed */
 
+/* atomic_inc_return_relaxed */
+#ifndef atomic_inc_return_relaxed
+#define  atomic_inc_return_relaxed     atomic_inc_return
+#define  atomic_inc_return_acquire     atomic_inc_return
+#define  atomic_inc_return_release     atomic_inc_return
+
+#else /* atomic_inc_return_relaxed */
+
+#ifndef atomic_inc_return_acquire
+#define  atomic_inc_return_acquire(...)                                        \
+       __atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_inc_return_release
+#define  atomic_inc_return_release(...)                                        \
+       __atomic_op_release(atomic_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_inc_return
+#define  atomic_inc_return(...)                                                \
+       __atomic_op_fence(atomic_inc_return, __VA_ARGS__)
+#endif
+#endif /* atomic_inc_return_relaxed */
+
 /* atomic_sub_return_relaxed */
 #ifndef atomic_sub_return_relaxed
 #define  atomic_sub_return_relaxed     atomic_sub_return
 #endif
 #endif /* atomic_sub_return_relaxed */
 
+/* atomic_dec_return_relaxed */
+#ifndef atomic_dec_return_relaxed
+#define  atomic_dec_return_relaxed     atomic_dec_return
+#define  atomic_dec_return_acquire     atomic_dec_return
+#define  atomic_dec_return_release     atomic_dec_return
+
+#else /* atomic_dec_return_relaxed */
+
+#ifndef atomic_dec_return_acquire
+#define  atomic_dec_return_acquire(...)                                        \
+       __atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_dec_return_release
+#define  atomic_dec_return_release(...)                                        \
+       __atomic_op_release(atomic_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_dec_return
+#define  atomic_dec_return(...)                                                \
+       __atomic_op_fence(atomic_dec_return, __VA_ARGS__)
+#endif
+#endif /* atomic_dec_return_relaxed */
+
 /* atomic_xchg_relaxed */
 #ifndef atomic_xchg_relaxed
 #define  atomic_xchg_relaxed           atomic_xchg
 #endif
 #endif /* atomic64_add_return_relaxed */
 
+/* atomic64_inc_return_relaxed */
+#ifndef atomic64_inc_return_relaxed
+#define  atomic64_inc_return_relaxed   atomic64_inc_return
+#define  atomic64_inc_return_acquire   atomic64_inc_return
+#define  atomic64_inc_return_release   atomic64_inc_return
+
+#else /* atomic64_inc_return_relaxed */
+
+#ifndef atomic64_inc_return_acquire
+#define  atomic64_inc_return_acquire(...)                              \
+       __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_inc_return_release
+#define  atomic64_inc_return_release(...)                              \
+       __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_inc_return
+#define  atomic64_inc_return(...)                                      \
+       __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_inc_return_relaxed */
+
+
 /* atomic64_sub_return_relaxed */
 #ifndef atomic64_sub_return_relaxed
 #define  atomic64_sub_return_relaxed   atomic64_sub_return
 #endif
 #endif /* atomic64_sub_return_relaxed */
 
+/* atomic64_dec_return_relaxed */
+#ifndef atomic64_dec_return_relaxed
+#define  atomic64_dec_return_relaxed   atomic64_dec_return
+#define  atomic64_dec_return_acquire   atomic64_dec_return
+#define  atomic64_dec_return_release   atomic64_dec_return
+
+#else /* atomic64_dec_return_relaxed */
+
+#ifndef atomic64_dec_return_acquire
+#define  atomic64_dec_return_acquire(...)                              \
+       __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_dec_return_release
+#define  atomic64_dec_return_release(...)                              \
+       __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_dec_return
+#define  atomic64_dec_return(...)                                      \
+       __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_dec_return_relaxed */
+
 /* atomic64_xchg_relaxed */
 #ifndef atomic64_xchg_relaxed
 #define  atomic64_xchg_relaxed         atomic64_xchg
@@ -451,7 +548,6 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 }
 #endif
 
-#include <asm-generic/atomic-long.h>
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
 #endif
@@ -463,4 +559,6 @@ static inline void atomic64_andnot(long long i, atomic64_t *v)
 }
 #endif
 
+#include <asm-generic/atomic-long.h>
+
 #endif /* _LINUX_ATOMIC_H */
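
A rough expansion of the fallback machinery above: when an architecture supplies only atomic_inc_return_relaxed(), the acquire flavour is generated by __atomic_op_acquire() (defined earlier in this header) as the relaxed op followed by a barrier, roughly:

static inline int atomic_inc_return_acquire_expanded(atomic_t *v)
{
	int ret = atomic_inc_return_relaxed(v);

	smp_mb__after_atomic();		/* upgrade the relaxed op to ACQUIRE */
	return ret;
}

When the architecture provides no _relaxed variant at all, every flavour simply aliases the fully ordered atomic_inc_return().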
index a23209b43842106c6a74de8a51d6b4b752613157..1b4d69f68c33cc73ad99a1136b2408c71e763fb8 100644 (file)
@@ -116,6 +116,8 @@ struct bdi_writeback {
        struct list_head work_list;
        struct delayed_work dwork;      /* work item used for writeback */
 
+       struct list_head bdi_node;      /* anchored at bdi->wb_list */
+
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct percpu_ref refcnt;       /* used only for !root wb's */
        struct fprop_local_percpu memcg_completions;
@@ -150,6 +152,7 @@ struct backing_dev_info {
        atomic_long_t tot_write_bandwidth;
 
        struct bdi_writeback wb;  /* the root writeback info for this bdi */
+       struct list_head wb_list; /* list of all wbs */
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
        struct rb_root cgwb_congested_tree; /* their congested states */
index d5eb4ad1c534a8074b64ee75d4b597563e36e89a..c85f74946a8bab65ff3f16cddea6a4446b0a4799 100644 (file)
 #include <linux/slab.h>
 
 int __must_check bdi_init(struct backing_dev_info *bdi);
-void bdi_destroy(struct backing_dev_info *bdi);
+void bdi_exit(struct backing_dev_info *bdi);
 
 __printf(3, 4)
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+void bdi_unregister(struct backing_dev_info *bdi);
+
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
+void bdi_destroy(struct backing_dev_info *bdi);
+
 void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
                        bool range_cyclic, enum wb_reason reason);
 void wb_start_background_writeback(struct bdi_writeback *wb);
@@ -408,61 +412,6 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
        rcu_read_unlock();
 }
 
-struct wb_iter {
-       int                     start_memcg_id;
-       struct radix_tree_iter  tree_iter;
-       void                    **slot;
-};
-
-static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
-                                                  struct backing_dev_info *bdi)
-{
-       struct radix_tree_iter *titer = &iter->tree_iter;
-
-       WARN_ON_ONCE(!rcu_read_lock_held());
-
-       if (iter->start_memcg_id >= 0) {
-               iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
-               iter->start_memcg_id = -1;
-       } else {
-               iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
-       }
-
-       if (!iter->slot)
-               iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
-       if (iter->slot)
-               return *iter->slot;
-       return NULL;
-}
-
-static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
-                                                  struct backing_dev_info *bdi,
-                                                  int start_memcg_id)
-{
-       iter->start_memcg_id = start_memcg_id;
-
-       if (start_memcg_id)
-               return __wb_iter_next(iter, bdi);
-       else
-               return &bdi->wb;
-}
-
-/**
- * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
- * @wb_cur: cursor struct bdi_writeback pointer
- * @bdi: bdi to walk wb's of
- * @iter: pointer to struct wb_iter to be used as iteration buffer
- * @start_memcg_id: memcg ID to start iteration from
- *
- * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
- * memcg ID order starting from @start_memcg_id.  @iter is struct wb_iter
- * to be used as temp storage during iteration.  rcu_read_lock() must be
- * held throughout iteration.
- */
-#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id)             \
-       for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id);      \
-            (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
-
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
 static inline bool inode_cgwb_enabled(struct inode *inode)
@@ -522,14 +471,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg)
 {
 }
 
-struct wb_iter {
-       int             next_id;
-};
-
-#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)             \
-       for ((iter)->next_id = (start_blkcg_id);                        \
-            ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )
-
 static inline int inode_congested(struct inode *inode, int cong_bits)
 {
        return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
index 0a5cc7a1109b9b2c655020c6c849960506f1cbc6..c02e669945e9279bceb796eb9a1adb938ebf863b 100644 (file)
@@ -713,9 +713,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 
        if (!throtl) {
                blkg = blkg ?: q->root_blkg;
-               blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags,
+               blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
                                bio->bi_iter.bi_size);
-               blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1);
+               blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
        }
 
        rcu_read_unlock();
index 37d1602c4f7aa08b464577c675910046a4db3dde..5e7d43ab61c000d894164e093132f607344e9cc0 100644 (file)
@@ -145,7 +145,6 @@ enum {
        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
        BLK_MQ_F_TAG_SHARED     = 1 << 1,
        BLK_MQ_F_SG_MERGE       = 1 << 2,
-       BLK_MQ_F_SYSFS_UP       = 1 << 3,
        BLK_MQ_F_DEFER_ISSUE    = 1 << 4,
        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
        BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_abort_requeue_list(struct request_queue *q);
-void blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request(struct request *rq, int error);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
-               void *priv);
 void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
index 99da9ebc73776af0a5efb69a73310f522b952b25..19c2e947d4d127364887a133d4b0d0ce92090e1c 100644 (file)
@@ -456,6 +456,8 @@ struct request_queue {
        struct blk_mq_tag_set   *tag_set;
        struct list_head        tag_set_list;
        struct bio_set          *bio_split;
+
+       bool                    mq_sysfs_init_done;
 };
 
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */
index f7ef093ec49a2bbdd637aae6ab0f1bf0fa4e1e32..29f9e774ab76ef5c1deb019a142af3027a43ebce 100644 (file)
@@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
 extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                        unsigned int order_per_bit,
                                        struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align);
+extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
 #endif
index dfaa7b3e9ae900676b61dc7c3f693c78de97e8cd..8efb40e61d6e48021d68f93635eea8d3ab3e8c0b 100644 (file)
 #define KASAN_ABI_VERSION 3
 #endif
 
+#if GCC_VERSION >= 40902
+/*
+ * Tell the compiler that address safety instrumentation (KASAN)
+ * should not be applied to that function.
+ * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ */
+#define __no_sanitize_address __attribute__((no_sanitize_address))
+#endif
+
 #endif /* gcc version >= 40000 specific checks */
 
 #if !defined(__noclone)
 #define __noclone      /* not needed */
 #endif
 
+#if !defined(__no_sanitize_address)
+#define __no_sanitize_address
+#endif
+
 /*
  * A trick to suppress uninitialized variable warning without generating any
  * code
index c836eb2dc44d5b3a4d5e98dfbbd274c4448daa90..fe817432190c1cfb4c3108bbdfaf71b22916b8f8 100644 (file)
@@ -198,19 +198,45 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 
 #include <uapi/linux/types.h>
 
-static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+#define __READ_ONCE_SIZE                                               \
+({                                                                     \
+       switch (size) {                                                 \
+       case 1: *(__u8 *)res = *(volatile __u8 *)p; break;              \
+       case 2: *(__u16 *)res = *(volatile __u16 *)p; break;            \
+       case 4: *(__u32 *)res = *(volatile __u32 *)p; break;            \
+       case 8: *(__u64 *)res = *(volatile __u64 *)p; break;            \
+       default:                                                        \
+               barrier();                                              \
+               __builtin_memcpy((void *)res, (const void *)p, size);   \
+               barrier();                                              \
+       }                                                               \
+})
+
+static __always_inline
+void __read_once_size(const volatile void *p, void *res, int size)
 {
-       switch (size) {
-       case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
-       case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
-       case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
-       case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
-       default:
-               barrier();
-               __builtin_memcpy((void *)res, (const void *)p, size);
-               barrier();
-       }
+       __READ_ONCE_SIZE;
+}
+
+#ifdef CONFIG_KASAN
+/*
+ * This function is not 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempt to inline it may cause a build failure.
+ *     https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+static __no_sanitize_address __maybe_unused
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+       __READ_ONCE_SIZE;
+}
+#else
+static __always_inline
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+       __READ_ONCE_SIZE;
 }
+#endif
 
 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 {
@@ -248,8 +274,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * required ordering.
  */
 
-#define READ_ONCE(x) \
-       ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+#define __READ_ONCE(x, check)                                          \
+({                                                                     \
+       union { typeof(x) __val; char __c[1]; } __u;                    \
+       if (check)                                                      \
+               __read_once_size(&(x), __u.__c, sizeof(x));             \
+       else                                                            \
+               __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
+       __u.__val;                                                      \
+})
+#define READ_ONCE(x) __READ_ONCE(x, 1)
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
+ * to hide memory access from KASAN.
+ */
+#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
 
 #define WRITE_ONCE(x, val) \
 ({                                                     \
@@ -259,22 +299,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
        __u.__val;                                      \
 })
 
-/**
- * READ_ONCE_CTRL - Read a value heading a control dependency
- * @x: The value to be read, heading the control dependency
- *
- * Control dependencies are tricky.  See Documentation/memory-barriers.txt
- * for important information on how to use them.  Note that in many cases,
- * use of smp_load_acquire() will be much simpler.  Control dependencies
- * should be avoided except on the hottest of hotpaths.
- */
-#define READ_ONCE_CTRL(x) \
-({ \
-       typeof(x) __val = READ_ONCE(x); \
-       smp_read_barrier_depends(); /* Enforce control dependency. */ \
-       __val; \
-})
-
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
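
An assumed consumer of READ_ONCE_NOCHECK(): something like a stack dumper that deliberately reads words which may sit outside any live object. The helper name is made up; the point is that the access stays a single volatile read but is not instrumented by KASAN:

static unsigned long peek_stack_word(unsigned long *sp)
{
	/* intentional wild read while scanning a stack; skip KASAN checks */
	return READ_ONCE_NOCHECK(*sp);
}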
index 23c30bdcca8631f80242d62d4b167d20e2814c75..d2ca8c38f9c45c0b279ef329c7abf130fcd0dc61 100644 (file)
@@ -228,7 +228,6 @@ extern struct bus_type cpu_subsys;
 extern void cpu_hotplug_begin(void);
 extern void cpu_hotplug_done(void);
 extern void get_online_cpus(void);
-extern bool try_get_online_cpus(void);
 extern void put_online_cpus(void);
 extern void cpu_hotplug_disable(void);
 extern void cpu_hotplug_enable(void);
@@ -246,7 +245,6 @@ int cpu_down(unsigned int cpu);
 static inline void cpu_hotplug_begin(void) {}
 static inline void cpu_hotplug_done(void) {}
 #define get_online_cpus()      do { } while (0)
-#define try_get_online_cpus()  true
 #define put_online_cpus()      do { } while (0)
 #define cpu_hotplug_disable()  do { } while (0)
 #define cpu_hotplug_enable()   do { } while (0)
index 569bbd039896f330923d53b4a6231ba1c73e70cc..fec734df1524799e1e7137e943098c907d0fce8c 100644 (file)
@@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
        return ret;
 }
 
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
                                       unsigned int order);
 bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                 int count);
@@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
 }
 
 static inline
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
                                       unsigned int order)
 {
        return NULL;
index da3b72e95db3cb0ba734ff736ad94aab6fd5b175..b3d87e5822f8aaba74dfe54a0730dffac499d990 100644 (file)
@@ -769,12 +769,10 @@ struct mem_ctl_info {
        /* the internal state of this controller instance */
        int op_state;
 
-#ifdef CONFIG_EDAC_DEBUG
        struct dentry *debugfs;
        u8 fake_inject_layer[EDAC_MAX_LAYERS];
        u32 fake_inject_ue;
        u16 fake_inject_count;
-#endif
 };
 
 /*
index 85ef051ac6fb43a64c38779c6bf754d2e492abcf..569b5a866bb1e6308bbc4c0a28a2da92d106d6b5 100644 (file)
@@ -99,6 +99,7 @@ typedef       struct {
 #define EFI_MEMORY_XP          ((u64)0x0000000000004000ULL)    /* execute-protect */
 #define EFI_MEMORY_MORE_RELIABLE \
                                ((u64)0x0000000000010000ULL)    /* higher reliability */
+#define EFI_MEMORY_RO          ((u64)0x0000000000020000ULL)    /* read-only */
 #define EFI_MEMORY_RUNTIME     ((u64)0x8000000000000000ULL)    /* range requires runtime mapping */
 #define EFI_MEMORY_DESCRIPTOR_VERSION  1
 
@@ -595,6 +596,9 @@ void efi_native_runtime_setup(void);
 #define DEVICE_TREE_GUID \
     EFI_GUID(  0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 )
 
+#define EFI_PROPERTIES_TABLE_GUID \
+    EFI_GUID(  0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 )
+
 typedef struct {
        efi_guid_t guid;
        u64 table;
@@ -676,7 +680,7 @@ typedef struct {
 } efi_system_table_t;
 
 struct efi_memory_map {
-       void *phys_map;
+       phys_addr_t phys_map;
        void *map;
        void *map_end;
        int nr_map;
@@ -808,6 +812,15 @@ typedef struct _efi_file_io_interface {
 #define EFI_FILE_MODE_WRITE    0x0000000000000002
 #define EFI_FILE_MODE_CREATE   0x8000000000000000
 
+typedef struct {
+       u32 version;
+       u32 length;
+       u64 memory_protection_attribute;
+} efi_properties_table_t;
+
+#define EFI_PROPERTIES_TABLE_VERSION   0x00010000
+#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA        0x1
+
 #define EFI_INVALID_TABLE_ADDR         (~0UL)
 
 /*
@@ -830,6 +843,7 @@ extern struct efi {
        unsigned long runtime;          /* runtime table */
        unsigned long config_table;     /* config tables */
        unsigned long esrt;             /* ESRT table */
+       unsigned long properties_table; /* properties table */
        efi_get_time_t *get_time;
        efi_set_time_t *set_time;
        efi_get_wakeup_time_t *get_wakeup_time;
@@ -901,13 +915,19 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource,
                struct resource *data_resource, struct resource *bss_resource);
 extern void efi_get_time(struct timespec *now);
 extern void efi_reserve_boot_services(void);
-extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose);
+extern int efi_get_fdt_params(struct efi_fdt_params *params);
 extern struct efi_memory_map memmap;
 extern struct kobject *efi_kobj;
 
 extern int efi_reboot_quirk_mode;
 extern bool efi_poweroff_required(void);
 
+#ifdef CONFIG_EFI_FAKE_MEMMAP
+extern void __init efi_fake_memmap(void);
+#else
+static inline void efi_fake_memmap(void) { }
+#endif
+
 /* Iterate through an efi_memory_map */
 #define for_each_efi_memory_desc(m, md)                                           \
        for ((md) = (m)->map;                                              \
@@ -959,6 +979,7 @@ extern int __init efi_setup_pcdp_console(char *);
 #define EFI_PARAVIRT           6       /* Access is via a paravirt interface */
 #define EFI_ARCH_1             7       /* First arch-specific bit */
 #define EFI_DBG                        8       /* Print additional debug info at runtime */
+#define EFI_NX_PE_DATA         9       /* Can runtime data regions be mapped non-executable? */
 
 #ifdef CONFIG_EFI
 /*
index 674e3e226465a0b21a283e068d77d665eca84052..5295535b60c60768ccb91034b3ba290dc1a57f1a 100644 (file)
@@ -26,6 +26,7 @@ struct fdtable {
        struct file __rcu **fd;      /* current fd array */
        unsigned long *close_on_exec;
        unsigned long *open_fds;
+       unsigned long *full_fds_bits;
        struct rcu_head rcu;
 };
 
@@ -59,6 +60,7 @@ struct files_struct {
        int next_fd;
        unsigned long close_on_exec_init[1];
        unsigned long open_fds_init[1];
+       unsigned long full_fds_bits_init[1];
        struct file __rcu * fd_array[NR_OPEN_DEFAULT];
 };
 
index 0408545bce42403ecd50ecbeed8bffb78b336274..37ec668546ab21897393bcebc1ba548796db7994 100644 (file)
@@ -17,6 +17,7 @@ enum fwnode_type {
        FWNODE_OF,
        FWNODE_ACPI,
        FWNODE_PDATA,
+       FWNODE_IRQCHIP,
 };
 
 struct fwnode_handle {
index 14cac67c2012b6883d03445331337d139aba0307..fb0fde686cb1f80717c544ac18b380f6501afd39 100644 (file)
@@ -400,6 +400,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
 {
        return ERR_PTR(-EINVAL);
 }
+
 static inline int desc_to_gpio(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
index 1aed31c5ffba33c3a122779b9feb151f24953168..d1baebf350d8109a30a6bbe3ff15dc25f3146e14 100644 (file)
@@ -206,6 +206,9 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
 
 #endif /* CONFIG_GPIOLIB_IRQCHIP */
 
+int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset);
+void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset);
+
 #ifdef CONFIG_PINCTRL
 
 /**
index e38681f4912d02f9c571dd7d232a4b4bb4911ea4..810a34f604247065d78c2820ed34b0bf8d3aabf6 100644 (file)
@@ -59,7 +59,8 @@ extern struct fs_struct init_fs;
        .rlim           = INIT_RLIMITS,                                 \
        .cputimer       = {                                             \
                .cputime_atomic = INIT_CPUTIME_ATOMIC,                  \
-               .running        = 0,                                    \
+               .running        = false,                                \
+               .checking_timer = false,                                \
        },                                                              \
        INIT_PREV_CPUTIME(sig)                                          \
        .cred_guard_mutex =                                             \
index be7e75c945e97b07d5f248ded5c1e0d9240756ad..ad16809c85961e16ebc804280e4a78d66cf77634 100644 (file)
@@ -102,6 +102,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @flags:     flags (see IRQF_* above)
  * @thread_fn: interrupt handler function for threaded interrupts
  * @thread:    thread pointer for threaded interrupts
+ * @secondary: pointer to secondary irqaction (force threading)
  * @thread_flags:      flags related to @thread
  * @thread_mask:       bitmask for keeping track of @thread activity
  * @dir:       pointer to the proc/irq/NN/name entry
@@ -113,6 +114,7 @@ struct irqaction {
        struct irqaction        *next;
        irq_handler_t           thread_fn;
        struct task_struct      *thread;
+       struct irqaction        *secondary;
        unsigned int            irq;
        unsigned int            flags;
        unsigned long           thread_flags;
index 3920a19d819415bc3109a491171e32388f9bda18..92f7177db2ce869a29db8813911c3a8a0c2b86b2 100644 (file)
@@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
        return iova >> iova_shift(iovad);
 }
 
-int iommu_iova_cache_init(void);
-void iommu_iova_cache_destroy(void);
+int iova_cache_get(void);
+void iova_cache_put(void);
 
 struct iova *alloc_iova_mem(void);
 void free_iova_mem(struct iova *iova);
index 11bf09288ddb08ab277b6f213696c1216348155c..3c1c96786248cb02d2d9792cce455892323ef275 100644 (file)
@@ -67,11 +67,12 @@ enum irqchip_irq_state;
  *                               request/setup_irq()
  * IRQ_NO_BALANCING            - Interrupt cannot be balanced (affinity set)
  * IRQ_MOVE_PCNTXT             - Interrupt can be migrated from process context
- * IRQ_NESTED_TRHEAD           - Interrupt nests into another thread
+ * IRQ_NESTED_THREAD           - Interrupt nests into another thread
  * IRQ_PER_CPU_DEVID           - Dev_id is a per-cpu variable
  * IRQ_IS_POLLED               - Always polled by another interrupt. Exclude
  *                               it from the spurious interrupt detection
  *                               mechanism and from core side polling.
+ * IRQ_DISABLE_UNLAZY          - Disable lazy irq disable
  */
 enum {
        IRQ_TYPE_NONE           = 0x00000000,
@@ -97,13 +98,14 @@ enum {
        IRQ_NOTHREAD            = (1 << 16),
        IRQ_PER_CPU_DEVID       = (1 << 17),
        IRQ_IS_POLLED           = (1 << 18),
+       IRQ_DISABLE_UNLAZY      = (1 << 19),
 };
 
 #define IRQF_MODIFY_MASK       \
        (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
         IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
         IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-        IRQ_IS_POLLED)
+        IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
 
 #define IRQ_NO_BALANCING_MASK  (IRQ_PER_CPU | IRQ_NO_BALANCING)
 
@@ -297,21 +299,6 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
        __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
 }
 
-/*
- * Functions for chained handlers which can be enabled/disabled by the
- * standard disable_irq/enable_irq calls. Must be called with
- * irq_desc->lock held.
- */
-static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
-{
-       __irqd_to_state(d) |= IRQD_IRQ_INPROGRESS;
-}
-
-static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
-{
-       __irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS;
-}
-
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
        return d->hwirq;
@@ -452,6 +439,8 @@ extern int irq_set_affinity_locked(struct irq_data *data,
                                   const struct cpumask *cpumask, bool force);
 extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
 
+extern void irq_migrate_all_off_this_cpu(void);
+
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void irq_move_irq(struct irq_data *data);
 void irq_move_masked_irq(struct irq_data *data);
index 9eeeb9589acfc35baed79a89d7d3f0c1be134cf3..c9ae0c6ec050569fc592b6f07b3ce1fa052bcc59 100644 (file)
@@ -18,8 +18,6 @@
 #ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
 #define __LINUX_IRQCHIP_ARM_GIC_V3_H
 
-#include <asm/sysreg.h>
-
 /*
  * Distributor registers. We assume we're running non-secure, with ARE
  * being set. Secure-only and non-ARE registers are not described.
 #define GITS_BASER_PAGE_SIZE_16K       (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
 #define GITS_BASER_PAGE_SIZE_64K       (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
 #define GITS_BASER_PAGE_SIZE_MASK      (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGES_MAX           256
 
 #define GITS_BASER_TYPE_NONE           0
 #define GITS_BASER_TYPE_DEVICE         1
 /*
  * Hypervisor interface registers (SRE only)
  */
-#define ICH_LR_VIRTUAL_ID_MASK         ((1UL << 32) - 1)
-
-#define ICH_LR_EOI                     (1UL << 41)
-#define ICH_LR_GROUP                   (1UL << 60)
-#define ICH_LR_HW                      (1UL << 61)
-#define ICH_LR_STATE                   (3UL << 62)
-#define ICH_LR_PENDING_BIT             (1UL << 62)
-#define ICH_LR_ACTIVE_BIT              (1UL << 63)
+#define ICH_LR_VIRTUAL_ID_MASK         ((1ULL << 32) - 1)
+
+#define ICH_LR_EOI                     (1ULL << 41)
+#define ICH_LR_GROUP                   (1ULL << 60)
+#define ICH_LR_HW                      (1ULL << 61)
+#define ICH_LR_STATE                   (3ULL << 62)
+#define ICH_LR_PENDING_BIT             (1ULL << 62)
+#define ICH_LR_ACTIVE_BIT              (1ULL << 63)
 #define ICH_LR_PHYS_ID_SHIFT           32
-#define ICH_LR_PHYS_ID_MASK            (0x3ffUL << ICH_LR_PHYS_ID_SHIFT)
+#define ICH_LR_PHYS_ID_MASK            (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
 
 #define ICH_MISR_EOI                   (1 << 0)
 #define ICH_MISR_U                     (1 << 1)
 #define ICH_VMCR_PMR_SHIFT             24
 #define ICH_VMCR_PMR_MASK              (0xffUL << ICH_VMCR_PMR_SHIFT)
 
-#define ICC_EOIR1_EL1                  sys_reg(3, 0, 12, 12, 1)
-#define ICC_DIR_EL1                    sys_reg(3, 0, 12, 11, 1)
-#define ICC_IAR1_EL1                   sys_reg(3, 0, 12, 12, 0)
-#define ICC_SGI1R_EL1                  sys_reg(3, 0, 12, 11, 5)
-#define ICC_PMR_EL1                    sys_reg(3, 0, 4, 6, 0)
-#define ICC_CTLR_EL1                   sys_reg(3, 0, 12, 12, 4)
-#define ICC_SRE_EL1                    sys_reg(3, 0, 12, 12, 5)
-#define ICC_GRPEN1_EL1                 sys_reg(3, 0, 12, 12, 7)
-
 #define ICC_IAR1_EL1_SPURIOUS          0x3ff
 
-#define ICC_SRE_EL2                    sys_reg(3, 4, 12, 9, 5)
-
 #define ICC_SRE_EL2_SRE                        (1 << 0)
 #define ICC_SRE_EL2_ENABLE             (1 << 3)
 
 #define ICC_SGI1R_AFFINITY_3_SHIFT     48
 #define ICC_SGI1R_AFFINITY_3_MASK      (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
 
-/*
- * System register definitions
- */
-#define ICH_VSEIR_EL2                  sys_reg(3, 4, 12, 9, 4)
-#define ICH_HCR_EL2                    sys_reg(3, 4, 12, 11, 0)
-#define ICH_VTR_EL2                    sys_reg(3, 4, 12, 11, 1)
-#define ICH_MISR_EL2                   sys_reg(3, 4, 12, 11, 2)
-#define ICH_EISR_EL2                   sys_reg(3, 4, 12, 11, 3)
-#define ICH_ELSR_EL2                   sys_reg(3, 4, 12, 11, 5)
-#define ICH_VMCR_EL2                   sys_reg(3, 4, 12, 11, 7)
-
-#define __LR0_EL2(x)                   sys_reg(3, 4, 12, 12, x)
-#define __LR8_EL2(x)                   sys_reg(3, 4, 12, 13, x)
-
-#define ICH_LR0_EL2                    __LR0_EL2(0)
-#define ICH_LR1_EL2                    __LR0_EL2(1)
-#define ICH_LR2_EL2                    __LR0_EL2(2)
-#define ICH_LR3_EL2                    __LR0_EL2(3)
-#define ICH_LR4_EL2                    __LR0_EL2(4)
-#define ICH_LR5_EL2                    __LR0_EL2(5)
-#define ICH_LR6_EL2                    __LR0_EL2(6)
-#define ICH_LR7_EL2                    __LR0_EL2(7)
-#define ICH_LR8_EL2                    __LR8_EL2(0)
-#define ICH_LR9_EL2                    __LR8_EL2(1)
-#define ICH_LR10_EL2                   __LR8_EL2(2)
-#define ICH_LR11_EL2                   __LR8_EL2(3)
-#define ICH_LR12_EL2                   __LR8_EL2(4)
-#define ICH_LR13_EL2                   __LR8_EL2(5)
-#define ICH_LR14_EL2                   __LR8_EL2(6)
-#define ICH_LR15_EL2                   __LR8_EL2(7)
-
-#define __AP0Rx_EL2(x)                 sys_reg(3, 4, 12, 8, x)
-#define ICH_AP0R0_EL2                  __AP0Rx_EL2(0)
-#define ICH_AP0R1_EL2                  __AP0Rx_EL2(1)
-#define ICH_AP0R2_EL2                  __AP0Rx_EL2(2)
-#define ICH_AP0R3_EL2                  __AP0Rx_EL2(3)
-
-#define __AP1Rx_EL2(x)                 sys_reg(3, 4, 12, 9, x)
-#define ICH_AP1R0_EL2                  __AP1Rx_EL2(0)
-#define ICH_AP1R1_EL2                  __AP1Rx_EL2(1)
-#define ICH_AP1R2_EL2                  __AP1Rx_EL2(2)
-#define ICH_AP1R3_EL2                  __AP1Rx_EL2(3)
+#include <asm/arch_gicv3.h>
 
 #ifndef __ASSEMBLY__
 
-#include <linux/stringify.h>
-#include <asm/msi.h>
-
 /*
  * We need a value to serve as an irq-type for LPIs. Choose one that will
  * hopefully pique the interest of the reviewer.
@@ -385,23 +329,26 @@ struct rdists {
        u64                     flags;
 };
 
-static inline void gic_write_eoir(u64 irq)
-{
-       asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
-       isb();
-}
-
-static inline void gic_write_dir(u64 irq)
-{
-       asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq));
-       isb();
-}
-
 struct irq_domain;
 int its_cpu_init(void);
 int its_init(struct device_node *node, struct rdists *rdists,
             struct irq_domain *domain);
 
+static inline bool gic_enable_sre(void)
+{
+       u32 val;
+
+       val = gic_read_sre();
+       if (val & ICC_SRE_EL1_SRE)
+               return true;
+
+       val |= ICC_SRE_EL1_SRE;
+       gic_write_sre(val);
+       val = gic_read_sre();
+
+       return !!(val & ICC_SRE_EL1_SRE);
+}
+
 #endif
 
 #endif
index b8901dfd9e9584ba06de5f585f14c6edc9da8fb5..bae69e5d693c3e60cea1a209fc06d2d33fdfb960 100644 (file)
 
 struct device_node;
 
-void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
-                   u32 offset, struct device_node *);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
 int gic_cpu_if_down(unsigned int gic_nr);
 
-static inline void gic_init(unsigned int nr, int start,
-                           void __iomem *dist , void __iomem *cpu)
-{
-       gic_init_bases(nr, start, dist, cpu, 0, NULL);
-}
+void gic_init(unsigned int nr, int start,
+             void __iomem *dist , void __iomem *cpu);
 
 int gicv2m_of_init(struct device_node *node, struct irq_domain *parent);
 
index d3ca79236fb00ee5543e507ae9e69bc62b700e43..d5e5c5bef28c45e29bff88b175d581c0595b73f9 100644 (file)
@@ -5,9 +5,10 @@
  * helpful for interrupt controllers to implement mapping between hardware
  * irq numbers and the Linux irq number space.
  *
- * irq_domains also have a hook for translating device tree interrupt
- * representation into a hardware irq number that can be mapped back to a
- * Linux irq number without any extra platform support code.
+ * irq_domains also have hooks for translating device tree or other
+ * firmware interrupt representations into a hardware irq number that
+ * can be mapped back to a Linux irq number without any extra platform
+ * support code.
  *
  * Interrupt controller "domain" data structure. This could be defined as a
  * irq domain controller. That is, it handles the mapping between hardware
  * model). It's the domain callbacks that are responsible for setting the
  * irq_chip on a given irq_desc after it's been mapped.
  *
- * The host code and data structures are agnostic to whether or not
- * we use an open firmware device-tree. We do have references to struct
- * device_node in two places: in irq_find_host() to find the host matching
- * a given interrupt controller node, and of course as an argument to its
- * counterpart domain->ops->match() callback. However, those are treated as
- * generic pointers by the core and the fact that it's actually a device-node
- * pointer is purely a convention between callers and implementation. This
- * code could thus be used on other architectures by replacing those two
- * by some sort of arch-specific void * "token" used to identify interrupt
- * controllers.
+ * The host code and data structures use a fwnode_handle pointer to
+ * identify the domain. In some cases, and in order to preserve source
+ * code compatibility, this fwnode pointer is "upgraded" to a DT
+ * device_node. For those firmware infrastructures that do not provide
+ * a unique identifier for an interrupt controller, the irq_domain
+ * code offers a fwnode allocator.
  */
 
 #ifndef _LINUX_IRQDOMAIN_H
@@ -34,6 +31,7 @@
 
 #include <linux/types.h>
 #include <linux/irqhandler.h>
+#include <linux/of.h>
 #include <linux/radix-tree.h>
 
 struct device_node;
@@ -45,6 +43,24 @@ struct irq_data;
 /* Number of irqs reserved for a legacy isa controller */
 #define NUM_ISA_INTERRUPTS     16
 
+#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16
+
+/**
+ * struct irq_fwspec - generic IRQ specifier structure
+ *
+ * @fwnode:            Pointer to a firmware-specific descriptor
+ * @param_count:       Number of device-specific parameters
+ * @param:             Device-specific parameters
+ *
+ * This structure, directly modeled after of_phandle_args, is used to
+ * pass a device-specific description of an interrupt.
+ */
+struct irq_fwspec {
+       struct fwnode_handle *fwnode;
+       int param_count;
+       u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
+};
+
 /*
  * Should several domains have the same device node, but serve
  * different purposes (for example one domain is for PCI/MSI, and the
@@ -91,6 +107,8 @@ struct irq_domain_ops {
                     unsigned int nr_irqs);
        void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
        void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
+       int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
+                        unsigned long *out_hwirq, unsigned int *out_type);
 #endif
 };
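
A hypothetical ->translate() implementation for a simple two-cell interrupt controller, showing how the new struct irq_fwspec is consumed; the controller and its cell layout are assumptions, not taken from this patch:

static int foo_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *out_hwirq,
				    unsigned int *out_type)
{
	if (fwspec->param_count != 2)
		return -EINVAL;

	*out_hwirq = fwspec->param[0];				/* hardware irq number */
	*out_type  = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;	/* trigger type */
	return 0;
}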
 
@@ -130,7 +148,7 @@ struct irq_domain {
        unsigned int flags;
 
        /* Optional data */
-       struct device_node *of_node;
+       struct fwnode_handle *fwnode;
        enum irq_domain_bus_token bus_token;
        struct irq_domain_chip_generic *gc;
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -161,8 +179,15 @@ enum {
        IRQ_DOMAIN_FLAG_NONCORE         = (1 << 16),
 };
 
+static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
+{
+       return to_of_node(d->fwnode);
+}
+
 #ifdef CONFIG_IRQ_DOMAIN
-struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
+struct fwnode_handle *irq_domain_alloc_fwnode(void *data);
+void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
                                    irq_hw_number_t hwirq_max, int direct_max,
                                    const struct irq_domain_ops *ops,
                                    void *host_data);
@@ -177,10 +202,21 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
                                         irq_hw_number_t first_hwirq,
                                         const struct irq_domain_ops *ops,
                                         void *host_data);
-extern struct irq_domain *irq_find_matching_host(struct device_node *node,
-                                                enum irq_domain_bus_token bus_token);
+extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+                                                  enum irq_domain_bus_token bus_token);
 extern void irq_set_default_host(struct irq_domain *host);
 
+static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
+{
+       return node ? &node->fwnode : NULL;
+}
+
+static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
+                                                       enum irq_domain_bus_token bus_token)
+{
+       return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token);
+}
+
 static inline struct irq_domain *irq_find_host(struct device_node *node)
 {
        return irq_find_matching_host(node, DOMAIN_BUS_ANY);
@@ -198,14 +234,14 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no
                                         const struct irq_domain_ops *ops,
                                         void *host_data)
 {
-       return __irq_domain_add(of_node, size, size, 0, ops, host_data);
+       return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
 }
 static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
                                         unsigned int max_irq,
                                         const struct irq_domain_ops *ops,
                                         void *host_data)
 {
-       return __irq_domain_add(of_node, 0, max_irq, max_irq, ops, host_data);
+       return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data);
 }
 static inline struct irq_domain *irq_domain_add_legacy_isa(
                                struct device_node *of_node,
@@ -219,7 +255,22 @@ static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node
                                         const struct irq_domain_ops *ops,
                                         void *host_data)
 {
-       return __irq_domain_add(of_node, 0, ~0, 0, ops, host_data);
+       return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data);
+}
+
+static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode,
+                                        unsigned int size,
+                                        const struct irq_domain_ops *ops,
+                                        void *host_data)
+{
+       return __irq_domain_add(fwnode, size, size, 0, ops, host_data);
+}
+
+static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode,
+                                        const struct irq_domain_ops *ops,
+                                        void *host_data)
+{
+       return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data);
 }
 
 extern void irq_domain_remove(struct irq_domain *host);
@@ -234,6 +285,7 @@ extern void irq_domain_disassociate(struct irq_domain *domain,
 
 extern unsigned int irq_create_mapping(struct irq_domain *host,
                                       irq_hw_number_t hwirq);
+extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
 extern void irq_dispose_mapping(unsigned int virq);
 
 /**
@@ -285,10 +337,23 @@ extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
                                void *chip_data, irq_flow_handler_t handler,
                                void *handler_data, const char *handler_name);
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
+extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
                        unsigned int flags, unsigned int size,
-                       struct device_node *node,
+                       struct fwnode_handle *fwnode,
                        const struct irq_domain_ops *ops, void *host_data);
+
+static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
+                                           unsigned int flags,
+                                           unsigned int size,
+                                           struct device_node *node,
+                                           const struct irq_domain_ops *ops,
+                                           void *host_data)
+{
+       return irq_domain_create_hierarchy(parent, flags, size,
+                                          of_node_to_fwnode(node),
+                                          ops, host_data);
+}
+
 extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
                                   unsigned int nr_irqs, int node, void *arg,
                                   bool realloc);
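
For illustration only, a minimal sketch of how an interrupt controller driver might use the fwnode-based interface introduced above; the controller structure, its ops and the line count are hypothetical, while irq_domain_alloc_fwnode(), irq_domain_create_linear(), irq_domain_free_fwnode() and irq_create_fwspec_mapping() are the calls added or reworked in this header.

    /* Hypothetical controller with 32 hardware interrupt lines. */
    struct my_intc {
            void __iomem            *regs;
            struct fwnode_handle    *fwnode;
            struct irq_domain       *domain;
    };

    static const struct irq_domain_ops my_intc_ops = {
            /* .map / .translate callbacks would go here */
    };

    static int my_intc_init(struct my_intc *intc)
    {
            /* No firmware node is available, so ask the core for one. */
            intc->fwnode = irq_domain_alloc_fwnode(intc->regs);
            if (!intc->fwnode)
                    return -ENOMEM;

            intc->domain = irq_domain_create_linear(intc->fwnode, 32,
                                                    &my_intc_ops, intc);
            if (!intc->domain) {
                    irq_domain_free_fwnode(intc->fwnode);
                    return -ENOMEM;
            }
            return 0;
    }

    static unsigned int my_intc_map_line(struct my_intc *intc, u32 hwirq)
    {
            struct irq_fwspec fwspec = {
                    .fwnode         = intc->fwnode,
                    .param_count    = 1,
                    .param          = { hwirq },
            };

            /* Returns a Linux irq number, or 0 on failure. */
            return irq_create_fwspec_mapping(&fwspec);
    }
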
index e374e369fb2f4c9eb5ace48679376550d5ee633c..eb1bdcf95f2e0c931a3a5cb599ab06547ff34f3d 100644 (file)
@@ -3,7 +3,7 @@
 
 /**
  * enum irqreturn
- * @IRQ_NONE           interrupt was not from this device
+ * @IRQ_NONE           interrupt was not from this device or was not handled
  * @IRQ_HANDLED                interrupt was handled by this device
  * @IRQ_WAKE_THREAD    handler requests to wake the handler thread
  */
index 3e3e64a6100241b328a82f39f77ab403583637c6..993395a2e55c5483c890b40e216193d95c85bd64 100644 (file)
@@ -87,7 +87,7 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
 static inline void __list_del(struct list_head * prev, struct list_head * next)
 {
        next->prev = prev;
-       prev->next = next;
+       WRITE_ONCE(prev->next, next);
 }
 
 /**
@@ -615,7 +615,8 @@ static inline void __hlist_del(struct hlist_node *n)
 {
        struct hlist_node *next = n->next;
        struct hlist_node **pprev = n->pprev;
-       *pprev = next;
+
+       WRITE_ONCE(*pprev, next);
        if (next)
                next->pprev = pprev;
 }
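
The WRITE_ONCE() conversions above publish the unlink as a single store that the compiler is not allowed to tear or re-issue, which is what a concurrent lockless reader relies on when it walks the list with READ_ONCE() (or rcu_dereference()): it then sees either the old or the new pointer, never a partial update. A purely illustrative reader, with hypothetical names:

    /* Lockless membership test over an hlist whose deleters publish
     * unlinks with WRITE_ONCE(), as __hlist_del() now does.
     */
    static bool my_node_on_list(struct hlist_head *head, struct hlist_node *n)
    {
            struct hlist_node *pos;

            for (pos = READ_ONCE(head->first); pos; pos = READ_ONCE(pos->next))
                    if (pos == n)
                            return true;
            return false;
    }
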
index 2eb88556c5c5b0ee0cb0fc664e9d13dc852fea6e..8132214e8efd2930ff752fcc4c37da7d23b9643f 100644 (file)
@@ -93,9 +93,10 @@ static inline void __hlist_bl_del(struct hlist_bl_node *n)
        LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
 
        /* pprev may be `first`, so be careful not to lose the lock bit */
-       *pprev = (struct hlist_bl_node *)
+       WRITE_ONCE(*pprev,
+                  (struct hlist_bl_node *)
                        ((unsigned long)next |
-                        ((unsigned long)*pprev & LIST_BL_LOCKMASK));
+                        ((unsigned long)*pprev & LIST_BL_LOCKMASK)));
        if (next)
                next->pprev = pprev;
 }
index f266661d2666596d808bbe8756c91239f5b8e6dc..444d2b1313bda37647b1660e4582202a7e3b66e4 100644 (file)
@@ -76,7 +76,8 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
 {
        struct hlist_nulls_node *next = n->next;
        struct hlist_nulls_node **pprev = n->pprev;
-       *pprev = next;
+
+       WRITE_ONCE(*pprev, next);
        if (!is_a_nulls(next))
                next->pprev = pprev;
 }
index ad800e62cb7a603fdd5f7fb1a752edb03417dbb4..3e3318ddfc0e3e09a0e15825f78eb6052d628d78 100644 (file)
@@ -242,7 +242,6 @@ struct mem_cgroup {
         * percpu counter.
         */
        struct mem_cgroup_stat_cpu __percpu *stat;
-       spinlock_t pcp_counter_lock;
 
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
        struct cg_proto tcp_mem;
@@ -677,8 +676,9 @@ enum {
 
 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
-void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail,
-                        unsigned long *pdirty, unsigned long *pwriteback);
+void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
+                        unsigned long *pheadroom, unsigned long *pdirty,
+                        unsigned long *pwriteback);
 
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
@@ -688,7 +688,8 @@ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
 }
 
 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
-                                      unsigned long *pavail,
+                                      unsigned long *pfilepages,
+                                      unsigned long *pheadroom,
                                       unsigned long *pdirty,
                                       unsigned long *pwriteback)
 {
index 8eb3b19af2a4bc2ece866e8d07c6243115ce13d4..250b1ff8b48d43c0f9f2479e388b405d8238eb41 100644 (file)
@@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out {
        u8                      rsvd[8];
 };
 
-struct mlx5_cmd_query_special_contexts_mbox_in {
-       struct mlx5_inbox_hdr   hdr;
-       u8                      rsvd[8];
-};
-
-struct mlx5_cmd_query_special_contexts_mbox_out {
-       struct mlx5_outbox_hdr  hdr;
-       __be32                  dump_fill_mkey;
-       __be32                  resd_lkey;
-};
-
 struct mlx5_cmd_layout {
        u8              type;
        u8              rsvd0[3];
index 27b53f9a24ad85a4be3928a470ee4627a20859a6..8b6d6f2154a4eaab1cce3db487b92d8d1b36d4a9 100644 (file)
@@ -845,7 +845,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
 int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
-int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey);
 
 struct mlx5_profile {
        u64     mask;
index 91c08f6f0dc96dbb7474d3349f62b5d3f723fe80..80001de019ba33d86b90b9922b39722270cb0449 100644 (file)
@@ -905,6 +905,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 #endif
 }
 
+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+       return page->mem_cgroup;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+       page->mem_cgroup = memcg;
+}
+#else
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+       return NULL;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+}
+#endif
+
 /*
  * Some inline functions in vmstat.h depend on page_zone()
  */
index fdd0779ccdfa52309f5e99aeeadd84b356c1a472..eb0151bac50c1fd796f479d017bc3c3d7017c9e9 100644 (file)
@@ -269,7 +269,6 @@ struct mmc_card {
                                                /* for byte mode */
 #define MMC_QUIRK_NONSTD_SDIO  (1<<2)          /* non-standard SDIO card attached */
                                                /* (missing CIA registers) */
-#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3)     /* clock gating the sdio bus will make card fail */
 #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4)                /* SDIO card has nonstd function interfaces */
 #define MMC_QUIRK_DISABLE_CD   (1<<5)          /* disconnect CD/DAT[3] resistor */
 #define MMC_QUIRK_INAND_CMD38  (1<<6)          /* iNAND devices have broken CMD38 */
index 258daf914c6df301330b70f00d95ac95a44ac545..37967b6da03cf542d7a5762342c2089d93054b7a 100644 (file)
@@ -152,10 +152,8 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
 extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
        struct mmc_command *, int);
 extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
-extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool,
-                       bool, bool);
 extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
-extern int mmc_send_tuning(struct mmc_host *host);
+extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
 extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
 
 #define MMC_ERASE_ARG          0x00000000
index 134c57422740944fb8d1d004c2f8126b2bdeb604..f67b2ec18e6d87c8df27b314f7320e49163850f5 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/scatterlist.h>
 #include <linux/mmc/core.h>
+#include <linux/dmaengine.h>
 
 #define MAX_MCI_SLOTS  2
 
@@ -40,6 +41,17 @@ enum {
 
 struct mmc_data;
 
+enum {
+       TRANS_MODE_PIO = 0,
+       TRANS_MODE_IDMAC,
+       TRANS_MODE_EDMAC
+};
+
+struct dw_mci_dma_slave {
+       struct dma_chan *ch;
+       enum dma_transfer_direction direction;
+};
+
 /**
  * struct dw_mci - MMC controller state shared between all slots
  * @lock: Spinlock protecting the queue and associated data.
@@ -154,7 +166,14 @@ struct dw_mci {
        dma_addr_t              sg_dma;
        void                    *sg_cpu;
        const struct dw_mci_dma_ops     *dma_ops;
+       /* For idmac */
        unsigned int            ring_size;
+
+       /* For edmac */
+       struct dw_mci_dma_slave *dms;
+       /* Registers' physical base address */
+       void                    *phy_regs;
+
        u32                     cmd_status;
        u32                     data_status;
        u32                     stop_cmdr;
@@ -208,8 +227,8 @@ struct dw_mci {
 struct dw_mci_dma_ops {
        /* DMA Ops */
        int (*init)(struct dw_mci *host);
-       void (*start)(struct dw_mci *host, unsigned int sg_len);
-       void (*complete)(struct dw_mci *host);
+       int (*start)(struct dw_mci *host, unsigned int sg_len);
+       void (*complete)(void *host);
        void (*stop)(struct dw_mci *host);
        void (*cleanup)(struct dw_mci *host);
        void (*exit)(struct dw_mci *host);
index 83b81fd865f3bba12e7bc7d4c0ac8091ca067c09..8673ffe3d86ef83fc657cde31a43058b94840f61 100644 (file)
@@ -292,18 +292,6 @@ struct mmc_host {
 
        mmc_pm_flag_t           pm_caps;        /* supported pm features */
 
-#ifdef CONFIG_MMC_CLKGATE
-       int                     clk_requests;   /* internal reference counter */
-       unsigned int            clk_delay;      /* number of MCI clk hold cycles */
-       bool                    clk_gated;      /* clock gated */
-       struct delayed_work     clk_gate_work; /* delayed clock gate */
-       unsigned int            clk_old;        /* old clock value cache */
-       spinlock_t              clk_lock;       /* lock for clk fields */
-       struct mutex            clk_gate_mutex; /* mutex for clock gating */
-       struct device_attribute clkgate_delay_attr;
-       unsigned long           clkgate_delay;
-#endif
-
        /* host specific block data */
        unsigned int            max_seg_size;   /* see blk_queue_max_segment_size */
        unsigned short          max_segs;       /* see blk_queue_max_segments */
@@ -423,6 +411,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply);
 int mmc_regulator_set_ocr(struct mmc_host *mmc,
                        struct regulator *supply,
                        unsigned short vdd_bit);
+int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios);
 #else
 static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
 {
@@ -435,6 +424,12 @@ static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
 {
        return 0;
 }
+
+static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
+                                         struct mmc_ios *ios)
+{
+       return -EINVAL;
+}
 #endif
 
 int mmc_regulator_get_supply(struct mmc_host *mmc);
@@ -479,26 +474,6 @@ static inline int mmc_host_packed_wr(struct mmc_host *host)
        return host->caps2 & MMC_CAP2_PACKED_WR;
 }
 
-#ifdef CONFIG_MMC_CLKGATE
-void mmc_host_clk_hold(struct mmc_host *host);
-void mmc_host_clk_release(struct mmc_host *host);
-unsigned int mmc_host_clk_rate(struct mmc_host *host);
-
-#else
-static inline void mmc_host_clk_hold(struct mmc_host *host)
-{
-}
-
-static inline void mmc_host_clk_release(struct mmc_host *host)
-{
-}
-
-static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
-{
-       return host->ios.clock;
-}
-#endif
-
 static inline int mmc_card_hs(struct mmc_card *card)
 {
        return card->host->ios.timing == MMC_TIMING_SD_HS ||
index ad939d0ba816372b975d5c44d9582617b7b618e0..0b4460374020fd98e40248c990eb6110493a21be 100644 (file)
@@ -174,6 +174,7 @@ struct msi_controller {
 struct irq_domain;
 struct irq_chip;
 struct device_node;
+struct fwnode_handle;
 struct msi_domain_info;
 
 /**
@@ -262,7 +263,7 @@ enum {
 int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
                            bool force);
 
-struct irq_domain *msi_create_irq_domain(struct device_node *of_node,
+struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
                                         struct msi_domain_info *info,
                                         struct irq_domain *parent);
 int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
@@ -270,7 +271,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
 
-struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
+struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
                                                  struct msi_domain_info *info,
                                                  struct irq_domain *parent);
 int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
@@ -280,19 +281,26 @@ void platform_msi_domain_free_irqs(struct device *dev);
 
 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
 void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
-struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
                                             struct msi_domain_info *info,
                                             struct irq_domain *parent);
 int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
                              int nvec, int type);
 void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
-struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
                 struct msi_domain_info *info, struct irq_domain *parent);
 
 irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
                                          struct msi_desc *desc);
 int pci_msi_domain_check_cap(struct irq_domain *domain,
                             struct msi_domain_info *info, struct device *dev);
+u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
+struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
+#else
+static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
+{
+       return NULL;
+}
 #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
 
 #endif /* LINUX_MSI_H */
index 2d15e383144038ca12d5e876bf4e5cbfe5f84feb..210d11a75e4ff36bcdedb6496c9e5943cb24f465 100644 (file)
@@ -1054,6 +1054,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     This function is used to pass protocol port error state information
  *     to the switch driver. The switch driver can react to the proto_down
  *      by doing a phys down on the associated switch port.
+ * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
+ *     This function is used to get egress tunnel information for a given skb.
+ *     This is useful for retrieving outer tunnel header parameters while
+ *     sampling packets.
  *
  */
 struct net_device_ops {
@@ -1227,6 +1231,8 @@ struct net_device_ops {
        int                     (*ndo_get_iflink)(const struct net_device *dev);
        int                     (*ndo_change_proto_down)(struct net_device *dev,
                                                         bool proto_down);
+       int                     (*ndo_fill_metadata_dst)(struct net_device *dev,
+                                                      struct sk_buff *skb);
 };
 
 /**
@@ -2203,6 +2209,7 @@ void dev_add_offload(struct packet_offload *po);
 void dev_remove_offload(struct packet_offload *po);
 
 int dev_get_iflink(const struct net_device *dev);
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
                                      unsigned short mask);
 struct net_device *dev_get_by_name(struct net *net, const char *name);
index f3191828f037368a28b3fc1affc2c5d919b74001..87d6d1632dd4866064fb7319e7ed6f2bff260c49 100644 (file)
@@ -29,6 +29,7 @@ struct device_node;
  */
 enum of_gpio_flags {
        OF_GPIO_ACTIVE_LOW = 0x1,
+       OF_GPIO_SINGLE_ENDED = 0x2,
 };
 
 #ifdef CONFIG_OF_GPIO
index 4bcbd586a67296081fadb219ee96f32d7fcb65b2..65d969246a4d02e1ee8854451c923d5003941d31 100644 (file)
@@ -46,6 +46,11 @@ extern int of_irq_get(struct device_node *dev, int index);
 extern int of_irq_get_byname(struct device_node *dev, const char *name);
 extern int of_irq_to_resource_table(struct device_node *dev,
                struct resource *res, int nr_irqs);
+extern struct irq_domain *of_msi_get_domain(struct device *dev,
+                                           struct device_node *np,
+                                           enum irq_domain_bus_token token);
+extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
+                                                      u32 rid);
 #else
 static inline int of_irq_count(struct device_node *dev)
 {
@@ -64,6 +69,17 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
 {
        return 0;
 }
+static inline struct irq_domain *of_msi_get_domain(struct device *dev,
+                                                  struct device_node *np,
+                                                  enum irq_domain_bus_token token)
+{
+       return NULL;
+}
+static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
+                                                             u32 rid)
+{
+       return NULL;
+}
 #endif
 
 #if defined(CONFIG_OF)
@@ -75,6 +91,7 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
 extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
 extern struct device_node *of_irq_find_parent(struct device_node *child);
 extern void of_msi_configure(struct device *dev, struct device_node *np);
+u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
 
 #else /* !CONFIG_OF */
 static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
@@ -87,6 +104,12 @@ static inline void *of_irq_find_parent(struct device_node *child)
 {
        return NULL;
 }
+
+static inline u32 of_msi_map_rid(struct device *dev,
+                                struct device_node *msi_np, u32 rid_in)
+{
+       return rid_in;
+}
 #endif /* !CONFIG_OF */
 
 #endif /* __OF_IRQ_H */
index e5a70132a240f66803a0c27a30eb2375111d97e0..88fa8af2b937c02a7402d5c8b2499009d4d0ff21 100644 (file)
@@ -17,7 +17,7 @@
 
 #include <linux/platform_device.h>
 
-#define INT_DMA_LCD                    25
+#define INT_DMA_LCD                    (NR_IRQS_LEGACY + 25)
 
 #define OMAP1_DMA_TOUT_IRQ             (1 << 0)
 #define OMAP_DMA_DROP_IRQ              (1 << 1)
index 834c4e52cb2d1cab57ba33a3e87c025bc7e4a7cc..c2fa3ecb0dce57dde9ad4a92a35dc4178d7a159e 100644 (file)
@@ -5,11 +5,12 @@
 #include <linux/rwsem.h>
 #include <linux/percpu.h>
 #include <linux/wait.h>
+#include <linux/rcu_sync.h>
 #include <linux/lockdep.h>
 
 struct percpu_rw_semaphore {
+       struct rcu_sync         rss;
        unsigned int __percpu   *fast_read_ctr;
-       atomic_t                write_ctr;
        struct rw_semaphore     rw_sem;
        atomic_t                slow_read_ctr;
        wait_queue_head_t       write_waitq;
index 092a0e8a479aa19569fa0704e050a853bc38883c..d841d33bcdc9c370742db6408e1f979594d37099 100644 (file)
@@ -140,33 +140,67 @@ struct hw_perf_event {
                };
 #endif
        };
+       /*
+        * If the event is a per task event, this will point to the task in
+        * question. See the comment in perf_event_alloc().
+        */
        struct task_struct              *target;
+
+/*
+ * hw_perf_event::state flags; used to track the PERF_EF_* state.
+ */
+#define PERF_HES_STOPPED       0x01 /* the counter is stopped */
+#define PERF_HES_UPTODATE      0x02 /* event->count up-to-date */
+#define PERF_HES_ARCH          0x04
+
        int                             state;
+
+       /*
+        * The last observed hardware counter value, updated with a
+        * local64_cmpxchg() such that pmu::read() can be called nested.
+        */
        local64_t                       prev_count;
+
+       /*
+        * The period to start the next sample with.
+        */
        u64                             sample_period;
+
+       /*
+        * The period we started this sample with.
+        */
        u64                             last_period;
+
+       /*
+        * However much is left of the current period; note that this is
+        * a full 64bit value and allows for generation of periods longer
+        * than hardware might allow.
+        */
        local64_t                       period_left;
+
+       /*
+        * State for throttling the event, see __perf_event_overflow() and
+        * perf_adjust_freq_unthr_context().
+        */
        u64                             interrupts_seq;
        u64                             interrupts;
 
+       /*
+        * State for freq target events, see __perf_event_overflow() and
+        * perf_adjust_freq_unthr_context().
+        */
        u64                             freq_time_stamp;
        u64                             freq_count_stamp;
 #endif
 };
 
-/*
- * hw_perf_event::state flags
- */
-#define PERF_HES_STOPPED       0x01 /* the counter is stopped */
-#define PERF_HES_UPTODATE      0x02 /* event->count up-to-date */
-#define PERF_HES_ARCH          0x04
-
 struct perf_event;
 
 /*
  * Common implementation detail of pmu::{start,commit,cancel}_txn
  */
-#define PERF_EVENT_TXN 0x1
+#define PERF_PMU_TXN_ADD  0x1          /* txn to add/schedule event on PMU */
+#define PERF_PMU_TXN_READ 0x2          /* txn to read event group from PMU */
 
 /**
  * pmu::capabilities flags
@@ -210,7 +244,19 @@ struct pmu {
 
        /*
         * Try and initialize the event for this PMU.
-        * Should return -ENOENT when the @event doesn't match this PMU.
+        *
+        * Returns:
+        *  -ENOENT     -- @event is not for this PMU
+        *
+        *  -ENODEV     -- @event is for this PMU but PMU not present
+        *  -EBUSY      -- @event is for this PMU but PMU temporarily unavailable
+        *  -EINVAL     -- @event is for this PMU but @event is not valid
+        *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
+        *  -EACCES     -- @event is for this PMU, @event is valid, but no privileges
+        *
+        *  0           -- @event is for this PMU and valid
+        *
+        * Other error return values are allowed.
         */
        int (*event_init)               (struct perf_event *event);
 
@@ -221,27 +267,61 @@ struct pmu {
        void (*event_mapped)            (struct perf_event *event); /*optional*/
        void (*event_unmapped)          (struct perf_event *event); /*optional*/
 
+       /*
+        * Flags for ->add()/->del()/ ->start()/->stop(). There are
+        * Flags for ->add()/->del()/->start()/->stop(). There are
+        */
 #define PERF_EF_START  0x01            /* start the counter when adding    */
 #define PERF_EF_RELOAD 0x02            /* reload the counter when starting */
 #define PERF_EF_UPDATE 0x04            /* update the counter when stopping */
 
        /*
-        * Adds/Removes a counter to/from the PMU, can be done inside
-        * a transaction, see the ->*_txn() methods.
+        * Adds/Removes a counter to/from the PMU, can be done inside a
+        * transaction, see the ->*_txn() methods.
+        *
+        * The add/del callbacks will reserve all hardware resources required
+        * to service the event, this includes any counter constraint
+        * scheduling etc.
+        *
+        * Called with IRQs disabled and the PMU disabled on the CPU the event
+        * is on.
+        *
+        * ->add() called without PERF_EF_START should result in the same state
+        *  as ->add() followed by ->stop().
+        *
+        * ->del() must always stop an event as if PERF_EF_UPDATE were set.
+        *  If it calls ->stop(), that ->stop() must deal with the event
+        *  already being stopped, without PERF_EF_UPDATE.
         */
        int  (*add)                     (struct perf_event *event, int flags);
        void (*del)                     (struct perf_event *event, int flags);
 
        /*
-        * Starts/Stops a counter present on the PMU. The PMI handler
-        * should stop the counter when perf_event_overflow() returns
-        * !0. ->start() will be used to continue.
+        * Starts/Stops a counter present on the PMU.
+        *
+        * The PMI handler should stop the counter when perf_event_overflow()
+        * returns !0. ->start() will be used to continue.
+        *
+        * Also used to change the sample period.
+        *
+        * Called with IRQs disabled and the PMU disabled on the CPU the event
+        * is on -- will be called from NMI context when the PMU generates
+        * NMIs.
+        *
+        * ->stop() with PERF_EF_UPDATE will read the counter and update
+        *  period/count values like ->read() would.
+        *
+        * ->start() with PERF_EF_RELOAD will reprogram the counter
+        *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
         */
        void (*start)                   (struct perf_event *event, int flags);
        void (*stop)                    (struct perf_event *event, int flags);
 
        /*
         * Updates the counter value of the event.
+        *
+        * For sampling capable PMUs this will also update the software period
+        * hw_perf_event::period_left field.
         */
        void (*read)                    (struct perf_event *event);
 
@@ -252,20 +332,26 @@ struct pmu {
         *
         * Start the transaction, after this ->add() doesn't need to
         * do schedulability tests.
+        *
+        * Optional.
         */
-       void (*start_txn)               (struct pmu *pmu); /* optional */
+       void (*start_txn)               (struct pmu *pmu, unsigned int txn_flags);
        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
+        *
+        * Optional.
         */
-       int  (*commit_txn)              (struct pmu *pmu); /* optional */
+       int  (*commit_txn)              (struct pmu *pmu);
        /*
         * Will cancel the transaction, assumes ->del() is called
         * for each successful ->add() during the transaction.
+        *
+        * Optional.
         */
-       void (*cancel_txn)              (struct pmu *pmu); /* optional */
+       void (*cancel_txn)              (struct pmu *pmu);
 
        /*
         * Will return the value for perf_event_mmap_page::index for this event,
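
As a purely illustrative sketch of the two-flag transaction interface above, a PMU that only batches scheduling (PERF_PMU_TXN_ADD) transactions might record the flags in ->start_txn() and turn the other callbacks into no-ops for anything else; the pmu name and the per-CPU bookkeeping are hypothetical.

    static DEFINE_PER_CPU(unsigned int, my_txn_flags);

    static void my_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
    {
            __this_cpu_write(my_txn_flags, txn_flags);
            if (txn_flags & ~PERF_PMU_TXN_ADD)
                    return;                 /* e.g. PERF_PMU_TXN_READ: nothing to batch */

            perf_pmu_disable(pmu);          /* batch the following ->add() calls */
    }

    static int my_pmu_commit_txn(struct pmu *pmu)
    {
            unsigned int flags = __this_cpu_read(my_txn_flags);

            __this_cpu_write(my_txn_flags, 0);
            if (flags & ~PERF_PMU_TXN_ADD)
                    return 0;

            /* The deferred schedulability test for the added events would go
             * here; return -EAGAIN on failure and the core then calls
             * ->cancel_txn().
             */
            perf_pmu_enable(pmu);
            return 0;
    }
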
index 281cb91ddcf54c3a5275acbbc30abd9f0e9d11b1..05082e407c4a7c242250b05a08fbf028447e6ad6 100644 (file)
  * struct dev_pin_info - pin state container for devices
  * @p: pinctrl handle for the containing device
  * @default_state: the default state for the handle, if found
+ * @init_state: the state at probe time, if found
+ * @sleep_state: the state at suspend time, if found
+ * @idle_state: the state at idle (runtime suspend) time, if found
  */
 struct dev_pin_info {
        struct pinctrl *p;
        struct pinctrl_state *default_state;
+       struct pinctrl_state *init_state;
 #ifdef CONFIG_PM
        struct pinctrl_state *sleep_state;
        struct pinctrl_state *idle_state;
@@ -35,6 +39,7 @@ struct dev_pin_info {
 };
 
 extern int pinctrl_bind_pins(struct device *dev);
+extern int pinctrl_init_done(struct device *dev);
 
 #else
 
@@ -45,5 +50,10 @@ static inline int pinctrl_bind_pins(struct device *dev)
        return 0;
 }
 
+static inline int pinctrl_init_done(struct device *dev)
+{
+       return 0;
+}
+
 #endif /* CONFIG_PINCTRL */
 #endif /* PINCTRL_DEVINFO_H */
index fe65962b264ff6e14eb6b0a5c322bc11f055f56e..d921afd5f10907c8d8058c0136463f6fcf54f345 100644 (file)
 
 /**
  * enum pin_config_param - possible pin configuration parameters
+ * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it
+ *     weakly drives the last value on a tristate bus, also known as a "bus
+ *     holder", "bus keeper" or "repeater". This allows another device on the
+ *     bus to change the value by driving the bus high or low and switching to
+ *     tristate. The argument is ignored.
  * @PIN_CONFIG_BIAS_DISABLE: disable any pin bias on the pin, a
  *     transition from say pull-up to pull-down implies that you disable
  *     pull-up in the process, this setting disables all biasing.
  *     if for example some other pin is going to drive the signal connected
  *     to it for a while. Pins used for input are usually always high
  *     impedance.
- * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it
- *     weakly drives the last value on a tristate bus, also known as a "bus
- *     holder", "bus keeper" or "repeater". This allows another device on the
- *     bus to change the value by driving the bus high or low and switching to
- *     tristate. The argument is ignored.
- * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
- *     impedance to VDD). If the argument is != 0 pull-up is enabled,
- *     if it is 0, pull-up is total, i.e. the pin is connected to VDD.
  * @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high
  *     impedance to GROUND). If the argument is != 0 pull-down is enabled,
  *     if it is 0, pull-down is total, i.e. the pin is connected to GROUND.
  *     If the argument is != 0 pull up/down is enabled, if it is 0, the
  *     configuration is ignored. The proper way to disable it is to use
  *     @PIN_CONFIG_BIAS_DISABLE.
- * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and
- *     low, this is the most typical case and is typically achieved with two
- *     active transistors on the output. Setting this config will enable
- *     push-pull mode, the argument is ignored.
+ * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
+ *     impedance to VDD). If the argument is != 0 pull-up is enabled,
+ *     if it is 0, pull-up is total, i.e. the pin is connected to VDD.
  * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
  *     collector) which means it is usually wired with other output ports
  *     which are then pulled up with an external resistor. Setting this
  * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
  *     (open emitter). Setting this config will enable open source mode, the
  *     argument is ignored.
+ * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and
+ *     low, this is the most typical case and is typically achieved with two
+ *     active transistors on the output. Setting this config will enable
+ *     push-pull mode, the argument is ignored.
  * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current
  *     passed as argument. The argument is in mA.
+ * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode,
+ *     which means it will wait for signals to settle when reading inputs. The
+ *     argument gives the debounce time in usecs. Setting the
+ *     argument to zero turns debouncing off.
  * @PIN_CONFIG_INPUT_ENABLE: enable the pin's input.  Note that this does not
  *     affect the pin's ability to drive output.  1 enables input, 0 disables
  *     input.
- * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
- *      If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
- *      schmitt-trigger mode is disabled.
  * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in
  *     schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis,
  *     the threshold value is given on a custom format as argument when
  *     setting pins to this mode.
- * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode,
- *     which means it will wait for signals to settle when reading inputs. The
- *     argument gives the debounce time in usecs. Setting the
- *     argument to zero turns debouncing off.
- * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
- *     supplies, the argument to this parameter (on a custom format) tells
- *     the driver which alternative power source to use.
- * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
- *     this parameter (on a custom format) tells the driver which alternative
- *     slew rate to use.
+ * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
+ *      If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
+ *      schmitt-trigger mode is disabled.
  * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power
  *     operation, if several modes of operation are supported these can be
  *     passed in the argument on a custom form, else just use argument 1
  *     1 to indicate high level, argument 0 to indicate low level. (Please
  *     see Documentation/pinctrl.txt, section "GPIO mode pitfalls" for a
  *     discussion around this parameter.)
+ * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
+ *     supplies, the argument to this parameter (on a custom format) tells
+ *     the driver which alternative power source to use.
+ * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
+ *     this parameter (on a custom format) tells the driver which alternative
+ *     slew rate to use.
  * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
  *     you need to pass in custom configurations to the pin controller, use
  *     PIN_CONFIG_END+1 as the base offset.
  */
 enum pin_config_param {
+       PIN_CONFIG_BIAS_BUS_HOLD,
        PIN_CONFIG_BIAS_DISABLE,
        PIN_CONFIG_BIAS_HIGH_IMPEDANCE,
-       PIN_CONFIG_BIAS_BUS_HOLD,
-       PIN_CONFIG_BIAS_PULL_UP,
        PIN_CONFIG_BIAS_PULL_DOWN,
        PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
-       PIN_CONFIG_DRIVE_PUSH_PULL,
+       PIN_CONFIG_BIAS_PULL_UP,
        PIN_CONFIG_DRIVE_OPEN_DRAIN,
        PIN_CONFIG_DRIVE_OPEN_SOURCE,
+       PIN_CONFIG_DRIVE_PUSH_PULL,
        PIN_CONFIG_DRIVE_STRENGTH,
+       PIN_CONFIG_INPUT_DEBOUNCE,
        PIN_CONFIG_INPUT_ENABLE,
-       PIN_CONFIG_INPUT_SCHMITT_ENABLE,
        PIN_CONFIG_INPUT_SCHMITT,
-       PIN_CONFIG_INPUT_DEBOUNCE,
-       PIN_CONFIG_POWER_SOURCE,
-       PIN_CONFIG_SLEW_RATE,
+       PIN_CONFIG_INPUT_SCHMITT_ENABLE,
        PIN_CONFIG_LOW_POWER_MODE,
        PIN_CONFIG_OUTPUT,
+       PIN_CONFIG_POWER_SOURCE,
+       PIN_CONFIG_SLEW_RATE,
        PIN_CONFIG_END = 0x7FFF,
 };
 
index b5919f8e6d1ad87c618047cdcc7aa7a06324b354..23073519339f35e5bf42ccd72764e0e58cd67a6e 100644 (file)
@@ -9,6 +9,13 @@
  *     hogs to configure muxing and pins at boot, and also as a state
  *     to go into when returning from sleep and idle in
  *     .pm_runtime_resume() or ordinary .resume() for example.
+ * @PINCTRL_STATE_INIT: normally the pinctrl will be set to "default"
+ *     before the driver's probe() function is called.  There are some
+ *     drivers where that is not appropriate because doing so would
+ *     glitch the pins.  In those cases you can add an "init" pinctrl
+ *     state, which is the state of the pins before driver probe.  After
+ *     probe, if the pins are still in the "init" state they'll be moved
+ *     to "default".
  * @PINCTRL_STATE_IDLE: the state the pinctrl handle shall be put into
  *     when the pins are idle. This is a state where the system is relaxed
  *     but not fully sleeping - some power may be on but clocks gated for
@@ -20,5 +27,6 @@
  *     ordinary .suspend() function.
  */
 #define PINCTRL_STATE_DEFAULT "default"
+#define PINCTRL_STATE_INIT "init"
 #define PINCTRL_STATE_IDLE "idle"
 #define PINCTRL_STATE_SLEEP "sleep"
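
Drivers normally never select the "init" state themselves (the pinctrl core applies it before probe and, if it is still active, moves the pins to "default" after probe), but as a hedged sketch with hypothetical names, a consumer could look it up like any other named state:

    static int my_apply_init_state(struct device *dev)
    {
            struct pinctrl *p = devm_pinctrl_get(dev);
            struct pinctrl_state *init;

            if (IS_ERR(p))
                    return PTR_ERR(p);

            init = pinctrl_lookup_state(p, PINCTRL_STATE_INIT);
            if (IS_ERR(init))
                    return 0;       /* no "init" state defined, nothing to do */

            return pinctrl_select_state(p, init);
    }
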
index d2be19a51acde32102710cdf0db536fe4d364f01..3c85a735c380539e71c4094f2dd48ef33d3e6c6f 100644 (file)
@@ -40,6 +40,7 @@ struct netxbig_led {
        int             mode_addr;
        int             *mode_val;
        int             bright_addr;
+       int             bright_max;
 };
 
 struct netxbig_led_platform_data {
index 1d2cd21242e8a75c1ac6aeb191d9964d2f3388ea..54bf1484d41f0333cc860c84307d23eda76477e5 100644 (file)
@@ -48,9 +48,9 @@ struct pps_source_info {
 
 struct pps_event_time {
 #ifdef CONFIG_NTP_PPS
-       struct timespec ts_raw;
+       struct timespec64 ts_raw;
 #endif /* CONFIG_NTP_PPS */
-       struct timespec ts_real;
+       struct timespec64 ts_real;
 };
 
 /* The main struct */
@@ -105,7 +105,7 @@ extern void pps_event(struct pps_device *pps,
 struct pps_device *pps_lookup_dev(void const *cookie);
 
 static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
-               struct timespec ts)
+               struct timespec64 ts)
 {
        kt->sec = ts.tv_sec;
        kt->nsec = ts.tv_nsec;
@@ -115,24 +115,24 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
 
 static inline void pps_get_ts(struct pps_event_time *ts)
 {
-       getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real);
+       ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real);
 }
 
 #else /* CONFIG_NTP_PPS */
 
 static inline void pps_get_ts(struct pps_event_time *ts)
 {
-       getnstimeofday(&ts->ts_real);
+       ktime_get_real_ts64(&ts->ts_real);
 }
 
 #endif /* CONFIG_NTP_PPS */
 
 /* Subtract known time delay from PPS event time(s) */
-static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta)
+static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta)
 {
-       ts->ts_real = timespec_sub(ts->ts_real, delta);
+       ts->ts_real = timespec64_sub(ts->ts_real, delta);
 #ifdef CONFIG_NTP_PPS
-       ts->ts_raw = timespec_sub(ts->ts_raw, delta);
+       ts->ts_raw = timespec64_sub(ts->ts_raw, delta);
 #endif
 }
 
index bea8dd8ff5e026f8fc3e3bc446af7aea7ec891e2..75e4e30677f1254fb2c9f076f71fc4e2df1fb9be 100644 (file)
@@ -26,7 +26,6 @@
  *         SOFTIRQ_MASK:       0x0000ff00
  *         HARDIRQ_MASK:       0x000f0000
  *             NMI_MASK:       0x00100000
- *       PREEMPT_ACTIVE:       0x00200000
  * PREEMPT_NEED_RESCHED:       0x80000000
  */
 #define PREEMPT_BITS   8
 
 #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
 
-#define PREEMPT_ACTIVE_BITS    1
-#define PREEMPT_ACTIVE_SHIFT   (NMI_SHIFT + NMI_BITS)
-#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
-
 /* We use the MSB mostly because its available */
 #define PREEMPT_NEED_RESCHED   0x80000000
 
  * Check whether we were atomic before we did preempt_disable():
  * (used by the scheduler)
  */
-#define in_atomic_preempt_off() \
-               ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
+#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
 
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
@@ -146,18 +140,6 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
-#define preempt_active_enter() \
-do { \
-       preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-       barrier(); \
-} while (0)
-
-#define preempt_active_exit() \
-do { \
-       barrier(); \
-       preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-} while (0)
-
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
new file mode 100644 (file)
index 0000000..a63a33e
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * RCU-based infrastructure for lightweight reader-writer locking
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (c) 2015, Red Hat, Inc.
+ *
+ * Author: Oleg Nesterov <oleg@redhat.com>
+ */
+
+#ifndef _LINUX_RCU_SYNC_H_
+#define _LINUX_RCU_SYNC_H_
+
+#include <linux/wait.h>
+#include <linux/rcupdate.h>
+
+enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC };
+
+/* Structure to mediate between updaters and fastpath-using readers.  */
+struct rcu_sync {
+       int                     gp_state;
+       int                     gp_count;
+       wait_queue_head_t       gp_wait;
+
+       int                     cb_state;
+       struct rcu_head         cb_head;
+
+       enum rcu_sync_type      gp_type;
+};
+
+extern void rcu_sync_lockdep_assert(struct rcu_sync *);
+
+/**
+ * rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
+ * @rsp: Pointer to rcu_sync structure to use for synchronization
+ *
+ * Returns true if readers are permitted to use their fastpaths.
+ * Must be invoked within an RCU read-side critical section whose
+ * flavor matches that of the rcu_sync structure.
+ */
+static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
+{
+#ifdef CONFIG_PROVE_RCU
+       rcu_sync_lockdep_assert(rsp);
+#endif
+       return !rsp->gp_state; /* GP_IDLE */
+}
+
+extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
+extern void rcu_sync_enter(struct rcu_sync *);
+extern void rcu_sync_exit(struct rcu_sync *);
+extern void rcu_sync_dtor(struct rcu_sync *);
+
+#define __RCU_SYNC_INITIALIZER(name, type) {                           \
+               .gp_state = 0,                                          \
+               .gp_count = 0,                                          \
+               .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \
+               .cb_state = 0,                                          \
+               .gp_type = type,                                        \
+       }
+
+#define        __DEFINE_RCU_SYNC(name, type)   \
+       struct rcu_sync name = __RCU_SYNC_INITIALIZER(name, type)
+
+#define DEFINE_RCU_SYNC(name)          \
+       __DEFINE_RCU_SYNC(name, RCU_SYNC)
+
+#define DEFINE_RCU_SCHED_SYNC(name)    \
+       __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC)
+
+#define DEFINE_RCU_BH_SYNC(name)       \
+       __DEFINE_RCU_SYNC(name, RCU_BH_SYNC)
+
+#endif /* _LINUX_RCU_SYNC_H_ */
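
A hedged sketch of the intended usage pattern for this new structure; the rcu_sync calls come from the header above, everything else (names, counters) is hypothetical. Readers stay on a cheap per-CPU fastpath while no updater is around, and an updater forces them onto the slow path for the duration of its critical section:

    static struct rcu_sync my_sync;
    static DEFINE_PER_CPU(int, my_fast_ctr);        /* per-CPU fastpath counter */
    static atomic_t my_slow_ctr;                    /* shared slowpath counter */

    static void my_init(void)
    {
            rcu_sync_init(&my_sync, RCU_SCHED_SYNC);
    }

    static void my_read_enter(void)
    {
            rcu_read_lock_sched();                  /* matches RCU_SCHED_SYNC */
            if (rcu_sync_is_idle(&my_sync))
                    __this_cpu_inc(my_fast_ctr);    /* no updater: fastpath */
            else
                    atomic_inc(&my_slow_ctr);       /* updater active: slowpath */
            rcu_read_unlock_sched();
    }

    static void my_write_begin(void)
    {
            /* Waits for a grace period, so every later reader sees !idle. */
            rcu_sync_enter(&my_sync);
    }

    static void my_write_end(void)
    {
            /* Readers may return to the fastpath after another grace period. */
            rcu_sync_exit(&my_sync);
    }
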
index 17c6b1f84a77d3b3073bc272fffc6a6138b322b5..5ed540986019b910ddee9645aff3605d13cf4b3b 100644 (file)
@@ -247,10 +247,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_entry_rcu(ptr, type, member) \
-({ \
-       typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
-       container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
-})
+       container_of(lockless_dereference(ptr), type, member)
 
 /**
  * Where are list_empty_rcu() and list_first_entry_rcu()?
index ff476515f7163ab1b0247cfd64c0a4f6a364b2f3..a0189ba67fde721a824cc7c8db4b1f015197059e 100644 (file)
@@ -160,7 +160,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
  * more than one CPU).
  */
 void call_rcu(struct rcu_head *head,
-             void (*func)(struct rcu_head *head));
+             rcu_callback_t func);
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
@@ -191,7 +191,7 @@ void call_rcu(struct rcu_head *head,
  * memory ordering guarantees.
  */
 void call_rcu_bh(struct rcu_head *head,
-                void (*func)(struct rcu_head *head));
+                rcu_callback_t func);
 
 /**
  * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@@ -213,7 +213,7 @@ void call_rcu_bh(struct rcu_head *head,
  * memory ordering guarantees.
  */
 void call_rcu_sched(struct rcu_head *head,
-                   void (*func)(struct rcu_head *rcu));
+                   rcu_callback_t func);
 
 void synchronize_sched(void);
 
@@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
                   struct rcu_synchronize *rs_array);
 
 #define _wait_rcu_gp(checktiny, ...) \
-do { \
-       call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
-       const int __n = ARRAY_SIZE(__crcu_array); \
-       struct rcu_synchronize __rs_array[__n]; \
-       \
-       __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \
+do {                                                                   \
+       call_rcu_func_t __crcu_array[] = { __VA_ARGS__ };               \
+       struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)];    \
+       __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array),              \
+                       __crcu_array, __rs_array);                      \
 } while (0)
 
 #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
@@ -275,7 +274,7 @@ do { \
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
  */
-void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
+void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void rcu_barrier_tasks(void);
 
@@ -298,12 +297,14 @@ void synchronize_rcu(void);
 
 static inline void __rcu_read_lock(void)
 {
-       preempt_disable();
+       if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+               preempt_disable();
 }
 
 static inline void __rcu_read_unlock(void)
 {
-       preempt_enable();
+       if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+               preempt_enable();
 }
 
 static inline void synchronize_rcu(void)
@@ -536,28 +537,8 @@ static inline int rcu_read_lock_sched_held(void)
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-/* Deprecate rcu_lockdep_assert():  Use RCU_LOCKDEP_WARN() instead. */
-static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void)
-{
-}
-
 #ifdef CONFIG_PROVE_RCU
 
-/**
- * rcu_lockdep_assert - emit lockdep splat if specified condition not met
- * @c: condition to check
- * @s: informative message
- */
-#define rcu_lockdep_assert(c, s)                                       \
-       do {                                                            \
-               static bool __section(.data.unlikely) __warned;         \
-               deprecate_rcu_lockdep_assert();                         \
-               if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
-                       __warned = true;                                \
-                       lockdep_rcu_suspicious(__FILE__, __LINE__, s);  \
-               }                                                       \
-       } while (0)
-
 /**
  * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
  * @c: condition to check
@@ -595,7 +576,6 @@ static inline void rcu_preempt_sleep_check(void)
 
 #else /* #ifdef CONFIG_PROVE_RCU */
 
-#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert()
 #define RCU_LOCKDEP_WARN(c, s) do { } while (0)
 #define rcu_sleep_check() do { } while (0)
 
@@ -811,6 +791,28 @@ static inline void rcu_preempt_sleep_check(void)
  */
 #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
 
+/**
+ * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
+ * @p: The pointer to hand off
+ *
+ * This is simply an identity function, but it documents where a pointer
+ * is handed off from RCU to some other synchronization mechanism, for
+ * example, reference counting or locking.  In C11, it would map to
+ * kill_dependency().  It could be used as follows:
+ *
+ *     rcu_read_lock();
+ *     p = rcu_dereference(gp);
+ *     long_lived = is_long_lived(p);
+ *     if (long_lived) {
+ *             if (!atomic_inc_not_zero(p->refcnt))
+ *                     long_lived = false;
+ *             else
+ *                     p = rcu_pointer_handoff(p);
+ *     }
+ *     rcu_read_unlock();
+ */
+#define rcu_pointer_handoff(p) (p)
+
 /**
  * rcu_read_lock() - mark the beginning of an RCU read-side critical section
  *
@@ -1066,7 +1068,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 #define __kfree_rcu(head, offset) \
        do { \
                BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
-               kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
+               kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
        } while (0)
 
 /**
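
For reference, a hedged sketch of a callback written against the rcu_callback_t signature that the prototypes above now use; the object type and its fields are hypothetical.

    struct my_obj {
            struct rcu_head rcu;
            int             payload;
    };

    /* Matches rcu_callback_t: void (*)(struct rcu_head *). */
    static void my_obj_free_rcu(struct rcu_head *head)
    {
            struct my_obj *obj = container_of(head, struct my_obj, rcu);

            kfree(obj);
    }

    static void my_obj_release(struct my_obj *obj)
    {
            /* Defer the kfree() until a grace period has elapsed. */
            call_rcu(&obj->rcu, my_obj_free_rcu);
    }
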
index ff968b7af3a4b61c9496f5222a7f600a42f5eca6..4c1aaf9cce7b3ac767c69d7849715d69089a9b48 100644 (file)
@@ -83,7 +83,7 @@ static inline void synchronize_sched_expedited(void)
 }
 
 static inline void kfree_call_rcu(struct rcu_head *head,
-                                 void (*func)(struct rcu_head *rcu))
+                                 rcu_callback_t func)
 {
        call_rcu(head, func);
 }
@@ -216,6 +216,7 @@ static inline bool rcu_is_watching(void)
 
 static inline void rcu_all_qs(void)
 {
+       barrier(); /* Avoid RCU read-side critical sections leaking across. */
 }
 
 #endif /* __LINUX_RCUTINY_H */
index 5abec82f325ecf5e2a0c06c21d566f94f5559721..60d15a080d7c3dcde45cf458df5632dfc5b36ac2 100644 (file)
@@ -48,7 +48,7 @@ void synchronize_rcu_bh(void);
 void synchronize_sched_expedited(void);
 void synchronize_rcu_expedited(void);
 
-void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
 
 /**
  * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
index 8fc0bfd8edc4434fc79fda8591323d6be41f0645..d68bb402120e2c1a6d640720ef384663ceaefebe 100644 (file)
@@ -296,6 +296,8 @@ typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg,
                                  unsigned int *val);
 typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
                                   unsigned int val);
+typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg,
+                                        unsigned int mask, unsigned int val);
 typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
 typedef void (*regmap_hw_free_context)(void *context);
 
@@ -335,6 +337,7 @@ struct regmap_bus {
        regmap_hw_gather_write gather_write;
        regmap_hw_async_write async_write;
        regmap_hw_reg_write reg_write;
+       regmap_hw_reg_update_bits reg_update_bits;
        regmap_hw_read read;
        regmap_hw_reg_read reg_read;
        regmap_hw_free_context free_context;
@@ -791,6 +794,9 @@ struct regmap_irq {
        unsigned int mask;
 };
 
+#define REGMAP_IRQ_REG(_irq, _off, _mask)              \
+       [_irq] = { .reg_offset = (_off), .mask = (_mask) }
+
 /**
  * Description of a generic regmap irq_chip.  This is not intended to
  * handle every possible interrupt controller, but it should handle a
@@ -800,6 +806,8 @@ struct regmap_irq {
  *
  * @status_base: Base status register address.
  * @mask_base:   Base mask register address.
+ * @unmask_base:  Base unmask register address, for chips that have
+ *                separate mask and unmask registers.
  * @ack_base:    Base ack address. If zero then the chip is clear on read.
  *               Using zero value is possible with @use_ack bit.
  * @wake_base:   Base address for wake enables.  If zero unsupported.
@@ -807,6 +815,7 @@ struct regmap_irq {
  * @init_ack_masked: Ack all masked interrupts once during initialization.
  * @mask_invert: Inverted mask register: cleared bits are masked out.
  * @use_ack:     Use @ack register even if it is zero.
+ * @ack_invert:  Inverted ack register: cleared bits for ack.
  * @wake_invert: Inverted wake register: cleared bits are wake enabled.
  * @runtime_pm:  Hold a runtime PM lock on the device when accessing it.
  *
@@ -820,12 +829,14 @@ struct regmap_irq_chip {
 
        unsigned int status_base;
        unsigned int mask_base;
+       unsigned int unmask_base;
        unsigned int ack_base;
        unsigned int wake_base;
        unsigned int irq_reg_stride;
        bool init_ack_masked:1;
        bool mask_invert:1;
        bool use_ack:1;
+       bool ack_invert:1;
        bool wake_invert:1;
        bool runtime_pm:1;
 
index b7b9501b41af4eab6a096601c3baf85c85854807..9e1e06c3ce051e63862a22e7edb5897e7027591e 100644 (file)
@@ -599,33 +599,42 @@ struct task_cputime_atomic {
                .sum_exec_runtime = ATOMIC64_INIT(0),           \
        }
 
-#ifdef CONFIG_PREEMPT_COUNT
-#define PREEMPT_DISABLED       (1 + PREEMPT_ENABLED)
-#else
-#define PREEMPT_DISABLED       PREEMPT_ENABLED
-#endif
+#define PREEMPT_DISABLED       (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
+
+/*
+ * Disable preemption until the scheduler is running -- use an unconditional
+ * value so that it also works on !PREEMPT_COUNT kernels.
+ *
+ * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
+ */
+#define INIT_PREEMPT_COUNT     PREEMPT_OFFSET
 
 /*
- * Disable preemption until the scheduler is running.
- * Reset by start_kernel()->sched_init()->init_idle().
+ * Initial preempt_count value; reflects the preempt_count schedule invariant
+ * which states that during context switches:
  *
- * We include PREEMPT_ACTIVE to avoid cond_resched() from working
- * before the scheduler is active -- see should_resched().
+ *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
+ *
+ * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
+ * Note: See finish_task_switch().
  */
-#define INIT_PREEMPT_COUNT     (PREEMPT_DISABLED + PREEMPT_ACTIVE)
+#define FORK_PREEMPT_COUNT     (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
 
 /**
  * struct thread_group_cputimer - thread group interval timer counts
  * @cputime_atomic:    atomic thread group interval timers.
- * @running:           non-zero when there are timers running and
- *                     @cputime receives updates.
+ * @running:           true when there are timers running and
+ *                     @cputime_atomic receives updates.
+ * @checking_timer:    true when a thread in the group is in the
+ *                     process of checking for thread group timers.
  *
  * This structure contains the version of task_cputime, above, that is
  * used for thread group CPU timer calculations.
  */
 struct thread_group_cputimer {
        struct task_cputime_atomic cputime_atomic;
-       int running;
+       bool running;
+       bool checking_timer;
 };
 
 #include <linux/rwsem.h>
@@ -1139,8 +1148,6 @@ struct sched_domain_topology_level {
 #endif
 };
 
-extern struct sched_domain_topology_level *sched_domain_topology;
-
 extern void set_sched_topology(struct sched_domain_topology_level *tl);
 extern void wake_up_if_idle(int cpu);
 
@@ -1189,10 +1196,10 @@ struct load_weight {
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series.
- * 1) load_avg factors the amount of time that a sched_entity is
- * runnable on a rq into its weight. For cfs_rq, it is the aggregated
- * such weights of all runnable and blocked sched_entities.
- * 2) util_avg factors frequency scaling into the amount of time
+ * 1) load_avg factors frequency scaling into the amount of time that a
+ * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the
+ * aggregated such weights of all runnable and blocked sched_entities.
+ * 2) util_avg factors frequency and cpu scaling into the amount of time
  * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
  * For cfs_rq, it is the aggregated such times of all runnable and
  * blocked sched_entities.
@@ -1342,10 +1349,12 @@ struct sched_dl_entity {
 
 union rcu_special {
        struct {
-               bool blocked;
-               bool need_qs;
-       } b;
-       short s;
+               u8 blocked;
+               u8 need_qs;
+               u8 exp_need_qs;
+               u8 pad; /* Otherwise the compiler can store garbage here. */
+       } b; /* Bits. */
+       u32 s; /* Set of bits. */
 };
 struct rcu_node;
 
index 9d303b8847df28e4851746515fd9ee388e1cf8e1..9089a2ae913ddf4d12f10a7bcff209bbd1894400 100644 (file)
@@ -21,4 +21,9 @@ static inline int dl_task(struct task_struct *p)
        return dl_prio(p->prio);
 }
 
+static inline bool dl_time_before(u64 a, u64 b)
+{
+       return (s64)(a - b) < 0;
+}
+
 #endif /* _SCHED_DEADLINE_H */
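
dl_time_before() orders deadline-clock timestamps by the sign of their
difference, which stays correct even when the 64-bit clock wraps, as long as
the two values are less than 2^63 apart. The same idiom in plain C, purely
for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    /* Same idea as dl_time_before(): compare wrapping 64-bit timestamps. */
    static inline bool before64(uint64_t a, uint64_t b)
    {
            return (int64_t)(a - b) < 0;
    }

    /*
     * before64(UINT64_MAX - 10, 5) is true: a timestamp taken just before
     * the counter wraps is still ordered before one taken just after it.
     */
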
index 2b0a30a6e31cf780fd8fcf11b7857b45b5c86bee..4398411236f16c3f87691162909dc6197fb62b08 100644 (file)
@@ -2708,7 +2708,7 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
        else if (skb->ip_summed == CHECKSUM_PARTIAL &&
-                skb_checksum_start_offset(skb) <= len)
+                skb_checksum_start_offset(skb) < 0)
                skb->ip_summed = CHECKSUM_NONE;
 }
 
index e6109a6cd8f65eb779163d1a084a6e0256a11db0..12910cf19869c7db32d1ca34ddda0931e80570d1 100644 (file)
@@ -24,9 +24,6 @@ struct smpboot_thread_data;
  *                     parked (cpu offline)
  * @unpark:            Optional unpark function, called when the thread is
  *                     unparked (cpu online)
- * @pre_unpark:                Optional unpark function, called before the thread is
- *                     unparked (cpu online). This is not guaranteed to be
- *                     called on the target cpu of the thread. Careful!
  * @cpumask:           Internal state.  To update which threads are unparked,
  *                     call smpboot_update_cpumask_percpu_thread().
  * @selfparking:       Thread is not parked by the park function.
@@ -42,7 +39,6 @@ struct smp_hotplug_thread {
        void                            (*cleanup)(unsigned int cpu, bool online);
        void                            (*park)(unsigned int cpu);
        void                            (*unpark)(unsigned int cpu);
-       void                            (*pre_unpark)(unsigned int cpu);
        cpumask_var_t                   cpumask;
        bool                            selfparking;
        const char                      *thread_comm;
index bdeb4567b71e55edc785d5173f5ccc15ec0cda69..f5f80c5643ac5669abbb1e3e736ded2f49b5f3a0 100644 (file)
@@ -215,8 +215,11 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
  */
 static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 {
-       int retval = __srcu_read_lock(sp);
+       int retval;
 
+       preempt_disable();
+       retval = __srcu_read_lock(sp);
+       preempt_enable();
        rcu_lock_acquire(&(sp)->dep_map);
        return retval;
 }
index 414d924318ce1ba57a448a485d8594a99a9012d5..0adedca24c5bfbd4ca7a25b80641e76e0e638738 100644 (file)
@@ -33,6 +33,8 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
                         struct cpu_stop_work *work_buf);
 int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
 int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+void stop_machine_park(int cpu);
+void stop_machine_unpark(int cpu);
 
 #else  /* CONFIG_SMP */
 
index a8d90db9c4b058626b2d5ddcc84f3fa5d16561aa..9ef7795e65e40c5dfbee53726909fbcb2ce341b0 100644 (file)
@@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
 #ifndef __HAVE_ARCH_STRLCPY
 size_t strlcpy(char *, const char *, size_t);
 #endif
+#ifndef __HAVE_ARCH_STRSCPY
+ssize_t __must_check strscpy(char *, const char *, size_t);
+#endif
 #ifndef __HAVE_ARCH_STRCAT
 extern char * strcat(char *, const char *);
 #endif
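
strscpy() is intended as a saner strlcpy(): the destination is always
NUL-terminated (for a non-zero size) and truncation is reported through the
return value rather than by forcing the caller to compare against
strlen(src). A usage sketch, assuming the semantics this series documents
for lib/string.c (characters copied on success, -E2BIG on truncation):

    char name[8];
    ssize_t len;

    len = strscpy(name, "tty0", sizeof(name));
    /* len == 4, name == "tty0" */

    len = strscpy(name, "very-long-device-name", sizeof(name));
    if (len == -E2BIG)
            pr_warn("device name truncated to '%s'\n", name);
    /* name now holds the NUL-terminated prefix "very-lo" */
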
index ba0ae09cbb21f2038c71104a435f6782f70c1668..ec89d846324cac714807f7c002339636c497e4d8 100644 (file)
@@ -263,8 +263,8 @@ extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
 /*
  * PPS accessor
  */
-extern void getnstime_raw_and_real(struct timespec *ts_raw,
-                                  struct timespec *ts_real);
+extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
+                                       struct timespec64 *ts_real);
 
 /*
  * Persistent clock related interfaces
index 9d3f1a5b6178a9dd1aa3914b35051b96f5ceb279..39c25dbebfe8043b71892840918a384b14bf6e70 100644 (file)
@@ -152,7 +152,7 @@ extern unsigned long tick_nsec;             /* SHIFTED_HZ period (nsec) */
 #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
 
 extern int do_adjtimex(struct timex *);
-extern void hardpps(const struct timespec *, const struct timespec *);
+extern void hardpps(const struct timespec64 *, const struct timespec64 *);
 
 int read_current_timer(unsigned long *timer_val);
 void ntp_notify_cmos_timer(void);
index 3dd5a781da99f163930e8a0611f269d1946daf6b..bfb74723f151512780ceb404255cb43ef92c764d 100644 (file)
@@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param {
         */
        int pio_dma_border; /* default is 64byte */
 
-       u32 type;
+       uintptr_t type;
        u32 enable_gpio;
 
        /*
index 0ec598381f9766182db52f246afc2f0a5f28b36f..3bff87a25a42fc2bcf2cff58cd201cdb10452fe1 100644 (file)
@@ -182,22 +182,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 # endif
 #endif
 
-struct vmalloc_info {
-       unsigned long   used;
-       unsigned long   largest_chunk;
-};
-
 #ifdef CONFIG_MMU
 #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
-extern void get_vmalloc_info(struct vmalloc_info *vmi);
 #else
-
 #define VMALLOC_TOTAL 0UL
-#define get_vmalloc_info(vmi)                  \
-do {                                           \
-       (vmi)->used = 0;                        \
-       (vmi)->largest_chunk = 0;               \
-} while (0)
 #endif
 
 #endif /* _LINUX_VMALLOC_H */
index 4a167b30a12ff0d127cccab36c07669689223441..b36d837c701ec9fe94280a91df3cf1e359ad50af 100644 (file)
@@ -63,7 +63,11 @@ struct unix_sock {
 #define UNIX_GC_MAYBE_CYCLE    1
        struct socket_wq        peer_wq;
 };
-#define unix_sk(__sk) ((struct unix_sock *)__sk)
+
+static inline struct unix_sock *unix_sk(const struct sock *sk)
+{
+       return (struct unix_sock *)sk;
+}
 
 #define peer_wait peer_wq.wait
 
index af9d5382f6cbae8c38d45106f5702bd1e66c5671..ce009710120ca8b541615b237a329ee089ec357b 100644 (file)
@@ -60,6 +60,38 @@ static inline struct metadata_dst *tun_rx_dst(int md_size)
        return tun_dst;
 }
 
+static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
+{
+       struct metadata_dst *md_dst = skb_metadata_dst(skb);
+       struct metadata_dst *new_md;
+       int md_size;
+
+       if (!md_dst)
+               return ERR_PTR(-EINVAL);
+       md_size = md_dst->u.tun_info.options_len;
+       new_md = metadata_dst_alloc(md_size, GFP_ATOMIC);
+       if (!new_md)
+               return ERR_PTR(-ENOMEM);
+
+       memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
+              sizeof(struct ip_tunnel_info) + md_size);
+       skb_dst_drop(skb);
+       dst_hold(&new_md->dst);
+       skb_dst_set(skb, &new_md->dst);
+       return new_md;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
+{
+       struct metadata_dst *dst;
+
+       dst = tun_dst_unclone(skb);
+       if (IS_ERR(dst))
+               return NULL;
+
+       return &dst->u.tun_info;
+}
+
 static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
                                                 __be16 flags,
                                                 __be64 tunnel_id,
index 186f3a1e1b1f6ddd898d0f5871cb46222ae6b80d..fc19376986259de8faf9c2be4c5c3376780e54ff 100644 (file)
@@ -113,12 +113,12 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
                          bool rearm);
 
-static void inline inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
 {
        __inet_twsk_schedule(tw, timeo, false);
 }
 
-static void inline inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
+static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
 {
        __inet_twsk_schedule(tw, timeo, true);
 }
index 7aa78440559a47db8e5ccc8ea69a34f87b90c125..e23717013a4e6cb1ef84e6b8ba6654b011ee8670 100644 (file)
@@ -828,6 +828,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
        if (sk_rcvqueues_full(sk, limit))
                return -ENOBUFS;
 
+       /*
+        * If the skb was allocated from pfmemalloc reserves, only
+        * allow SOCK_MEMALLOC sockets to use it as this socket is
+        * helping free memory
+        */
+       if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
+               return -ENOMEM;
+
        __sk_add_backlog(sk, skb);
        sk->sk_backlog.len += skb->truesize;
        return 0;
index 884e728b09d9a57e8a56ac8ea224b3f873cb81c3..26ede14597daba32f8bf2cfcc84a821d094cc3d3 100644 (file)
@@ -86,7 +86,7 @@
        .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
        SNDRV_CTL_ELEM_ACCESS_READWRITE, \
        .tlv.p  = (tlv_array),\
-       .info = snd_soc_info_volsw, \
+       .info = snd_soc_info_volsw_sx, \
        .get = snd_soc_get_volsw_sx,\
        .put = snd_soc_put_volsw_sx, \
        .private_value = (unsigned long)&(struct soc_mixer_control) \
        .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
        SNDRV_CTL_ELEM_ACCESS_READWRITE, \
        .tlv.p  = (tlv_array), \
-       .info = snd_soc_info_volsw, \
+       .info = snd_soc_info_volsw_sx, \
        .get = snd_soc_get_volsw_sx, \
        .put = snd_soc_put_volsw_sx, \
        .private_value = (unsigned long)&(struct soc_mixer_control) \
@@ -574,6 +574,8 @@ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol);
 int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_info *uinfo);
+int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol,
+                         struct snd_ctl_elem_info *uinfo);
 #define snd_soc_info_bool_ext          snd_ctl_boolean_mono_info
 int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol);
index 898be3a8db9aecfc8c9e942e03053b582be3af2a..6d8f8fba33414a40cefc344811ec3acfecd72f01 100644 (file)
 #define WM8904_MIC_REGS  2
 #define WM8904_GPIO_REGS 4
 #define WM8904_DRC_REGS  4
-#define WM8904_EQ_REGS   25
+#define WM8904_EQ_REGS   24
 
 /**
  * DRC configurations are specified with a label and a set of register
index 539d6bc3216a3784f9ad5b4d1e3ef06e7a4cc223..9b90c57517a918687189933ae6920b80d251e98e 100644 (file)
@@ -104,22 +104,17 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
             TP_ARGS(p));
 
 #ifdef CREATE_TRACE_POINTS
-static inline long __trace_sched_switch_state(struct task_struct *p)
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
 {
-       long state = p->state;
-
-#ifdef CONFIG_PREEMPT
 #ifdef CONFIG_SCHED_DEBUG
        BUG_ON(p != current);
 #endif /* CONFIG_SCHED_DEBUG */
+
        /*
-        * For all intents and purposes a preempted task is a running task.
+        * Preemption ignores task state, therefore preempted tasks are always
+        * RUNNING (we will not have dequeued if state != RUNNING).
         */
-       if (preempt_count() & PREEMPT_ACTIVE)
-               state = TASK_RUNNING | TASK_STATE_MAX;
-#endif /* CONFIG_PREEMPT */
-
-       return state;
+       return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
 }
 #endif /* CREATE_TRACE_POINTS */
 
@@ -128,10 +123,11 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
  */
 TRACE_EVENT(sched_switch,
 
-       TP_PROTO(struct task_struct *prev,
+       TP_PROTO(bool preempt,
+                struct task_struct *prev,
                 struct task_struct *next),
 
-       TP_ARGS(prev, next),
+       TP_ARGS(preempt, prev, next),
 
        TP_STRUCT__entry(
                __array(        char,   prev_comm,      TASK_COMM_LEN   )
@@ -147,7 +143,7 @@ TRACE_EVENT(sched_switch,
                memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
                __entry->prev_pid       = prev->pid;
                __entry->prev_prio      = prev->prio;
-               __entry->prev_state     = __trace_sched_switch_state(prev);
+               __entry->prev_state     = __trace_sched_switch_state(preempt, prev);
                memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
                __entry->next_pid       = next->pid;
                __entry->next_prio      = next->prio;
index 9df61f1edb0f8048472bee5136f81097e47ac569..3094618d382f4d661dd14f9ceb4f4180a8f2c39d 100644 (file)
  *     SA_RESTORER     0x04000000
  */
 
+#if !defined MINSIGSTKSZ || !defined SIGSTKSZ
 #define MINSIGSTKSZ    2048
 #define SIGSTKSZ       8192
+#endif
 
 #ifndef __ASSEMBLY__
 typedef struct {
index 1f5e6892392981bbd06a60ee6e1ff91de223e85d..7e385b83b9d82057ec79e6fbc76b4a258f0eec2a 100644 (file)
@@ -45,8 +45,24 @@ struct mmc_ioc_cmd {
 };
 #define mmc_ioc_cmd_set_data(ic, ptr) ic.data_ptr = (__u64)(unsigned long) ptr
 
-#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
+/**
+ * struct mmc_ioc_multi_cmd - multi command information
+ * @num_of_cmds: Number of commands to send. Must be equal to or less than
+ *     MMC_IOC_MAX_CMDS.
+ * @cmds: Array of commands with length equal to 'num_of_cmds'
+ */
+struct mmc_ioc_multi_cmd {
+       __u64 num_of_cmds;
+       struct mmc_ioc_cmd cmds[0];
+};
 
+#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
+/*
+ * MMC_IOC_MULTI_CMD: Used to send an array of MMC commands described by
+ *     the structure mmc_ioc_multi_cmd. The MMC driver will issue all
+ *     commands in the array, in sequence, to the card.
+ */
+#define MMC_IOC_MULTI_CMD _IOWR(MMC_BLOCK_MAJOR, 1, struct mmc_ioc_multi_cmd)
 /*
  * Since this ioctl is only meant to enhance (and not replace) normal access
  * to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES
@@ -54,4 +70,5 @@ struct mmc_ioc_cmd {
  * block device operations.
  */
 #define MMC_IOC_MAX_BYTES  (512L * 256)
+#define MMC_IOC_MAX_CMDS    255
 #endif /* LINUX_MMC_IOCTL_H */
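
MMC_IOC_MULTI_CMD takes a struct mmc_ioc_multi_cmd whose flexible cmds[]
array is sized by num_of_cmds. A userspace sketch; the helper and its
arguments are invented, and the two mmc_ioc_cmd entries are assumed to have
been filled exactly as they would be for a plain MMC_IOC_CMD call:

    #include <linux/mmc/ioctl.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    /* Issue two pre-filled commands back to back on an open mmcblk fd. */
    static int send_two_cmds(int fd, const struct mmc_ioc_cmd *c0,
                             const struct mmc_ioc_cmd *c1)
    {
            struct mmc_ioc_multi_cmd *multi;
            int ret;

            multi = calloc(1, sizeof(*multi) + 2 * sizeof(multi->cmds[0]));
            if (!multi)
                    return -1;

            multi->num_of_cmds = 2;	/* must not exceed MMC_IOC_MAX_CMDS */
            multi->cmds[0] = *c0;
            multi->cmds[1] = *c1;

            ret = ioctl(fd, MMC_IOC_MULTI_CMD, multi);
            free(multi);
            return ret;
    }
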
index 32e07d8cbaf47cf80841adf0e872d85c26c8c708..e663627a8ef36530b4dc658e9d30d4b7bc32a90a 100644 (file)
@@ -323,10 +323,10 @@ enum ovs_key_attr {
        OVS_KEY_ATTR_MPLS,      /* array of struct ovs_key_mpls.
                                 * The implementation may restrict
                                 * the accepted length of the array. */
-       OVS_KEY_ATTR_CT_STATE,  /* u8 bitmask of OVS_CS_F_* */
+       OVS_KEY_ATTR_CT_STATE,  /* u32 bitmask of OVS_CS_F_* */
        OVS_KEY_ATTR_CT_ZONE,   /* u16 connection tracking zone. */
        OVS_KEY_ATTR_CT_MARK,   /* u32 connection tracking mark */
-       OVS_KEY_ATTR_CT_LABEL /* 16-octet connection tracking label */
+       OVS_KEY_ATTR_CT_LABELS, /* 16-octet connection tracking label */
 
 #ifdef __KERNEL__
        OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ip_tunnel_info */
@@ -439,9 +439,9 @@ struct ovs_key_nd {
        __u8    nd_tll[ETH_ALEN];
 };
 
-#define OVS_CT_LABEL_LEN       16
-struct ovs_key_ct_label {
-       __u8    ct_label[OVS_CT_LABEL_LEN];
+#define OVS_CT_LABELS_LEN      16
+struct ovs_key_ct_labels {
+       __u8    ct_labels[OVS_CT_LABELS_LEN];
 };
 
 /* OVS_KEY_ATTR_CT_STATE flags */
@@ -449,9 +449,9 @@ struct ovs_key_ct_label {
 #define OVS_CS_F_ESTABLISHED       0x02 /* Part of an existing connection. */
 #define OVS_CS_F_RELATED           0x04 /* Related to an established
                                         * connection. */
-#define OVS_CS_F_INVALID           0x20 /* Could not track connection. */
-#define OVS_CS_F_REPLY_DIR         0x40 /* Flow is in the reply direction. */
-#define OVS_CS_F_TRACKED           0x80 /* Conntrack has occurred. */
+#define OVS_CS_F_REPLY_DIR         0x08 /* Flow is in the reply direction. */
+#define OVS_CS_F_INVALID           0x10 /* Could not track connection. */
+#define OVS_CS_F_TRACKED           0x20 /* Conntrack has occurred. */
 
 /**
  * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
@@ -618,22 +618,25 @@ struct ovs_action_hash {
 
 /**
  * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
- * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags.
+ * @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack
+ * table. This allows future packets for the same connection to be identified
+ * as 'established' or 'related'. The flow key for the current packet will
+ * retain the pre-commit connection state.
  * @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
  * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
  * mask, the corresponding bit in the value is copied to the connection
  * tracking mark field in the connection.
- * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN
+ * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN
  * mask. For each bit set in the mask, the corresponding bit in the value is
  * copied to the connection tracking label field in the connection.
  * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
  */
 enum ovs_ct_attr {
        OVS_CT_ATTR_UNSPEC,
-       OVS_CT_ATTR_FLAGS,      /* u8 bitmask of OVS_CT_F_*. */
+       OVS_CT_ATTR_COMMIT,     /* No argument, commits connection. */
        OVS_CT_ATTR_ZONE,       /* u16 zone id. */
        OVS_CT_ATTR_MARK,       /* mark to associate with this connection. */
-       OVS_CT_ATTR_LABEL,      /* label to associate with this connection. */
+       OVS_CT_ATTR_LABELS,     /* labels to associate with this connection. */
        OVS_CT_ATTR_HELPER,     /* netlink helper to assist detection of
                                   related connections. */
        __OVS_CT_ATTR_MAX
@@ -641,14 +644,6 @@ enum ovs_ct_attr {
 
 #define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1)
 
-/*
- * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_*
- * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows
- * future packets for the same connection to be identified as 'established'
- * or 'related'.
- */
-#define OVS_CT_F_COMMIT                0x01
-
 /**
  * enum ovs_action_attr - Action types.
  *
@@ -705,7 +700,7 @@ enum ovs_action_attr {
                                       * data immediately followed by a mask.
                                       * The data must be zero for the unmasked
                                       * bits. */
-       OVS_ACTION_ATTR_CT,           /* One nested OVS_CT_ATTR_* . */
+       OVS_ACTION_ATTR_CT,           /* Nested OVS_CT_ATTR_* . */
 
        __OVS_ACTION_ATTR_MAX,        /* Nothing past this will be accepted
                                       * from userspace. */
index 2881145cda86cda91621da082ebec83ae490f3c5..651221334f4980c189c9b713d75f5f8f912c40f0 100644 (file)
@@ -168,6 +168,7 @@ enum perf_branch_sample_type_shift {
 
        PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT     = 11, /* call/ret stack */
        PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT       = 12, /* indirect jumps */
+       PERF_SAMPLE_BRANCH_CALL_SHIFT           = 13, /* direct call */
 
        PERF_SAMPLE_BRANCH_MAX_SHIFT            /* non-ABI */
 };
@@ -188,6 +189,7 @@ enum perf_branch_sample_type {
 
        PERF_SAMPLE_BRANCH_CALL_STACK   = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
        PERF_SAMPLE_BRANCH_IND_JUMP     = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
+       PERF_SAMPLE_BRANCH_CALL         = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
 
        PERF_SAMPLE_BRANCH_MAX          = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
 };
@@ -476,7 +478,7 @@ struct perf_event_mmap_page {
         *   u64 delta;
         *
         *   quot = (cyc >> time_shift);
-        *   rem = cyc & ((1 << time_shift) - 1);
+        *   rem = cyc & (((u64)1 << time_shift) - 1);
         *   delta = time_offset + quot * time_mult +
         *              ((rem * time_mult) >> time_shift);
         *
@@ -507,7 +509,7 @@ struct perf_event_mmap_page {
         * And vice versa:
         *
         *   quot = cyc >> time_shift;
-        *   rem  = cyc & ((1 << time_shift) - 1);
+        *   rem  = cyc & (((u64)1 << time_shift) - 1);
         *   timestamp = time_zero + quot * time_mult +
         *               ((rem * time_mult) >> time_shift);
         */
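
The (u64) cast added to the documented formula matters once time_shift gets
close to 32, where a plain 1 << time_shift would overflow a 32-bit int. A
sketch of the userspace cycles-to-time conversion built directly from that
comment (the surrounding seqlock and capability checks are omitted):

    #include <stdint.h>
    #include <linux/perf_event.h>

    static uint64_t cyc_to_time(const struct perf_event_mmap_page *pc,
                                uint64_t cyc)
    {
            uint64_t quot = cyc >> pc->time_shift;
            uint64_t rem  = cyc & (((uint64_t)1 << pc->time_shift) - 1);

            return pc->time_zero + quot * pc->time_mult +
                   ((rem * pc->time_mult) >> pc->time_shift);
    }
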
index 702024769c74bc39f1e9f8400b23ec5bbd7a8ffb..9d8f5d10c1e553122be08d3407434ea24d19c136 100644 (file)
@@ -160,7 +160,7 @@ struct rtattr {
 
 /* Macros to handle rtattributes */
 
-#define RTA_ALIGNTO    4
+#define RTA_ALIGNTO    4U
 #define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) )
 #define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \
                         (rta)->rta_len >= sizeof(struct rtattr) && \
index 7530e7447620ef8e4a1fee697c016e84da52cf0f..8b8d39dfb67f1992bc8e9fa95af34c8c4f05eb3a 100644 (file)
@@ -43,7 +43,8 @@ struct screen_info {
        __u16 pages;            /* 0x32 */
        __u16 vesa_attributes;  /* 0x34 */
        __u32 capabilities;     /* 0x36 */
-       __u8  _reserved[6];     /* 0x3a */
+       __u32 ext_lfb_base;     /* 0x3a */
+       __u8  _reserved[2];     /* 0x3e */
 } __attribute__((packed));
 
 #define VIDEO_TYPE_MDA         0x10    /* Monochrome Text Display      */
@@ -69,6 +70,6 @@ struct screen_info {
 #define VIDEO_FLAGS_NOCURSOR   (1 << 0) /* The video mode has no cursor set */
 
 #define VIDEO_CAPABILITY_SKIP_QUIRKS   (1 << 0)
-
+#define VIDEO_CAPABILITY_64BIT_BASE    (1 << 1)        /* Frame buffer base is 64-bit */
 
 #endif /* _UAPI_SCREEN_INFO_H */
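
Together, ext_lfb_base and VIDEO_CAPABILITY_64BIT_BASE let a bootloader
describe a linear framebuffer located above 4 GiB. A sketch of how a
consumer of struct screen_info would reassemble the base address (si is
assumed to point at a populated struct screen_info):

    u64 lfb_base = si->lfb_base;

    if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
            lfb_base |= (u64)si->ext_lfb_base << 32;
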
index df0e09bb7dd5a20f068b6b2a916d6a33a3ccf5ef..9057d7af3ae145ba711c837f4bcbe58e851f2320 100644 (file)
@@ -11,8 +11,6 @@
 
 #include <linux/types.h>
 
-#include <linux/compiler.h>
-
 #define UFFD_API ((__u64)0xAA)
 /*
  * After implementing the respective features it will become:
index 9ce083960a2575df0bd2a4e31ac3c8b881012880..f18490985fc8e5f39d10ed442d302293ac0e7699 100644 (file)
@@ -107,5 +107,13 @@ struct sched_watchdog {
 #define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
 #define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
 #define SHUTDOWN_watchdog   4  /* Restart because watchdog time expired.     */
+/*
+ * A 'soft reset' has been requested for the domain. The expected behavior is
+ * to reset internal Xen state for the domain, returning it to the point where it
+ * was created but leaving the domain's memory contents and vCPU contexts
+ * intact. This will allow the domain to start over and set up all Xen specific
+ * interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
 
 #endif /* __XEN_PUBLIC_SCHED_H__ */
index 66c4f567eb7368d21ff11377f629c53cc169bc8b..1471db9a7e6112b3316ae887b50c6d8d1352f171 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
                return retval;
        }
 
-       /* ipc_addid() locks msq upon success. */
-       id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
-       if (id < 0) {
-               ipc_rcu_putref(msq, msg_rcu_free);
-               return id;
-       }
-
        msq->q_stime = msq->q_rtime = 0;
        msq->q_ctime = get_seconds();
        msq->q_cbytes = msq->q_qnum = 0;
@@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
        INIT_LIST_HEAD(&msq->q_receivers);
        INIT_LIST_HEAD(&msq->q_senders);
 
+       /* ipc_addid() locks msq upon success. */
+       id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+       if (id < 0) {
+               ipc_rcu_putref(msq, msg_rcu_free);
+               return id;
+       }
+
        ipc_unlock_object(&msq->q_perm);
        rcu_read_unlock();
 
index 222131e8e38f334547004bf0830b26bf808cc6a2..41787276e14170af7de8261181721991fde528bf 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -551,12 +551,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        if (IS_ERR(file))
                goto no_file;
 
-       id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
-       if (id < 0) {
-               error = id;
-               goto no_id;
-       }
-
        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
@@ -565,6 +559,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        shp->shm_nattch = 0;
        shp->shm_file = file;
        shp->shm_creator = current;
+
+       id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+       if (id < 0) {
+               error = id;
+               goto no_id;
+       }
+
        list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 
        /*
index be4230020a1f718c31b02012554600c710b928b9..0f401d94b7c657d5e7126fe78f149c94ffea8e24 100644 (file)
@@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
        rcu_read_lock();
        spin_lock(&new->lock);
 
+       current_euid_egid(&euid, &egid);
+       new->cuid = new->uid = euid;
+       new->gid = new->cgid = egid;
+
        id = idr_alloc(&ids->ipcs_idr, new,
                       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
                       GFP_NOWAIT);
@@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
 
        ids->in_use++;
 
-       current_euid_egid(&euid, &egid);
-       new->cuid = new->uid = euid;
-       new->gid = new->cgid = egid;
-
        if (next_id < 0) {
                new->seq = ids->seq++;
                if (ids->seq > IPCID_SEQ_MAX)
index 82cf9dff4295eaa82305fe04a43cecb0c8a4c27e..85ff5e26e23b45b34201120c758082599f995b7e 100644 (file)
@@ -102,19 +102,6 @@ void get_online_cpus(void)
 }
 EXPORT_SYMBOL_GPL(get_online_cpus);
 
-bool try_get_online_cpus(void)
-{
-       if (cpu_hotplug.active_writer == current)
-               return true;
-       if (!mutex_trylock(&cpu_hotplug.lock))
-               return false;
-       cpuhp_lock_acquire_tryread();
-       atomic_inc(&cpu_hotplug.refcount);
-       mutex_unlock(&cpu_hotplug.lock);
-       return true;
-}
-EXPORT_SYMBOL_GPL(try_get_online_cpus);
-
 void put_online_cpus(void)
 {
        int refcount;
@@ -304,8 +291,8 @@ static inline void check_for_tasks(int dead_cpu)
 {
        struct task_struct *g, *p;
 
-       read_lock_irq(&tasklist_lock);
-       do_each_thread(g, p) {
+       read_lock(&tasklist_lock);
+       for_each_process_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
@@ -320,8 +307,8 @@ static inline void check_for_tasks(int dead_cpu)
 
                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
-       } while_each_thread(g, p);
-       read_unlock_irq(&tasklist_lock);
+       }
+       read_unlock(&tasklist_lock);
 }
 
 struct take_cpu_down_param {
@@ -344,7 +331,7 @@ static int take_cpu_down(void *_param)
        /* Give up timekeeping duties */
        tick_handover_do_timer();
        /* Park the stopper thread */
-       kthread_park(current);
+       stop_machine_park((long)param->hcpu);
        return 0;
 }
 
index f548f69c4299dd1ee44bfdc1f84d79d655d0d6d7..ea02109aee77e057713ff8787f5e2d6eb9f9fb23 100644 (file)
@@ -196,7 +196,7 @@ static int perf_sample_period_ns __read_mostly      = DEFAULT_SAMPLE_PERIOD_NS;
 static int perf_sample_allowed_ns __read_mostly =
        DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
 
-void update_perf_cpu_limits(void)
+static void update_perf_cpu_limits(void)
 {
        u64 tmp = perf_sample_period_ns;
 
@@ -472,7 +472,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
  * mode SWOUT : schedule out everything
  * mode SWIN : schedule in based on cgroup for next
  */
-void perf_cgroup_switch(struct task_struct *task, int mode)
+static void perf_cgroup_switch(struct task_struct *task, int mode)
 {
        struct perf_cpu_context *cpuctx;
        struct pmu *pmu;
@@ -1243,11 +1243,7 @@ static inline void perf_event__state_init(struct perf_event *event)
                                              PERF_EVENT_STATE_INACTIVE;
 }
 
-/*
- * Called at perf_event creation and when events are attached/detached from a
- * group.
- */
-static void perf_event__read_size(struct perf_event *event)
+static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
 {
        int entry = sizeof(u64); /* value */
        int size = 0;
@@ -1263,7 +1259,7 @@ static void perf_event__read_size(struct perf_event *event)
                entry += sizeof(u64);
 
        if (event->attr.read_format & PERF_FORMAT_GROUP) {
-               nr += event->group_leader->nr_siblings;
+               nr += nr_siblings;
                size += sizeof(u64);
        }
 
@@ -1271,14 +1267,11 @@ static void perf_event__read_size(struct perf_event *event)
        event->read_size = size;
 }
 
-static void perf_event__header_size(struct perf_event *event)
+static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
 {
        struct perf_sample_data *data;
-       u64 sample_type = event->attr.sample_type;
        u16 size = 0;
 
-       perf_event__read_size(event);
-
        if (sample_type & PERF_SAMPLE_IP)
                size += sizeof(data->ip);
 
@@ -1303,6 +1296,17 @@ static void perf_event__header_size(struct perf_event *event)
        event->header_size = size;
 }
 
+/*
+ * Called at perf_event creation and when events are attached/detached from a
+ * group.
+ */
+static void perf_event__header_size(struct perf_event *event)
+{
+       __perf_event_read_size(event,
+                              event->group_leader->nr_siblings);
+       __perf_event_header_size(event, event->attr.sample_type);
+}
+
 static void perf_event__id_header_size(struct perf_event *event)
 {
        struct perf_sample_data *data;
@@ -1330,6 +1334,27 @@ static void perf_event__id_header_size(struct perf_event *event)
        event->id_header_size = size;
 }
 
+static bool perf_event_validate_size(struct perf_event *event)
+{
+       /*
+        * The values computed here will be over-written when we actually
+        * attach the event.
+        */
+       __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
+       __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
+       perf_event__id_header_size(event);
+
+       /*
+        * Sum the lot; should not exceed the 64k limit we have on records.
+        * Conservative limit to allow for callchains and other variable fields.
+        */
+       if (event->read_size + event->header_size +
+           event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
+               return false;
+
+       return true;
+}
+
 static void perf_group_attach(struct perf_event *event)
 {
        struct perf_event *group_leader = event->group_leader, *pos;
@@ -1914,7 +1939,7 @@ group_sched_in(struct perf_event *group_event,
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
 
-       pmu->start_txn(pmu);
+       pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 
        if (event_sched_in(group_event, cpuctx, ctx)) {
                pmu->cancel_txn(pmu);
@@ -3184,14 +3209,22 @@ void perf_event_exec(void)
        rcu_read_unlock();
 }
 
+struct perf_read_data {
+       struct perf_event *event;
+       bool group;
+       int ret;
+};
+
 /*
  * Cross CPU call to read the hardware event
  */
 static void __perf_event_read(void *info)
 {
-       struct perf_event *event = info;
+       struct perf_read_data *data = info;
+       struct perf_event *sub, *event = data->event;
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+       struct pmu *pmu = event->pmu;
 
        /*
         * If this is a task context, we need to check whether it is
@@ -3208,9 +3241,35 @@ static void __perf_event_read(void *info)
                update_context_time(ctx);
                update_cgrp_time_from_event(event);
        }
+
        update_event_times(event);
-       if (event->state == PERF_EVENT_STATE_ACTIVE)
-               event->pmu->read(event);
+       if (event->state != PERF_EVENT_STATE_ACTIVE)
+               goto unlock;
+
+       if (!data->group) {
+               pmu->read(event);
+               data->ret = 0;
+               goto unlock;
+       }
+
+       pmu->start_txn(pmu, PERF_PMU_TXN_READ);
+
+       pmu->read(event);
+
+       list_for_each_entry(sub, &event->sibling_list, group_entry) {
+               update_event_times(sub);
+               if (sub->state == PERF_EVENT_STATE_ACTIVE) {
+                       /*
+                        * Use sibling's PMU rather than @event's since
+                        * sibling could be on different (eg: software) PMU.
+                        */
+                       sub->pmu->read(sub);
+               }
+       }
+
+       data->ret = pmu->commit_txn(pmu);
+
+unlock:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -3275,15 +3334,23 @@ u64 perf_event_read_local(struct perf_event *event)
        return val;
 }
 
-static u64 perf_event_read(struct perf_event *event)
+static int perf_event_read(struct perf_event *event, bool group)
 {
+       int ret = 0;
+
        /*
         * If event is enabled and currently active on a CPU, update the
         * value in the event structure:
         */
        if (event->state == PERF_EVENT_STATE_ACTIVE) {
+               struct perf_read_data data = {
+                       .event = event,
+                       .group = group,
+                       .ret = 0,
+               };
                smp_call_function_single(event->oncpu,
-                                        __perf_event_read, event, 1);
+                                        __perf_event_read, &data, 1);
+               ret = data.ret;
        } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
                struct perf_event_context *ctx = event->ctx;
                unsigned long flags;
@@ -3298,11 +3365,14 @@ static u64 perf_event_read(struct perf_event *event)
                        update_context_time(ctx);
                        update_cgrp_time_from_event(event);
                }
-               update_event_times(event);
+               if (group)
+                       update_group_times(event);
+               else
+                       update_event_times(event);
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        }
 
-       return perf_event_count(event);
+       return ret;
 }
 
 /*
@@ -3744,7 +3814,7 @@ static void put_event(struct perf_event *event)
         *     see the comment there.
         *
         *  2) there is a lock-inversion with mmap_sem through
-        *     perf_event_read_group(), which takes faults while
+        *     perf_read_group(), which takes faults while
         *     holding ctx->mutex, however this is called after
         *     the last filedesc died, so there is no possibility
         *     to trigger the AB-BA case.
@@ -3818,14 +3888,18 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
        *running = 0;
 
        mutex_lock(&event->child_mutex);
-       total += perf_event_read(event);
+
+       (void)perf_event_read(event, false);
+       total += perf_event_count(event);
+
        *enabled += event->total_time_enabled +
                        atomic64_read(&event->child_total_time_enabled);
        *running += event->total_time_running +
                        atomic64_read(&event->child_total_time_running);
 
        list_for_each_entry(child, &event->child_list, child_list) {
-               total += perf_event_read(child);
+               (void)perf_event_read(child, false);
+               total += perf_event_count(child);
                *enabled += child->total_time_enabled;
                *running += child->total_time_running;
        }
@@ -3835,55 +3909,95 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 }
 EXPORT_SYMBOL_GPL(perf_event_read_value);
 
-static int perf_event_read_group(struct perf_event *event,
-                                  u64 read_format, char __user *buf)
+static int __perf_read_group_add(struct perf_event *leader,
+                                       u64 read_format, u64 *values)
 {
-       struct perf_event *leader = event->group_leader, *sub;
-       struct perf_event_context *ctx = leader->ctx;
-       int n = 0, size = 0, ret;
-       u64 count, enabled, running;
-       u64 values[5];
+       struct perf_event *sub;
+       int n = 1; /* skip @nr */
+       int ret;
 
-       lockdep_assert_held(&ctx->mutex);
+       ret = perf_event_read(leader, true);
+       if (ret)
+               return ret;
 
-       count = perf_event_read_value(leader, &enabled, &running);
+       /*
+        * Since we co-schedule groups, {enabled,running} times of siblings
+        * will be identical to those of the leader, so we only publish one
+        * set.
+        */
+       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+               values[n++] += leader->total_time_enabled +
+                       atomic64_read(&leader->child_total_time_enabled);
+       }
 
-       values[n++] = 1 + leader->nr_siblings;
-       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-               values[n++] = enabled;
-       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-               values[n++] = running;
-       values[n++] = count;
+       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+               values[n++] += leader->total_time_running +
+                       atomic64_read(&leader->child_total_time_running);
+       }
+
+       /*
+        * Write {count,id} tuples for every sibling.
+        */
+       values[n++] += perf_event_count(leader);
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(leader);
 
-       size = n * sizeof(u64);
+       list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+               values[n++] += perf_event_count(sub);
+               if (read_format & PERF_FORMAT_ID)
+                       values[n++] = primary_event_id(sub);
+       }
 
-       if (copy_to_user(buf, values, size))
-               return -EFAULT;
+       return 0;
+}
 
-       ret = size;
+static int perf_read_group(struct perf_event *event,
+                                  u64 read_format, char __user *buf)
+{
+       struct perf_event *leader = event->group_leader, *child;
+       struct perf_event_context *ctx = leader->ctx;
+       int ret;
+       u64 *values;
 
-       list_for_each_entry(sub, &leader->sibling_list, group_entry) {
-               n = 0;
+       lockdep_assert_held(&ctx->mutex);
 
-               values[n++] = perf_event_read_value(sub, &enabled, &running);
-               if (read_format & PERF_FORMAT_ID)
-                       values[n++] = primary_event_id(sub);
+       values = kzalloc(event->read_size, GFP_KERNEL);
+       if (!values)
+               return -ENOMEM;
 
-               size = n * sizeof(u64);
+       values[0] = 1 + leader->nr_siblings;
 
-               if (copy_to_user(buf + ret, values, size)) {
-                       return -EFAULT;
-               }
+       /*
+        * By locking the child_mutex of the leader we effectively
+        * lock the child list of all siblings.. XXX explain how.
+        */
+       mutex_lock(&leader->child_mutex);
+
+       ret = __perf_read_group_add(leader, read_format, values);
+       if (ret)
+               goto unlock;
 
-               ret += size;
+       list_for_each_entry(child, &leader->child_list, child_list) {
+               ret = __perf_read_group_add(child, read_format, values);
+               if (ret)
+                       goto unlock;
        }
 
+       mutex_unlock(&leader->child_mutex);
+
+       ret = event->read_size;
+       if (copy_to_user(buf, values, event->read_size))
+               ret = -EFAULT;
+       goto out;
+
+unlock:
+       mutex_unlock(&leader->child_mutex);
+out:
+       kfree(values);
        return ret;
 }
 
-static int perf_event_read_one(struct perf_event *event,
+static int perf_read_one(struct perf_event *event,
                                 u64 read_format, char __user *buf)
 {
        u64 enabled, running;
@@ -3921,7 +4035,7 @@ static bool is_event_hup(struct perf_event *event)
  * Read the performance event - simple non blocking version for now
  */
 static ssize_t
-perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
+__perf_read(struct perf_event *event, char __user *buf, size_t count)
 {
        u64 read_format = event->attr.read_format;
        int ret;
@@ -3939,9 +4053,9 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
 
        WARN_ON_ONCE(event->ctx->parent_ctx);
        if (read_format & PERF_FORMAT_GROUP)
-               ret = perf_event_read_group(event, read_format, buf);
+               ret = perf_read_group(event, read_format, buf);
        else
-               ret = perf_event_read_one(event, read_format, buf);
+               ret = perf_read_one(event, read_format, buf);
 
        return ret;
 }
@@ -3954,7 +4068,7 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        int ret;
 
        ctx = perf_event_ctx_lock(event);
-       ret = perf_read_hw(event, buf, count);
+       ret = __perf_read(event, buf, count);
        perf_event_ctx_unlock(event, ctx);
 
        return ret;
@@ -3985,7 +4099,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 
 static void _perf_event_reset(struct perf_event *event)
 {
-       (void)perf_event_read(event);
+       (void)perf_event_read(event, false);
        local64_set(&event->count, 0);
        perf_event_update_userpage(event);
 }
@@ -7267,24 +7381,49 @@ static void perf_pmu_nop_void(struct pmu *pmu)
 {
 }
 
+static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
+{
+}
+
 static int perf_pmu_nop_int(struct pmu *pmu)
 {
        return 0;
 }
 
-static void perf_pmu_start_txn(struct pmu *pmu)
+static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
+
+static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
 {
+       __this_cpu_write(nop_txn_flags, flags);
+
+       if (flags & ~PERF_PMU_TXN_ADD)
+               return;
+
        perf_pmu_disable(pmu);
 }
 
 static int perf_pmu_commit_txn(struct pmu *pmu)
 {
+       unsigned int flags = __this_cpu_read(nop_txn_flags);
+
+       __this_cpu_write(nop_txn_flags, 0);
+
+       if (flags & ~PERF_PMU_TXN_ADD)
+               return 0;
+
        perf_pmu_enable(pmu);
        return 0;
 }
 
 static void perf_pmu_cancel_txn(struct pmu *pmu)
 {
+       unsigned int flags =  __this_cpu_read(nop_txn_flags);
+
+       __this_cpu_write(nop_txn_flags, 0);
+
+       if (flags & ~PERF_PMU_TXN_ADD)
+               return;
+
        perf_pmu_enable(pmu);
 }
 
@@ -7523,7 +7662,7 @@ got_cpu_context:
                        pmu->commit_txn = perf_pmu_commit_txn;
                        pmu->cancel_txn = perf_pmu_cancel_txn;
                } else {
-                       pmu->start_txn  = perf_pmu_nop_void;
+                       pmu->start_txn  = perf_pmu_nop_txn;
                        pmu->commit_txn = perf_pmu_nop_int;
                        pmu->cancel_txn = perf_pmu_nop_void;
                }
@@ -7611,7 +7750,7 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
        return ret;
 }
 
-struct pmu *perf_init_event(struct perf_event *event)
+static struct pmu *perf_init_event(struct perf_event *event)
 {
        struct pmu *pmu = NULL;
        int idx;
@@ -8297,13 +8436,35 @@ SYSCALL_DEFINE5(perf_event_open,
 
        if (move_group) {
                gctx = group_leader->ctx;
+               mutex_lock_double(&gctx->mutex, &ctx->mutex);
+       } else {
+               mutex_lock(&ctx->mutex);
+       }
+
+       if (!perf_event_validate_size(event)) {
+               err = -E2BIG;
+               goto err_locked;
+       }
+
+       /*
+        * Must be under the same ctx::mutex as perf_install_in_context(),
+        * because we need to serialize with concurrent event creation.
+        */
+       if (!exclusive_event_installable(event, ctx)) {
+               /* exclusive and group stuff are assumed mutually exclusive */
+               WARN_ON_ONCE(move_group);
+
+               err = -EBUSY;
+               goto err_locked;
+       }
 
+       WARN_ON_ONCE(ctx->parent_ctx);
+
+       if (move_group) {
                /*
                 * See perf_event_ctx_lock() for comments on the details
                 * of swizzling perf_event::ctx.
                 */
-               mutex_lock_double(&gctx->mutex, &ctx->mutex);
-
                perf_remove_from_context(group_leader, false);
 
                list_for_each_entry(sibling, &group_leader->sibling_list,
@@ -8311,13 +8472,7 @@ SYSCALL_DEFINE5(perf_event_open,
                        perf_remove_from_context(sibling, false);
                        put_ctx(gctx);
                }
-       } else {
-               mutex_lock(&ctx->mutex);
-       }
-
-       WARN_ON_ONCE(ctx->parent_ctx);
 
-       if (move_group) {
                /*
                 * Wait for everybody to stop referencing the events through
                 * the old lists, before installing it on new lists.
@@ -8349,22 +8504,29 @@ SYSCALL_DEFINE5(perf_event_open,
                perf_event__state_init(group_leader);
                perf_install_in_context(ctx, group_leader, group_leader->cpu);
                get_ctx(ctx);
-       }
 
-       if (!exclusive_event_installable(event, ctx)) {
-               err = -EBUSY;
-               mutex_unlock(&ctx->mutex);
-               fput(event_file);
-               goto err_context;
+               /*
+                * Now that all events are installed in @ctx, nothing
+                * references @gctx anymore, so drop the last reference we have
+                * on it.
+                */
+               put_ctx(gctx);
        }
 
+       /*
+        * Precalculate sample_data sizes; do while holding ctx::mutex such
+        * that we're serialized against further additions and before
+        * perf_install_in_context() which is the point the event is active and
+        * can use these values.
+        */
+       perf_event__header_size(event);
+       perf_event__id_header_size(event);
+
        perf_install_in_context(ctx, event, event->cpu);
        perf_unpin_context(ctx);
 
-       if (move_group) {
+       if (move_group)
                mutex_unlock(&gctx->mutex);
-               put_ctx(gctx);
-       }
        mutex_unlock(&ctx->mutex);
 
        put_online_cpus();
@@ -8375,12 +8537,6 @@ SYSCALL_DEFINE5(perf_event_open,
        list_add_tail(&event->owner_entry, &current->perf_event_list);
        mutex_unlock(&current->perf_event_mutex);
 
-       /*
-        * Precalculate sample_data sizes
-        */
-       perf_event__header_size(event);
-       perf_event__id_header_size(event);
-
        /*
         * Drop the reference on the group_event after placing the
         * new event on the sibling_list. This ensures destruction
@@ -8391,6 +8547,12 @@ SYSCALL_DEFINE5(perf_event_open,
        fd_install(event_fd, event_file);
        return event_fd;
 
+err_locked:
+       if (move_group)
+               mutex_unlock(&gctx->mutex);
+       mutex_unlock(&ctx->mutex);
+/* err_file: */
+       fput(event_file);
 err_context:
        perf_unpin_context(ctx);
        put_ctx(ctx);
@@ -9297,14 +9459,6 @@ static void perf_cgroup_exit(struct cgroup_subsys_state *css,
                             struct cgroup_subsys_state *old_css,
                             struct task_struct *task)
 {
-       /*
-        * cgroup_exit() is called in the copy_process() failure path.
-        * Ignore this case since the task hasn't ran yet, this avoids
-        * trying to poke a half freed task state from generic code.
-        */
-       if (!(task->flags & PF_EXITING))
-               return;
-
        task_function_call(task, __perf_cgroup_move, task);
 }
 
index 182bc30899d5b095d6e317327065eb7f8409795d..b5d1ea79c5953e2a2c7a5ee843de5f76ad47077a 100644 (file)
@@ -141,7 +141,7 @@ int perf_output_begin(struct perf_output_handle *handle,
        perf_output_get_handle(handle);
 
        do {
-               tail = READ_ONCE_CTRL(rb->user_page->data_tail);
+               tail = READ_ONCE(rb->user_page->data_tail);
                offset = head = local_read(&rb->head);
                if (!rb->overwrite &&
                    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
index ea95ee1b5ef7038ec1ade3f5cd5d98181177f22f..07110c6020a04ea37c04bc18bd0b9287cd0466dc 100644 (file)
@@ -706,10 +706,12 @@ void do_exit(long code)
        smp_mb();
        raw_spin_unlock_wait(&tsk->pi_lock);
 
-       if (unlikely(in_atomic()))
+       if (unlikely(in_atomic())) {
                pr_info("note: %s[%d] exited with preempt_count %d\n",
                        current->comm, task_pid_nr(current),
                        preempt_count());
+               preempt_count_set(PREEMPT_ENABLED);
+       }
 
        /* sync mm's RSS info before statistics gathering */
        if (tsk->mm)
@@ -761,7 +763,9 @@ void do_exit(long code)
         */
        flush_ptrace_hw_breakpoint(tsk);
 
+       TASKS_RCU(preempt_disable());
        TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
+       TASKS_RCU(preempt_enable());
        exit_notify(tsk, group_dead);
        proc_exit_connector(tsk);
 #ifdef CONFIG_NUMA
index 2845623fb58264eec28a8b99b48d4511856e718d..6ac894244d3978fb800f7a1a02912bb2901e5e84 100644 (file)
@@ -1101,7 +1101,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
        cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (cpu_limit != RLIM_INFINITY) {
                sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
-               sig->cputimer.running = 1;
+               sig->cputimer.running = true;
        }
 
        /* The timer lists. */
index 6e443efc65f41e8cd1d0bdf2d31afc63207390c1..dfc86e93c31d4a3b8d5e9b0f70ae1c3720411f25 100644 (file)
@@ -255,9 +255,18 @@ struct futex_hash_bucket {
        struct plist_head chain;
 } ____cacheline_aligned_in_smp;
 
-static unsigned long __read_mostly futex_hashsize;
+/*
+ * The base of the bucket array and its size are always used together
+ * (after initialization only in hash_futex()), so ensure that they
+ * reside in the same cacheline.
+ */
+static struct {
+       struct futex_hash_bucket *queues;
+       unsigned long            hashsize;
+} __futex_data __read_mostly __aligned(2*sizeof(long));
+#define futex_queues   (__futex_data.queues)
+#define futex_hashsize (__futex_data.hashsize)
 
-static struct futex_hash_bucket *futex_queues;
 
 /*
  * Fault injections for futexes.
index 9a76e3beda5423b7743f1d67a0945755b771c977..3b48dab801648dc1b7ef9962b2dd8d8067d9adf8 100644 (file)
@@ -30,6 +30,10 @@ config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
 config GENERIC_PENDING_IRQ
        bool
 
+# Support for generic irq migrating off cpu before the cpu is offline.
+config GENERIC_IRQ_MIGRATION
+       bool
+
 # Alpha specific irq affinity mechanism
 config AUTO_IRQ_AFFINITY
        bool
index d12123526e2b48b07dbf63274aa03f31dcd215e8..2fc9cbdf35b6221385ab1f8393098523cc4c43cd 100644 (file)
@@ -5,5 +5,6 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
+obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
 obj-$(CONFIG_PM_SLEEP) += pm.o
 obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
index e28169dd1c36a51f92523208984ec4c7a4b5b251..15206453b12aab09cf96dd09cc3fa2da92fd9c14 100644 (file)
 
 #include "internals.h"
 
+static irqreturn_t bad_chained_irq(int irq, void *dev_id)
+{
+       WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
+       return IRQ_NONE;
+}
+
+/*
+ * Chained handlers should never call the action on their IRQ. This default
+ * action will emit a warning if that happens.
+ */
+struct irqaction chained_action = {
+       .handler = bad_chained_irq,
+};
+
 /**
  *     irq_set_chip - set the irq chip for an irq
  *     @irq:   irq number
@@ -227,6 +241,13 @@ void irq_enable(struct irq_desc *desc)
  * disabled. If an interrupt happens, then the interrupt flow
  * handler masks the line at the hardware level and marks it
  * pending.
+ *
+ * If the interrupt chip does not implement the irq_disable callback,
+ * a driver can disable the lazy approach for a particular irq line by
+ * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
+ * be used for devices which cannot disable the interrupt at the
+ * device level under certain circumstances and have to use
+ * disable_irq[_nosync] instead.
  */
 void irq_disable(struct irq_desc *desc)
 {
@@ -234,6 +255,8 @@ void irq_disable(struct irq_desc *desc)
        if (desc->irq_data.chip->irq_disable) {
                desc->irq_data.chip->irq_disable(&desc->irq_data);
                irq_state_set_masked(desc);
+       } else if (irq_settings_disable_unlazy(desc)) {
+               mask_irq(desc);
        }
 }
 
@@ -669,7 +692,7 @@ void handle_percpu_irq(struct irq_desc *desc)
        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);
 
-       handle_irq_event_percpu(desc, desc->action);
+       handle_irq_event_percpu(desc);
 
        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
@@ -746,6 +769,8 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
                if (desc->irq_data.chip != &no_irq_chip)
                        mask_ack_irq(desc);
                irq_state_set_disabled(desc);
+               if (is_chained)
+                       desc->action = NULL;
                desc->depth = 1;
        }
        desc->handle_irq = handle;
@@ -755,6 +780,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
                irq_settings_set_noprobe(desc);
                irq_settings_set_norequest(desc);
                irq_settings_set_nothread(desc);
+               desc->action = &chained_action;
                irq_startup(desc, true);
        }
 }
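
A usage sketch for the new IRQ_DISABLE_UNLAZY flag described above (the driver names are hypothetical): a device that cannot mask the interrupt at the device level opts out of the lazy disable before requesting the line, so that disable_irq[_nosync]() masks it at the irq chip immediately.

	#include <linux/interrupt.h>
	#include <linux/irq.h>

	static int example_setup_irq(unsigned int irq, irq_handler_t handler,
				     void *dev)
	{
		/* Make irq_disable() mask at the chip instead of lazily. */
		irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

		return request_irq(irq, handler, 0, "example-dev", dev);
	}
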
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
new file mode 100644 (file)
index 0000000..80f4f4e
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Generic cpu hotunplug interrupt migration code copied from the
+ * arch/arm implementation
+ *
+ * Copyright (C) Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/interrupt.h>
+#include <linux/ratelimit.h>
+#include <linux/irq.h>
+
+#include "internals.h"
+
+static bool migrate_one_irq(struct irq_desc *desc)
+{
+       struct irq_data *d = irq_desc_get_irq_data(desc);
+       const struct cpumask *affinity = d->common->affinity;
+       struct irq_chip *c;
+       bool ret = false;
+
+       /*
+        * If this is a per-CPU interrupt, or the affinity does not
+        * include this CPU, then we have nothing to do.
+        */
+       if (irqd_is_per_cpu(d) ||
+           !cpumask_test_cpu(smp_processor_id(), affinity))
+               return false;
+
+       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+               affinity = cpu_online_mask;
+               ret = true;
+       }
+
+       c = irq_data_get_irq_chip(d);
+       if (!c->irq_set_affinity) {
+               pr_warn_ratelimited("IRQ%u: unable to set affinity\n", d->irq);
+       } else {
+               int r = irq_do_set_affinity(d, affinity, false);
+               if (r)
+                       pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
+                                           d->irq, r);
+       }
+
+       return ret;
+}
+
+/**
+ * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
+ *
+ * The current CPU has been marked offline.  Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
+ */
+void irq_migrate_all_off_this_cpu(void)
+{
+       unsigned int irq;
+       struct irq_desc *desc;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       for_each_active_irq(irq) {
+               bool affinity_broken;
+
+               desc = irq_to_desc(irq);
+               raw_spin_lock(&desc->lock);
+               affinity_broken = migrate_one_irq(desc);
+               raw_spin_unlock(&desc->lock);
+
+               if (affinity_broken)
+                       pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+                                           irq, smp_processor_id());
+       }
+
+       local_irq_restore(flags);
+}
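
A hedged sketch of how an architecture that selects GENERIC_IRQ_MIGRATION might call the new helper from its CPU teardown path (heavily simplified; a real __cpu_disable also tears down per-CPU timers, caches and the like):

	#include <linux/cpumask.h>
	#include <linux/irq.h>
	#include <linux/smp.h>

	int __cpu_disable(void)
	{
		unsigned int cpu = smp_processor_id();

		/* Mark this CPU offline first, so it is excluded as a target... */
		set_cpu_online(cpu, false);

		/* ...then push every interrupt still affine to it elsewhere. */
		irq_migrate_all_off_this_cpu();

		return 0;
	}
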
index de41a68fc038df70b8578e6ec1174fbcf53a1f0f..a302cf9a2126c8a911ff4da1a944c88c6d3b3f8a 100644 (file)
@@ -22,7 +22,6 @@
 
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
- * @irq:       the interrupt number
  * @desc:      description of the interrupt
  *
  * Handles spurious and unhandled IRQs. It also prints a debug message.
@@ -35,6 +34,7 @@ void handle_bad_irq(struct irq_desc *desc)
        kstat_incr_irqs_this_cpu(desc);
        ack_bad_irq(irq);
 }
+EXPORT_SYMBOL_GPL(handle_bad_irq);
 
 /*
  * Special, empty irq handler:
@@ -132,11 +132,11 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
        wake_up_process(action->thread);
 }
 
-irqreturn_t
-handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
 {
        irqreturn_t retval = IRQ_NONE;
        unsigned int flags = 0, irq = desc->irq_data.irq;
+       struct irqaction *action = desc->action;
 
        do {
                irqreturn_t res;
@@ -184,14 +184,13 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 
 irqreturn_t handle_irq_event(struct irq_desc *desc)
 {
-       struct irqaction *action = desc->action;
        irqreturn_t ret;
 
        desc->istate &= ~IRQS_PENDING;
        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        raw_spin_unlock(&desc->lock);
 
-       ret = handle_irq_event_percpu(desc, action);
+       ret = handle_irq_event_percpu(desc);
 
        raw_spin_lock(&desc->lock);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
index 5ef0c2dbe9302a846e724d5106c01a8eec248fda..05c2188271b86730323c9bce053adc2d16e8d5a3 100644 (file)
@@ -18,6 +18,8 @@
 
 extern bool noirqdebug;
 
+extern struct irqaction chained_action;
+
 /*
  * Bits used by threaded handlers:
  * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
@@ -81,7 +83,7 @@ extern void irq_mark_irq(unsigned int irq);
 
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 
-irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action);
+irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
 irqreturn_t handle_irq_event(struct irq_desc *desc);
 
 /* Resending of interrupts :*/
index dc9d27c0c1589e58bb14a15e3442ed2ff0d091a4..22aa9612ef7ca98cd8796188a7021810cc86d05a 100644 (file)
@@ -27,6 +27,57 @@ static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
                                  irq_hw_number_t hwirq, int node);
 static void irq_domain_check_hierarchy(struct irq_domain *domain);
 
+struct irqchip_fwid {
+       struct fwnode_handle fwnode;
+       char *name;
+       void *data;
+};
+
+/**
+ * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
+ *                           identifying an irq domain
+ * @data: optional user-provided data
+ *
+ * Allocate a struct irqchip_fwid, and return a pointer to the embedded
+ * fwnode_handle (or NULL on failure).
+ */
+struct fwnode_handle *irq_domain_alloc_fwnode(void *data)
+{
+       struct irqchip_fwid *fwid;
+       char *name;
+
+       fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
+       name = kasprintf(GFP_KERNEL, "irqchip@%p", data);
+
+       if (!fwid || !name) {
+               kfree(fwid);
+               kfree(name);
+               return NULL;
+       }
+
+       fwid->name = name;
+       fwid->data = data;
+       fwid->fwnode.type = FWNODE_IRQCHIP;
+       return &fwid->fwnode;
+}
+
+/**
+ * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
+ *
+ * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
+ */
+void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
+{
+       struct irqchip_fwid *fwid;
+
+       if (WARN_ON(fwnode->type != FWNODE_IRQCHIP))
+               return;
+
+       fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
+       kfree(fwid->name);
+       kfree(fwid);
+}
+
 /**
  * __irq_domain_add() - Allocate a new irq_domain data structure
  * @of_node: optional device-tree node of the interrupt controller
@@ -40,23 +91,28 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain);
  * Allocates and initializes an irq_domain structure.
  * Returns pointer to IRQ domain, or NULL on failure.
  */
-struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
                                    irq_hw_number_t hwirq_max, int direct_max,
                                    const struct irq_domain_ops *ops,
                                    void *host_data)
 {
        struct irq_domain *domain;
+       struct device_node *of_node;
+
+       of_node = to_of_node(fwnode);
 
        domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
                              GFP_KERNEL, of_node_to_nid(of_node));
        if (WARN_ON(!domain))
                return NULL;
 
+       of_node_get(of_node);
+
        /* Fill structure */
        INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
        domain->ops = ops;
        domain->host_data = host_data;
-       domain->of_node = of_node_get(of_node);
+       domain->fwnode = fwnode;
        domain->hwirq_max = hwirq_max;
        domain->revmap_size = size;
        domain->revmap_direct_max_irq = direct_max;
@@ -102,7 +158,7 @@ void irq_domain_remove(struct irq_domain *domain)
 
        pr_debug("Removed domain %s\n", domain->name);
 
-       of_node_put(domain->of_node);
+       of_node_put(irq_domain_get_of_node(domain));
        kfree(domain);
 }
 EXPORT_SYMBOL_GPL(irq_domain_remove);
@@ -133,7 +189,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 {
        struct irq_domain *domain;
 
-       domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
+       domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
        if (!domain)
                return NULL;
 
@@ -177,7 +233,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 {
        struct irq_domain *domain;
 
-       domain = __irq_domain_add(of_node, first_hwirq + size,
+       domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
                                  first_hwirq + size, 0, ops, host_data);
        if (domain)
                irq_domain_associate_many(domain, first_irq, first_hwirq, size);
@@ -187,12 +243,12 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
 
 /**
- * irq_find_matching_host() - Locates a domain for a given device node
- * @node: device-tree node of the interrupt controller
+ * irq_find_matching_fwnode() - Locates a domain for a given fwnode
+ * @fwnode: FW descriptor of the interrupt controller
  * @bus_token: domain-specific data
  */
-struct irq_domain *irq_find_matching_host(struct device_node *node,
-                                         enum irq_domain_bus_token bus_token)
+struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+                                           enum irq_domain_bus_token bus_token)
 {
        struct irq_domain *h, *found = NULL;
        int rc;
@@ -209,9 +265,9 @@ struct irq_domain *irq_find_matching_host(struct device_node *node,
        mutex_lock(&irq_domain_mutex);
        list_for_each_entry(h, &irq_domain_list, link) {
                if (h->ops->match)
-                       rc = h->ops->match(h, node, bus_token);
+                       rc = h->ops->match(h, to_of_node(fwnode), bus_token);
                else
-                       rc = ((h->of_node != NULL) && (h->of_node == node) &&
+                       rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
                              ((bus_token == DOMAIN_BUS_ANY) ||
                               (h->bus_token == bus_token)));
 
@@ -223,7 +279,7 @@ struct irq_domain *irq_find_matching_host(struct device_node *node,
        mutex_unlock(&irq_domain_mutex);
        return found;
 }
-EXPORT_SYMBOL_GPL(irq_find_matching_host);
+EXPORT_SYMBOL_GPL(irq_find_matching_fwnode);
 
 /**
  * irq_set_default_host() - Set a "default" irq domain
@@ -336,10 +392,12 @@ EXPORT_SYMBOL_GPL(irq_domain_associate);
 void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
                               irq_hw_number_t hwirq_base, int count)
 {
+       struct device_node *of_node;
        int i;
 
+       of_node = irq_domain_get_of_node(domain);
        pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
-               of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
+               of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
 
        for (i = 0; i < count; i++) {
                irq_domain_associate(domain, irq_base + i, hwirq_base + i);
@@ -359,12 +417,14 @@ EXPORT_SYMBOL_GPL(irq_domain_associate_many);
  */
 unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 {
+       struct device_node *of_node;
        unsigned int virq;
 
        if (domain == NULL)
                domain = irq_default_domain;
 
-       virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
+       of_node = irq_domain_get_of_node(domain);
+       virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
        if (!virq) {
                pr_debug("create_direct virq allocation failed\n");
                return 0;
@@ -399,6 +459,7 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
 unsigned int irq_create_mapping(struct irq_domain *domain,
                                irq_hw_number_t hwirq)
 {
+       struct device_node *of_node;
        int virq;
 
        pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
@@ -412,6 +473,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
        }
        pr_debug("-> using domain @%p\n", domain);
 
+       of_node = irq_domain_get_of_node(domain);
+
        /* Check if mapping already exists */
        virq = irq_find_mapping(domain, hwirq);
        if (virq) {
@@ -420,8 +483,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
        }
 
        /* Allocate a virtual interrupt number */
-       virq = irq_domain_alloc_descs(-1, 1, hwirq,
-                                     of_node_to_nid(domain->of_node));
+       virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node));
        if (virq <= 0) {
                pr_debug("-> virq allocation failed\n");
                return 0;
@@ -433,7 +495,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
        }
 
        pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
-               hwirq, of_node_full_name(domain->of_node), virq);
+               hwirq, of_node_full_name(of_node), virq);
 
        return virq;
 }
@@ -460,10 +522,12 @@ EXPORT_SYMBOL_GPL(irq_create_mapping);
 int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
                               irq_hw_number_t hwirq_base, int count)
 {
+       struct device_node *of_node;
        int ret;
 
+       of_node = irq_domain_get_of_node(domain);
        ret = irq_alloc_descs(irq_base, irq_base, count,
-                             of_node_to_nid(domain->of_node));
+                             of_node_to_nid(of_node));
        if (unlikely(ret < 0))
                return ret;
 
@@ -472,28 +536,56 @@ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
 }
 EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
 
-unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
+static int irq_domain_translate(struct irq_domain *d,
+                               struct irq_fwspec *fwspec,
+                               irq_hw_number_t *hwirq, unsigned int *type)
+{
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+       if (d->ops->translate)
+               return d->ops->translate(d, fwspec, hwirq, type);
+#endif
+       if (d->ops->xlate)
+               return d->ops->xlate(d, to_of_node(fwspec->fwnode),
+                                    fwspec->param, fwspec->param_count,
+                                    hwirq, type);
+
+       /* If domain has no translation, then we assume interrupt line */
+       *hwirq = fwspec->param[0];
+       return 0;
+}
+
+static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
+                                     struct irq_fwspec *fwspec)
+{
+       int i;
+
+       fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
+       fwspec->param_count = irq_data->args_count;
+
+       for (i = 0; i < irq_data->args_count; i++)
+               fwspec->param[i] = irq_data->args[i];
+}
+
+unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 {
        struct irq_domain *domain;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        int virq;
 
-       domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain;
+       if (fwspec->fwnode)
+               domain = irq_find_matching_fwnode(fwspec->fwnode, DOMAIN_BUS_ANY);
+       else
+               domain = irq_default_domain;
+
        if (!domain) {
                pr_warn("no irq domain found for %s !\n",
-                       of_node_full_name(irq_data->np));
+                       of_node_full_name(to_of_node(fwspec->fwnode)));
                return 0;
        }
 
-       /* If domain has no translation, then we assume interrupt line */
-       if (domain->ops->xlate == NULL)
-               hwirq = irq_data->args[0];
-       else {
-               if (domain->ops->xlate(domain, irq_data->np, irq_data->args,
-                                       irq_data->args_count, &hwirq, &type))
-                       return 0;
-       }
+       if (irq_domain_translate(domain, fwspec, &hwirq, &type))
+               return 0;
 
        if (irq_domain_is_hierarchy(domain)) {
                /*
@@ -504,7 +596,7 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
                if (virq)
                        return virq;
 
-               virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, irq_data);
+               virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
                if (virq <= 0)
                        return 0;
        } else {
@@ -520,6 +612,15 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
                irq_set_irq_type(virq, type);
        return virq;
 }
+EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
+
+unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
+{
+       struct irq_fwspec fwspec;
+
+       of_phandle_args_to_fwspec(irq_data, &fwspec);
+       return irq_create_fwspec_mapping(&fwspec);
+}
 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
 
 /**
@@ -590,14 +691,16 @@ static int virq_debug_show(struct seq_file *m, void *private)
                   "name", "mapped", "linear-max", "direct-max", "devtree-node");
        mutex_lock(&irq_domain_mutex);
        list_for_each_entry(domain, &irq_domain_list, link) {
+               struct device_node *of_node;
                int count = 0;
+               of_node = irq_domain_get_of_node(domain);
                radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
                        count++;
                seq_printf(m, "%c%-16s  %6u  %10u  %10u  %s\n",
                           domain == irq_default_domain ? '*' : ' ', domain->name,
                           domain->revmap_size + count, domain->revmap_size,
                           domain->revmap_direct_max_irq,
-                          domain->of_node ? of_node_full_name(domain->of_node) : "");
+                          of_node ? of_node_full_name(of_node) : "");
        }
        mutex_unlock(&irq_domain_mutex);
 
@@ -751,11 +854,11 @@ static int irq_domain_alloc_descs(int virq, unsigned int cnt,
 
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 /**
- * irq_domain_add_hierarchy - Add a irqdomain into the hierarchy
+ * irq_domain_create_hierarchy - Add a irqdomain into the hierarchy
  * @parent:    Parent irq domain to associate with the new domain
  * @flags:     Irq domain flags associated to the domain
  * @size:      Size of the domain. See below
- * @node:      Optional device-tree node of the interrupt controller
+ * @fwnode:    Optional fwnode of the interrupt controller
  * @ops:       Pointer to the interrupt domain callbacks
  * @host_data: Controller private data pointer
  *
@@ -765,19 +868,19 @@ static int irq_domain_alloc_descs(int virq, unsigned int cnt,
  * domain flags are set.
  * Returns pointer to IRQ domain, or NULL on failure.
  */
-struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
+struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
                                            unsigned int flags,
                                            unsigned int size,
-                                           struct device_node *node,
+                                           struct fwnode_handle *fwnode,
                                            const struct irq_domain_ops *ops,
                                            void *host_data)
 {
        struct irq_domain *domain;
 
        if (size)
-               domain = irq_domain_add_linear(node, size, ops, host_data);
+               domain = irq_domain_create_linear(fwnode, size, ops, host_data);
        else
-               domain = irq_domain_add_tree(node, ops, host_data);
+               domain = irq_domain_create_tree(fwnode, ops, host_data);
        if (domain) {
                domain->parent = parent;
                domain->flags |= flags;
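
For an irqchip without a device-tree node, the fwnode plumbing above can be exercised roughly as follows (a hedged sketch; the ops, the size of 32 hwirqs and the error handling are illustrative only):

	#include <linux/irqdomain.h>
	#include <linux/errno.h>

	static int example_create_domain(const struct irq_domain_ops *ops,
					 void *chip_data)
	{
		struct fwnode_handle *fwnode;
		struct irq_domain *domain;

		/* Synthesise an identity so the domain can be looked up by fwnode. */
		fwnode = irq_domain_alloc_fwnode(chip_data);
		if (!fwnode)
			return -ENOMEM;

		domain = irq_domain_create_linear(fwnode, 32, ops, chip_data);
		if (!domain) {
			irq_domain_free_fwnode(fwnode);
			return -ENOMEM;
		}

		return 0;
	}
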
index f9a59f6cabd2d3a96f8cd9bee179b6ab4214b83e..a71175ff98d58f7c274786cadd5b6b520678c665 100644 (file)
@@ -258,37 +258,6 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 }
 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 
-/**
- *     irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
- *     @irq: interrupt number to set affinity
- *     @vcpu_info: vCPU specific data
- *
- *     This function uses the vCPU specific data to set the vCPU
- *     affinity for an irq. The vCPU specific data is passed from
- *     outside, such as KVM. One example code path is as below:
- *     KVM -> IOMMU -> irq_set_vcpu_affinity().
- */
-int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
-{
-       unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
-       struct irq_data *data;
-       struct irq_chip *chip;
-       int ret = -ENOSYS;
-
-       if (!desc)
-               return -EINVAL;
-
-       data = irq_desc_get_irq_data(desc);
-       chip = irq_data_get_irq_chip(data);
-       if (chip && chip->irq_set_vcpu_affinity)
-               ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
-       irq_put_desc_unlock(desc, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
-
 static void irq_affinity_notify(struct work_struct *work)
 {
        struct irq_affinity_notify *notify =
@@ -424,6 +393,37 @@ setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 }
 #endif
 
+/**
+ *     irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
+ *     @irq: interrupt number to set affinity
+ *     @vcpu_info: vCPU specific data
+ *
+ *     This function uses the vCPU specific data to set the vCPU
+ *     affinity for an irq. The vCPU specific data is passed from
+ *     outside, such as KVM. One example code path is as below:
+ *     KVM -> IOMMU -> irq_set_vcpu_affinity().
+ */
+int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
+{
+       unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+       struct irq_data *data;
+       struct irq_chip *chip;
+       int ret = -ENOSYS;
+
+       if (!desc)
+               return -EINVAL;
+
+       data = irq_desc_get_irq_data(desc);
+       chip = irq_data_get_irq_chip(data);
+       if (chip && chip->irq_set_vcpu_affinity)
+               ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
+       irq_put_desc_unlock(desc, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
+
 void __disable_irq(struct irq_desc *desc)
 {
        if (!desc->depth++)
@@ -730,6 +730,12 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
+static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
+{
+       WARN(1, "Secondary action handler called for irq %d\n", irq);
+       return IRQ_NONE;
+}
+
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
        set_current_state(TASK_INTERRUPTIBLE);
@@ -756,7 +762,8 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 static void irq_finalize_oneshot(struct irq_desc *desc,
                                 struct irqaction *action)
 {
-       if (!(desc->istate & IRQS_ONESHOT))
+       if (!(desc->istate & IRQS_ONESHOT) ||
+           action->handler == irq_forced_secondary_handler)
                return;
 again:
        chip_bus_lock(desc);
@@ -910,6 +917,18 @@ static void irq_thread_dtor(struct callback_head *unused)
        irq_finalize_oneshot(desc, action);
 }
 
+static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
+{
+       struct irqaction *secondary = action->secondary;
+
+       if (WARN_ON_ONCE(!secondary))
+               return;
+
+       raw_spin_lock_irq(&desc->lock);
+       __irq_wake_thread(desc, secondary);
+       raw_spin_unlock_irq(&desc->lock);
+}
+
 /*
  * Interrupt handler thread
  */
@@ -940,6 +959,8 @@ static int irq_thread(void *data)
                action_ret = handler_fn(desc, action);
                if (action_ret == IRQ_HANDLED)
                        atomic_inc(&desc->threads_handled);
+               if (action_ret == IRQ_WAKE_THREAD)
+                       irq_wake_secondary(desc, action);
 
                wake_threads_waitq(desc);
        }
@@ -984,20 +1005,36 @@ void irq_wake_thread(unsigned int irq, void *dev_id)
 }
 EXPORT_SYMBOL_GPL(irq_wake_thread);
 
-static void irq_setup_forced_threading(struct irqaction *new)
+static int irq_setup_forced_threading(struct irqaction *new)
 {
        if (!force_irqthreads)
-               return;
+               return 0;
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
-               return;
+               return 0;
 
        new->flags |= IRQF_ONESHOT;
 
-       if (!new->thread_fn) {
-               set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
-               new->thread_fn = new->handler;
-               new->handler = irq_default_primary_handler;
+       /*
+        * Handle the case where we have a real primary handler and a
+        * thread handler. We force-thread them as well by creating a
+        * secondary action.
+        */
+       if (new->handler != irq_default_primary_handler && new->thread_fn) {
+               /* Allocate the secondary action */
+               new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+               if (!new->secondary)
+                       return -ENOMEM;
+               new->secondary->handler = irq_forced_secondary_handler;
+               new->secondary->thread_fn = new->thread_fn;
+               new->secondary->dev_id = new->dev_id;
+               new->secondary->irq = new->irq;
+               new->secondary->name = new->name;
        }
+       /* Deal with the primary handler */
+       set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+       new->thread_fn = new->handler;
+       new->handler = irq_default_primary_handler;
+       return 0;
 }
 
 static int irq_request_resources(struct irq_desc *desc)
@@ -1017,6 +1054,48 @@ static void irq_release_resources(struct irq_desc *desc)
                c->irq_release_resources(d);
 }
 
+static int
+setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
+{
+       struct task_struct *t;
+       struct sched_param param = {
+               .sched_priority = MAX_USER_RT_PRIO/2,
+       };
+
+       if (!secondary) {
+               t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+                                  new->name);
+       } else {
+               t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
+                                  new->name);
+               param.sched_priority -= 1;
+       }
+
+       if (IS_ERR(t))
+               return PTR_ERR(t);
+
+       sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
+
+       /*
+        * We keep the reference to the task struct even if
+        * the thread dies to avoid that the interrupt code
+        * references an already freed task_struct.
+        */
+       get_task_struct(t);
+       new->thread = t;
+       /*
+        * Tell the thread to set its affinity. This is
+        * important for shared interrupt handlers as we do
+        * not invoke setup_affinity() for the secondary
+        * handlers as everything is already set up. Even for
+        * interrupts marked with IRQF_NO_BALANCE this is
+        * correct as we want the thread to move to the cpu(s)
+        * on which the requesting code placed the interrupt.
+        */
+       set_bit(IRQTF_AFFINITY, &new->thread_flags);
+       return 0;
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -1037,6 +1116,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        if (!try_module_get(desc->owner))
                return -ENODEV;
 
+       new->irq = irq;
+
        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
@@ -1054,8 +1135,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 */
                new->handler = irq_nested_primary_handler;
        } else {
-               if (irq_settings_can_thread(desc))
-                       irq_setup_forced_threading(new);
+               if (irq_settings_can_thread(desc)) {
+                       ret = irq_setup_forced_threading(new);
+                       if (ret)
+                               goto out_mput;
+               }
        }
 
        /*
@@ -1064,37 +1148,14 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
         * thread.
         */
        if (new->thread_fn && !nested) {
-               struct task_struct *t;
-               static const struct sched_param param = {
-                       .sched_priority = MAX_USER_RT_PRIO/2,
-               };
-
-               t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
-                                  new->name);
-               if (IS_ERR(t)) {
-                       ret = PTR_ERR(t);
+               ret = setup_irq_thread(new, irq, false);
+               if (ret)
                        goto out_mput;
+               if (new->secondary) {
+                       ret = setup_irq_thread(new->secondary, irq, true);
+                       if (ret)
+                               goto out_thread;
                }
-
-               sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
-
-               /*
-                * We keep the reference to the task struct even if
-                * the thread dies to avoid that the interrupt code
-                * references an already freed task_struct.
-                */
-               get_task_struct(t);
-               new->thread = t;
-               /*
-                * Tell the thread to set its affinity. This is
-                * important for shared interrupt handlers as we do
-                * not invoke setup_affinity() for the secondary
-                * handlers as everything is already set up. Even for
-                * interrupts marked with IRQF_NO_BALANCE this is
-                * correct as we want the thread to move to the cpu(s)
-                * on which the requesting code placed the interrupt.
-                */
-               set_bit(IRQTF_AFFINITY, &new->thread_flags);
        }
 
        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -1267,7 +1328,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                                   irq, nmsk, omsk);
        }
 
-       new->irq = irq;
        *old_ptr = new;
 
        irq_pm_install_action(desc, new);
@@ -1293,6 +1353,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
         */
        if (new->thread)
                wake_up_process(new->thread);
+       if (new->secondary)
+               wake_up_process(new->secondary->thread);
 
        register_irq_proc(irq, desc);
        new->dir = NULL;
@@ -1323,6 +1385,13 @@ out_thread:
                kthread_stop(t);
                put_task_struct(t);
        }
+       if (new->secondary && new->secondary->thread) {
+               struct task_struct *t = new->secondary->thread;
+
+               new->secondary->thread = NULL;
+               kthread_stop(t);
+               put_task_struct(t);
+       }
 out_mput:
        module_put(desc->owner);
        return ret;
@@ -1394,6 +1463,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 
        /* If this was the last handler, shut down the IRQ line: */
        if (!desc->action) {
+               irq_settings_clr_disable_unlazy(desc);
                irq_shutdown(desc);
                irq_release_resources(desc);
        }
@@ -1430,9 +1500,14 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
        if (action->thread) {
                kthread_stop(action->thread);
                put_task_struct(action->thread);
+               if (action->secondary && action->secondary->thread) {
+                       kthread_stop(action->secondary->thread);
+                       put_task_struct(action->secondary->thread);
+               }
        }
 
        module_put(desc->owner);
+       kfree(action->secondary);
        return action;
 }
 
@@ -1576,8 +1651,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);
 
-       if (retval)
+       if (retval) {
+               kfree(action->secondary);
                kfree(action);
+       }
 
 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
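
The secondary-action machinery above only matters for drivers that register both a real primary handler and a threaded handler; a hedged sketch of such a registration (all names hypothetical), which the core can now force-thread when booted with threadirqs:

	#include <linux/interrupt.h>

	static irqreturn_t example_quick_check(int irq, void *dev)
	{
		/* Hard-irq (or forced-thread) context: defer the heavy lifting. */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t example_thread_fn(int irq, void *dev)
	{
		/* Long-running handling in process context. */
		return IRQ_HANDLED;
	}

	static int example_request(unsigned int irq, void *dev)
	{
		return request_threaded_irq(irq, example_quick_check,
					    example_thread_fn, IRQF_ONESHOT,
					    "example-dev", dev);
	}
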
index 7e6512b9dc1ff2682394cdd0fea9a8c6d01cf6e9..6b0c0b74a2a1a88c0d3f81519fea7c520b290967 100644 (file)
@@ -228,22 +228,18 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info)
 {
        struct irq_chip *chip = info->chip;
 
-       BUG_ON(!chip);
-       if (!chip->irq_mask)
-               chip->irq_mask = pci_msi_mask_irq;
-       if (!chip->irq_unmask)
-               chip->irq_unmask = pci_msi_unmask_irq;
+       BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
        if (!chip->irq_set_affinity)
                chip->irq_set_affinity = msi_domain_set_affinity;
 }
 
 /**
  * msi_create_irq_domain - Create a MSI interrupt domain
- * @of_node:   Optional device-tree node of the interrupt controller
+ * @fwnode:    Optional fwnode of the interrupt controller
  * @info:      MSI domain info
  * @parent:    Parent irq domain
  */
-struct irq_domain *msi_create_irq_domain(struct device_node *node,
+struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
                                         struct msi_domain_info *info,
                                         struct irq_domain *parent)
 {
@@ -252,8 +248,8 @@ struct irq_domain *msi_create_irq_domain(struct device_node *node,
        if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
                msi_domain_update_chip_ops(info);
 
-       return irq_domain_add_hierarchy(parent, 0, 0, node, &msi_domain_ops,
-                                       info);
+       return irq_domain_create_hierarchy(parent, 0, 0, fwnode,
+                                          &msi_domain_ops, info);
 }
 
 /**
index e3a8c9577ba641c38747a0925a7896eca7ef4d79..a916cf144b6550caf452fc304a1e848d40246b8a 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/mutex.h>
 
 #include "internals.h"
 
@@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
 
 void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 {
+       static DEFINE_MUTEX(register_lock);
        char name [MAX_NAMELEN];
 
-       if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
+       if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
                return;
 
+       /*
+        * irq directories are registered only when a handler is
+        * added, not when the descriptor is created, so multiple
+        * tasks might try to register at the same time.
+        */
+       mutex_lock(&register_lock);
+
+       if (desc->dir)
+               goto out_unlock;
+
        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);
 
        /* create /proc/irq/1234 */
        desc->dir = proc_mkdir(name, root_irq_dir);
        if (!desc->dir)
-               return;
+               goto out_unlock;
 
 #ifdef CONFIG_SMP
        /* create /proc/irq/<irq>/smp_affinity */
@@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 
        proc_create_data("spurious", 0444, desc->dir,
                         &irq_spurious_proc_fops, (void *)(long)irq);
+
+out_unlock:
+       mutex_unlock(&register_lock);
 }
 
 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
@@ -460,7 +475,7 @@ int show_interrupts(struct seq_file *p, void *v)
        for_each_online_cpu(j)
                any_count |= kstat_irqs_cpu(i, j);
        action = desc->action;
-       if (!action && !any_count)
+       if ((!action || action == &chained_action) && !any_count)
                goto out;
 
        seq_printf(p, "%*d: ", prec, i);
index 3320b84cc60f79ac09501d6f4d55acc01b0cebe6..320579d8909100d1ebb181510dd8ce4433ab5456 100644 (file)
@@ -15,6 +15,7 @@ enum {
        _IRQ_NESTED_THREAD      = IRQ_NESTED_THREAD,
        _IRQ_PER_CPU_DEVID      = IRQ_PER_CPU_DEVID,
        _IRQ_IS_POLLED          = IRQ_IS_POLLED,
+       _IRQ_DISABLE_UNLAZY     = IRQ_DISABLE_UNLAZY,
        _IRQF_MODIFY_MASK       = IRQF_MODIFY_MASK,
 };
 
@@ -28,6 +29,7 @@ enum {
 #define IRQ_NESTED_THREAD      GOT_YOU_MORON
 #define IRQ_PER_CPU_DEVID      GOT_YOU_MORON
 #define IRQ_IS_POLLED          GOT_YOU_MORON
+#define IRQ_DISABLE_UNLAZY     GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK       GOT_YOU_MORON
 
@@ -154,3 +156,13 @@ static inline bool irq_settings_is_polled(struct irq_desc *desc)
 {
        return desc->status_use_accessors & _IRQ_IS_POLLED;
 }
+
+static inline bool irq_settings_disable_unlazy(struct irq_desc *desc)
+{
+       return desc->status_use_accessors & _IRQ_DISABLE_UNLAZY;
+}
+
+static inline void irq_settings_clr_disable_unlazy(struct irq_desc *desc)
+{
+       desc->status_use_accessors &= ~_IRQ_DISABLE_UNLAZY;
+}
index 201b45327804d230edc9017eef6c07805ae3f8d3..bd9f8a03cefa4ef05c08d54a357910286487afd8 100644 (file)
@@ -1149,7 +1149,7 @@ static int __init parse_crashkernel_simple(char *cmdline,
        if (*cur == '@')
                *crash_base = memparse(cur+1, &cur);
        else if (*cur != ' ' && *cur != '\0') {
-               pr_warn("crashkernel: unrecognized char\n");
+               pr_warn("crashkernel: unrecognized char: %c\n", *cur);
                return -EINVAL;
        }
 
@@ -1186,12 +1186,12 @@ static int __init parse_crashkernel_suffix(char *cmdline,
 
        /* check with suffix */
        if (strncmp(cur, suffix, strlen(suffix))) {
-               pr_warn("crashkernel: unrecognized char\n");
+               pr_warn("crashkernel: unrecognized char: %c\n", *cur);
                return -EINVAL;
        }
        cur += strlen(suffix);
        if (*cur != ' ' && *cur != '\0') {
-               pr_warn("crashkernel: unrecognized char\n");
+               pr_warn("crashkernel: unrecognized char: %c\n", *cur);
                return -EINVAL;
        }
 
index da98d0593de24206d68222d787d059a5a2b025a1..0277d1216f80ae1adeed84a686ed34c9b2931fc2 100644 (file)
@@ -327,9 +327,13 @@ static void call_usermodehelper_exec_work(struct work_struct *work)
                call_usermodehelper_exec_sync(sub_info);
        } else {
                pid_t pid;
-
+               /*
+                * Use CLONE_PARENT to reparent it to kthreadd; we do not
+                * want to pollute current->children, and we need a parent
+                * that always ignores SIGCHLD to ensure auto-reaping.
+                */
                pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
-                                   SIGCHLD);
+                                   CLONE_PARENT | SIGCHLD);
                if (pid < 0) {
                        sub_info->retval = pid;
                        umh_complete(sub_info);
index 8acfbf773e0623f187b8c6677ed48d63b6bd7eef..4e49cc4c9952ca82eff8a2b5e5e61765d48ea96f 100644 (file)
@@ -3068,7 +3068,7 @@ static int __lock_is_held(struct lockdep_map *lock);
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                          int trylock, int read, int check, int hardirqs_off,
                          struct lockdep_map *nest_lock, unsigned long ip,
-                         int references)
+                         int references, int pin_count)
 {
        struct task_struct *curr = current;
        struct lock_class *class = NULL;
@@ -3157,7 +3157,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        hlock->waittime_stamp = 0;
        hlock->holdtime_stamp = lockstat_clock();
 #endif
-       hlock->pin_count = 0;
+       hlock->pin_count = pin_count;
 
        if (check && !mark_irqflags(curr, hlock))
                return 0;
@@ -3343,7 +3343,7 @@ found_it:
                        hlock_class(hlock)->subclass, hlock->trylock,
                                hlock->read, hlock->check, hlock->hardirqs_off,
                                hlock->nest_lock, hlock->acquire_ip,
-                               hlock->references))
+                               hlock->references, hlock->pin_count))
                        return 0;
        }
 
@@ -3433,7 +3433,7 @@ found_it:
                        hlock_class(hlock)->subclass, hlock->trylock,
                                hlock->read, hlock->check, hlock->hardirqs_off,
                                hlock->nest_lock, hlock->acquire_ip,
-                               hlock->references))
+                               hlock->references, hlock->pin_count))
                        return 0;
        }
 
@@ -3583,7 +3583,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        current->lockdep_recursion = 1;
        trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
        __lock_acquire(lock, subclass, trylock, read, check,
-                      irqs_disabled_flags(flags), nest_lock, ip, 0);
+                      irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
index 32244186f1f2ae0e7a6343ad084f416aa0cda055..8ef1919d63b2401a4832404d92c598c3e656b090 100644 (file)
  *
  * Copyright (C) IBM Corporation, 2014
  *
- * Author: Paul E. McKenney <paulmck@us.ibm.com>
+ * Authors: Paul E. McKenney <paulmck@us.ibm.com>
+ *          Davidlohr Bueso <dave@stgolabs.net>
  *     Based on kernel/rcu/torture.c.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
+#include <linux/sched/rt.h>
 #include <linux/spinlock.h>
 #include <linux/rwlock.h>
 #include <linux/mutex.h>
@@ -34,6 +36,7 @@
 #include <linux/moduleparam.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/percpu-rwsem.h>
 #include <linux/torture.h>
 
 MODULE_LICENSE("GPL");
@@ -91,11 +94,13 @@ struct lock_torture_ops {
        void (*init)(void);
        int (*writelock)(void);
        void (*write_delay)(struct torture_random_state *trsp);
+       void (*task_boost)(struct torture_random_state *trsp);
        void (*writeunlock)(void);
        int (*readlock)(void);
        void (*read_delay)(struct torture_random_state *trsp);
        void (*readunlock)(void);
-       unsigned long flags;
+
+       unsigned long flags; /* for irq spinlocks */
        const char *name;
 };
 
@@ -139,9 +144,15 @@ static void torture_lock_busted_write_unlock(void)
          /* BUGGY, do not use in real life!!! */
 }
 
+static void torture_boost_dummy(struct torture_random_state *trsp)
+{
+       /* Only rtmutexes care about priority */
+}
+
 static struct lock_torture_ops lock_busted_ops = {
        .writelock      = torture_lock_busted_write_lock,
        .write_delay    = torture_lock_busted_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_lock_busted_write_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
@@ -185,6 +196,7 @@ static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
 static struct lock_torture_ops spin_lock_ops = {
        .writelock      = torture_spin_lock_write_lock,
        .write_delay    = torture_spin_lock_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_spin_lock_write_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
@@ -211,6 +223,7 @@ __releases(torture_spinlock)
 static struct lock_torture_ops spin_lock_irq_ops = {
        .writelock      = torture_spin_lock_write_lock_irq,
        .write_delay    = torture_spin_lock_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_lock_spin_write_unlock_irq,
        .readlock       = NULL,
        .read_delay     = NULL,
@@ -275,6 +288,7 @@ static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
 static struct lock_torture_ops rw_lock_ops = {
        .writelock      = torture_rwlock_write_lock,
        .write_delay    = torture_rwlock_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_rwlock_write_unlock,
        .readlock       = torture_rwlock_read_lock,
        .read_delay     = torture_rwlock_read_delay,
@@ -315,6 +329,7 @@ __releases(torture_rwlock)
 static struct lock_torture_ops rw_lock_irq_ops = {
        .writelock      = torture_rwlock_write_lock_irq,
        .write_delay    = torture_rwlock_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_rwlock_write_unlock_irq,
        .readlock       = torture_rwlock_read_lock_irq,
        .read_delay     = torture_rwlock_read_delay,
@@ -354,6 +369,7 @@ static void torture_mutex_unlock(void) __releases(torture_mutex)
 static struct lock_torture_ops mutex_lock_ops = {
        .writelock      = torture_mutex_lock,
        .write_delay    = torture_mutex_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_mutex_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
@@ -361,6 +377,90 @@ static struct lock_torture_ops mutex_lock_ops = {
        .name           = "mutex_lock"
 };
 
+#ifdef CONFIG_RT_MUTEXES
+static DEFINE_RT_MUTEX(torture_rtmutex);
+
+static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
+{
+       rt_mutex_lock(&torture_rtmutex);
+       return 0;
+}
+
+static void torture_rtmutex_boost(struct torture_random_state *trsp)
+{
+       int policy;
+       struct sched_param param;
+       const unsigned int factor = 50000; /* yes, quite arbitrary */
+
+       if (!rt_task(current)) {
+               /*
+                * (1) Boost priority once every ~50k operations. When the
+                * task tries to take the lock, the rtmutex will account
+                * for the new priority, and do any corresponding pi-dance.
+                */
+               if (!(torture_random(trsp) %
+                     (cxt.nrealwriters_stress * factor))) {
+                       policy = SCHED_FIFO;
+                       param.sched_priority = MAX_RT_PRIO - 1;
+               } else /* common case, do nothing */
+                       return;
+       } else {
+               /*
+                * The task will remain boosted for another ~500k operations,
+                * then restored back to its original prio, and so forth.
+                *
+                * When @trsp is nil, we want to force-reset the task for
+                * stopping the kthread.
+                */
+               if (!trsp || !(torture_random(trsp) %
+                              (cxt.nrealwriters_stress * factor * 2))) {
+                       policy = SCHED_NORMAL;
+                       param.sched_priority = 0;
+               } else /* common case, do nothing */
+                       return;
+       }
+
+       sched_setscheduler_nocheck(current, policy, &param);
+}
+
+static void torture_rtmutex_delay(struct torture_random_state *trsp)
+{
+       const unsigned long shortdelay_us = 2;
+       const unsigned long longdelay_ms = 100;
+
+       /*
+        * We want a short delay mostly to emulate likely code, and
+        * we want a long delay occasionally to force massive contention.
+        */
+       if (!(torture_random(trsp) %
+             (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+               mdelay(longdelay_ms);
+       if (!(torture_random(trsp) %
+             (cxt.nrealwriters_stress * 2 * shortdelay_us)))
+               udelay(shortdelay_us);
+#ifdef CONFIG_PREEMPT
+       if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
+               preempt_schedule();  /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
+{
+       rt_mutex_unlock(&torture_rtmutex);
+}
+
+static struct lock_torture_ops rtmutex_lock_ops = {
+       .writelock      = torture_rtmutex_lock,
+       .write_delay    = torture_rtmutex_delay,
+       .task_boost     = torture_rtmutex_boost,
+       .writeunlock    = torture_rtmutex_unlock,
+       .readlock       = NULL,
+       .read_delay     = NULL,
+       .readunlock     = NULL,
+       .name           = "rtmutex_lock"
+};
+#endif
+
 static DECLARE_RWSEM(torture_rwsem);
 static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
 {
@@ -419,6 +519,7 @@ static void torture_rwsem_up_read(void) __releases(torture_rwsem)
 static struct lock_torture_ops rwsem_lock_ops = {
        .writelock      = torture_rwsem_down_write,
        .write_delay    = torture_rwsem_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_rwsem_up_write,
        .readlock       = torture_rwsem_down_read,
        .read_delay     = torture_rwsem_read_delay,
@@ -426,6 +527,48 @@ static struct lock_torture_ops rwsem_lock_ops = {
        .name           = "rwsem_lock"
 };
 
+#include <linux/percpu-rwsem.h>
+static struct percpu_rw_semaphore pcpu_rwsem;
+
+void torture_percpu_rwsem_init(void)
+{
+       BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
+}
+
+static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
+{
+       percpu_down_write(&pcpu_rwsem);
+       return 0;
+}
+
+static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
+{
+       percpu_up_write(&pcpu_rwsem);
+}
+
+static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
+{
+       percpu_down_read(&pcpu_rwsem);
+       return 0;
+}
+
+static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
+{
+       percpu_up_read(&pcpu_rwsem);
+}
+
+static struct lock_torture_ops percpu_rwsem_lock_ops = {
+       .init           = torture_percpu_rwsem_init,
+       .writelock      = torture_percpu_rwsem_down_write,
+       .write_delay    = torture_rwsem_write_delay,
+       .task_boost     = torture_boost_dummy,
+       .writeunlock    = torture_percpu_rwsem_up_write,
+       .readlock       = torture_percpu_rwsem_down_read,
+       .read_delay     = torture_rwsem_read_delay,
+       .readunlock     = torture_percpu_rwsem_up_read,
+       .name           = "percpu_rwsem_lock"
+};
+
 /*
  * Lock torture writer kthread.  Repeatedly acquires and releases
  * the lock, checking for duplicate acquisitions.
@@ -442,6 +585,7 @@ static int lock_torture_writer(void *arg)
                if ((torture_random(&rand) & 0xfffff) == 0)
                        schedule_timeout_uninterruptible(1);
 
+               cxt.cur_ops->task_boost(&rand);
                cxt.cur_ops->writelock();
                if (WARN_ON_ONCE(lock_is_write_held))
                        lwsp->n_lock_fail++;
@@ -456,6 +600,8 @@ static int lock_torture_writer(void *arg)
 
                stutter_wait("lock_torture_writer");
        } while (!torture_must_stop());
+
+       cxt.cur_ops->task_boost(NULL); /* reset prio */
        torture_kthread_stopping("lock_torture_writer");
        return 0;
 }
@@ -642,7 +788,11 @@ static int __init lock_torture_init(void)
                &spin_lock_ops, &spin_lock_irq_ops,
                &rw_lock_ops, &rw_lock_irq_ops,
                &mutex_lock_ops,
+#ifdef CONFIG_RT_MUTEXES
+               &rtmutex_lock_ops,
+#endif
                &rwsem_lock_ops,
+               &percpu_rwsem_lock_ops,
        };
 
        if (!torture_init_begin(torture_type, verbose, &torture_runnable))
@@ -661,11 +811,11 @@ static int __init lock_torture_init(void)
                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
                        pr_alert(" %s", torture_ops[i]->name);
                pr_alert("\n");
-               torture_init_end();
-               return -EINVAL;
+               firsterr = -EINVAL;
+               goto unwind;
        }
        if (cxt.cur_ops->init)
-               cxt.cur_ops->init(); /* no "goto unwind" prior to this point!!! */
+               cxt.cur_ops->init();
 
        if (nwriters_stress >= 0)
                cxt.nrealwriters_stress = nwriters_stress;
@@ -676,6 +826,10 @@ static int __init lock_torture_init(void)
        if (strncmp(torture_type, "mutex", 5) == 0)
                cxt.debug_lock = true;
 #endif
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+       if (strncmp(torture_type, "rtmutex", 7) == 0)
+               cxt.debug_lock = true;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
        if ((strncmp(torture_type, "spin", 4) == 0) ||
            (strncmp(torture_type, "rw_lock", 7) == 0))
index fd91aaa4554c8be6cf5b981d7ac3022a8046e77e..5b9102a47ea5209dd887b6dfbca16215b2bad922 100644 (file)
@@ -67,7 +67,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
        node->locked = 0;
        node->next   = NULL;
 
-       prev = xchg(lock, node);
+       prev = xchg_acquire(lock, node);
        if (likely(prev == NULL)) {
                /*
                 * Lock acquired, don't need to set node->locked to 1. Threads
@@ -98,7 +98,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
                /*
                 * Release the lock by setting it to NULL
                 */
-               if (likely(cmpxchg(lock, node, NULL) == node))
+               if (likely(cmpxchg_release(lock, node, NULL) == node))
                        return;
                /* Wait until the next pointer is set */
                while (!(next = READ_ONCE(node->next)))
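
The two hunks above relax the MCS lock's full barriers: taking the lock needs only ACQUIRE ordering on the tail exchange, and handing it back needs only RELEASE ordering on the cmpxchg, because the sole requirement is that the next owner's acquire observes everything the previous owner wrote before its release. A self-contained userspace analogue of that acquire/release pairing, written with C11 atomics rather than the kernel primitives (purely illustrative):

/* Illustrative C11 analogue of acquire/release lock handoff; not kernel code. */
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static _Atomic int lock_word;	/* 0 = unlocked, 1 = locked */
static int shared_counter;	/* protected by lock_word */

static void lock(void)
{
	/* ACQUIRE: the critical section cannot be reordered before this. */
	while (atomic_exchange_explicit(&lock_word, 1, memory_order_acquire))
		thrd_yield();
}

static void unlock(void)
{
	/* RELEASE: the critical section cannot be reordered after this. */
	atomic_store_explicit(&lock_word, 0, memory_order_release);
}

static int worker(void *arg)
{
	for (int i = 0; i < 100000; i++) {
		lock();
		shared_counter++;	/* safe: the handoff orders this increment */
		unlock();
	}
	return 0;
}

int main(void)
{
	thrd_t a, b;

	thrd_create(&a, worker, NULL);
	thrd_create(&b, worker, NULL);
	thrd_join(a, NULL);
	thrd_join(b, NULL);
	printf("%d\n", shared_counter);	/* prints 200000 */
	return 0;
}
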
index 4cccea6b8934f5697fa3dfa609ae4bd10db78b6f..0551c219c40e5bb8f8f87bfb0445100fe1b31cc9 100644 (file)
@@ -277,7 +277,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 static inline bool mutex_try_to_acquire(struct mutex *lock)
 {
        return !mutex_is_locked(lock) &&
-               (atomic_cmpxchg(&lock->count, 1, 0) == 1);
+               (atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
 }
 
 /*
@@ -529,7 +529,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
         * Once more, try to acquire the lock. Only try-lock the mutex if
         * it is unlocked to reduce unnecessary xchg() operations.
         */
-       if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
+       if (!mutex_is_locked(lock) &&
+           (atomic_xchg_acquire(&lock->count, 0) == 1))
                goto skip_wait;
 
        debug_mutex_lock_common(lock, &waiter);
@@ -553,7 +554,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * non-negative in order to avoid unnecessary xchg operations:
                 */
                if (atomic_read(&lock->count) >= 0 &&
-                   (atomic_xchg(&lock->count, -1) == 1))
+                   (atomic_xchg_acquire(&lock->count, -1) == 1))
                        break;
 
                /*
@@ -867,7 +868,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
        spin_lock_mutex(&lock->wait_lock, flags);
 
-       prev = atomic_xchg(&lock->count, -1);
+       prev = atomic_xchg_acquire(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
index dc85ee23a26f79416a140241e3067a5a2ca24d0b..d092a0c9c2d4ed838004215cdf038691a14f4598 100644 (file)
@@ -50,7 +50,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
 
        for (;;) {
                if (atomic_read(&lock->tail) == curr &&
-                   atomic_cmpxchg(&lock->tail, curr, old) == curr) {
+                   atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
                        /*
                         * We were the last queued, we moved @lock back. @prev
                         * will now observe @lock and will complete its
@@ -92,7 +92,11 @@ bool osq_lock(struct optimistic_spin_queue *lock)
        node->next = NULL;
        node->cpu = curr;
 
-       old = atomic_xchg(&lock->tail, curr);
+       /*
+        * ACQUIRE semantics, pairs with the corresponding RELEASE in
+        * the uncontended fastpath of osq_unlock().
+        */
+       old = atomic_xchg_acquire(&lock->tail, curr);
        if (old == OSQ_UNLOCKED_VAL)
                return true;
 
@@ -184,7 +188,8 @@ void osq_unlock(struct optimistic_spin_queue *lock)
        /*
         * Fast path for the uncontended case.
         */
-       if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
+       if (likely(atomic_cmpxchg_release(&lock->tail, curr,
+                                         OSQ_UNLOCKED_VAL) == curr))
                return;
 
        /*
index f3256725486738878d0f640216a95d91d16359d4..f231e0bb311ce0827d281d34f737a3a06405c072 100644 (file)
@@ -17,50 +17,43 @@ int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
 
        /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
        __init_rwsem(&brw->rw_sem, name, rwsem_key);
-       atomic_set(&brw->write_ctr, 0);
+       rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
        atomic_set(&brw->slow_read_ctr, 0);
        init_waitqueue_head(&brw->write_waitq);
        return 0;
 }
+EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
 
 void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
 {
+       /*
+        * XXX: temporary kludge. The error path in alloc_super()
+        * assumes that percpu_free_rwsem() is safe after kzalloc().
+        */
+       if (!brw->fast_read_ctr)
+               return;
+
+       rcu_sync_dtor(&brw->rss);
        free_percpu(brw->fast_read_ctr);
        brw->fast_read_ctr = NULL; /* catch use after free bugs */
 }
 
 /*
- * This is the fast-path for down_read/up_read, it only needs to ensure
- * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the
- * fast per-cpu counter. The writer uses synchronize_sched_expedited() to
- * serialize with the preempt-disabled section below.
- *
- * The nontrivial part is that we should guarantee acquire/release semantics
- * in case when
- *
- *     R_W: down_write() comes after up_read(), the writer should see all
- *          changes done by the reader
- * or
- *     W_R: down_read() comes after up_write(), the reader should see all
- *          changes done by the writer
+ * This is the fast-path for down_read/up_read. If it succeeds we rely
+ * on the barriers provided by rcu_sync_enter/exit; see the comments in
+ * percpu_down_write() and percpu_up_write().
  *
  * If this helper fails the callers rely on the normal rw_semaphore and
  * atomic_dec_and_test(), so in this case we have the necessary barriers.
- *
- * But if it succeeds we do not have any barriers, atomic_read(write_ctr) or
- * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
- * reader inside the critical section. See the comments in down_write and
- * up_write below.
  */
 static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
 {
-       bool success = false;
+       bool success;
 
        preempt_disable();
-       if (likely(!atomic_read(&brw->write_ctr))) {
+       success = rcu_sync_is_idle(&brw->rss);
+       if (likely(success))
                __this_cpu_add(*brw->fast_read_ctr, val);
-               success = true;
-       }
        preempt_enable();
 
        return success;
@@ -77,16 +70,17 @@ static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
 void percpu_down_read(struct percpu_rw_semaphore *brw)
 {
        might_sleep();
-       if (likely(update_fast_ctr(brw, +1))) {
-               rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
+       rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
+
+       if (likely(update_fast_ctr(brw, +1)))
                return;
-       }
 
-       down_read(&brw->rw_sem);
+       /* Avoid rwsem_acquire_read() and rwsem_release() */
+       __down_read(&brw->rw_sem);
        atomic_inc(&brw->slow_read_ctr);
-       /* avoid up_read()->rwsem_release() */
        __up_read(&brw->rw_sem);
 }
+EXPORT_SYMBOL_GPL(percpu_down_read);
 
 int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
 {
@@ -112,6 +106,7 @@ void percpu_up_read(struct percpu_rw_semaphore *brw)
        if (atomic_dec_and_test(&brw->slow_read_ctr))
                wake_up_all(&brw->write_waitq);
 }
+EXPORT_SYMBOL_GPL(percpu_up_read);
 
 static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
 {
@@ -126,33 +121,17 @@ static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
        return sum;
 }
 
-/*
- * A writer increments ->write_ctr to force the readers to switch to the
- * slow mode, note the atomic_read() check in update_fast_ctr().
- *
- * After that the readers can only inc/dec the slow ->slow_read_ctr counter,
- * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
- * counter it represents the number of active readers.
- *
- * Finally the writer takes ->rw_sem for writing and blocks the new readers,
- * then waits until the slow counter becomes zero.
- */
 void percpu_down_write(struct percpu_rw_semaphore *brw)
 {
-       /* tell update_fast_ctr() there is a pending writer */
-       atomic_inc(&brw->write_ctr);
        /*
-        * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
-        *    so that update_fast_ctr() can't succeed.
-        *
-        * 2. Ensures we see the result of every previous this_cpu_add() in
-        *    update_fast_ctr().
+        * Make rcu_sync_is_idle() == F and thus disable the fast-path in
+        * percpu_down_read() and percpu_up_read(), and wait for gp pass.
         *
-        * 3. Ensures that if any reader has exited its critical section via
-        *    fast-path, it executes a full memory barrier before we return.
-        *    See R_W case in the comment above update_fast_ctr().
+        * The latter synchronises us with the preceding readers which used
+        * the fast-path, so we cannot miss the result of __this_cpu_add()
+        * or anything else inside their critical sections.
         */
-       synchronize_sched_expedited();
+       rcu_sync_enter(&brw->rss);
 
        /* exclude other writers, and block the new readers completely */
        down_write(&brw->rw_sem);
@@ -163,16 +142,17 @@ void percpu_down_write(struct percpu_rw_semaphore *brw)
        /* wait for all readers to complete their percpu_up_read() */
        wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
 }
+EXPORT_SYMBOL_GPL(percpu_down_write);
 
 void percpu_up_write(struct percpu_rw_semaphore *brw)
 {
        /* release the lock, but the readers can't use the fast-path */
        up_write(&brw->rw_sem);
        /*
-        * Insert the barrier before the next fast-path in down_read,
-        * see W_R case in the comment above update_fast_ctr().
+        * Enable the fast-path in percpu_down_read() and percpu_up_read()
+        * but only after another gp pass; this adds the necessary barrier
+        * to ensure the reader can't miss the changes done by us.
         */
-       synchronize_sched_expedited();
-       /* the last writer unblocks update_fast_ctr() */
-       atomic_dec(&brw->write_ctr);
+       rcu_sync_exit(&brw->rss);
 }
+EXPORT_SYMBOL_GPL(percpu_up_write);
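
With the rcu_sync conversion above, readers stay on a per-CPU counter increment whenever no writer is around, and a writer pays one grace period on entry (skipped for back-to-back writers) plus a deferred one on exit, instead of two synchronize_sched_expedited() calls. A minimal caller-side sketch, assuming a file-scope semaphore named my_sem and a protected_state variable (both hypothetical):

/* Usage sketch; my_sem and protected_state are example names. */
#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore my_sem;
static int protected_state;

static int example_setup(void)
{
	return percpu_init_rwsem(&my_sem);	/* 0 on success */
}

static int example_read(void)
{
	int val;

	percpu_down_read(&my_sem);	/* per-CPU fastpath while no writer */
	val = protected_state;
	percpu_up_read(&my_sem);
	return val;
}

static void example_write(int new_state)
{
	percpu_down_write(&my_sem);	/* forces readers onto the slowpath */
	protected_state = new_state;
	percpu_up_write(&my_sem);	/* fastpath returns after a later GP */
}
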
index f17a3e3b355079b6658b55b2f21811a69e695190..fec08233866875ca1962d836b4e5d6f88d475092 100644 (file)
@@ -86,7 +86,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
        /*
         * Put the reader into the wait queue
         */
-       arch_spin_lock(&lock->lock);
+       arch_spin_lock(&lock->wait_lock);
 
        /*
         * The ACQUIRE semantics of the following spinning code ensure
@@ -99,7 +99,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
        /*
         * Signal the next one in queue to become queue head
         */
-       arch_spin_unlock(&lock->lock);
+       arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_read_lock_slowpath);
 
@@ -112,7 +112,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
        u32 cnts;
 
        /* Put the writer into the wait queue */
-       arch_spin_lock(&lock->lock);
+       arch_spin_lock(&lock->wait_lock);
 
        /* Try to acquire the lock directly if no reader is present */
        if (!atomic_read(&lock->cnts) &&
@@ -144,6 +144,6 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
                cpu_relax_lowlatency();
        }
 unlock:
-       arch_spin_unlock(&lock->lock);
+       arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_write_lock_slowpath);
index c8e6e9a596f513baa8a85af3cb0f2b9ff9116a91..f0450ff4829b6c1308d4768b2ae3a7c575b1cf51 100644 (file)
@@ -267,7 +267,6 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
                }
 
                if (!lp) { /* ONCE */
-                       WRITE_ONCE(pn->state, vcpu_hashed);
                        lp = pv_hash(lock, pn);
 
                        /*
@@ -275,11 +274,9 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
                         * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
                         * we'll be sure to be able to observe our hash entry.
                         *
-                        *   [S] pn->state
                         *   [S] <hash>                 [Rmw] l->locked == _Q_SLOW_VAL
                         *       MB                           RMB
                         * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
-                        *                                [L] pn->state
                         *
                         * Matches the smp_rmb() in __pv_queued_spin_unlock().
                         */
@@ -364,8 +361,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
         * vCPU is harmless other than the additional latency in completing
         * the unlock.
         */
-       if (READ_ONCE(node->state) == vcpu_hashed)
-               pv_kick(node->cpu);
+       pv_kick(node->cpu);
 }
 /*
  * Include the architecture specific callee-save thunk of the
index 7781d801212fa32082593cf6bbb5dd44077cb78e..8251e75dd9c0bd67337754baf6f03fb2cf1f956f 100644 (file)
@@ -74,14 +74,23 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
  * set up.
  */
 #ifndef CONFIG_DEBUG_RT_MUTEXES
-# define rt_mutex_cmpxchg(l,c,n)       (cmpxchg(&l->owner, c, n) == c)
+# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
+# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
+# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
+
+/*
+ * Callers must hold the ->wait_lock -- which is the whole purpose as we force
+ * all future threads that attempt to [Rmw] the lock to the slowpath. As such
+ * relaxed semantics suffice.
+ */
 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 {
        unsigned long owner, *p = (unsigned long *) &lock->owner;
 
        do {
                owner = *p;
-       } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+       } while (cmpxchg_relaxed(p, owner,
+                                owner | RT_MUTEX_HAS_WAITERS) != owner);
 }
 
 /*
@@ -121,11 +130,14 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
         *                                      lock(wait_lock);
         *                                      acquire(lock);
         */
-       return rt_mutex_cmpxchg(lock, owner, NULL);
+       return rt_mutex_cmpxchg_release(lock, owner, NULL);
 }
 
 #else
-# define rt_mutex_cmpxchg(l,c,n)       (0)
+# define rt_mutex_cmpxchg_relaxed(l,c,n)       (0)
+# define rt_mutex_cmpxchg_acquire(l,c,n)       (0)
+# define rt_mutex_cmpxchg_release(l,c,n)       (0)
+
 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 {
        lock->owner = (struct task_struct *)
@@ -158,7 +170,8 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
         * then right waiter has a dl_prio() too.
         */
        if (dl_prio(left->prio))
-               return (left->task->dl.deadline < right->task->dl.deadline);
+               return dl_time_before(left->task->dl.deadline,
+                                     right->task->dl.deadline);
 
        return 0;
 }
@@ -1321,7 +1334,7 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                enum rtmutex_chainwalk chwalk))
 {
-       if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+       if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
@@ -1337,7 +1350,7 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                                      enum rtmutex_chainwalk chwalk))
 {
        if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
-           likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+           likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
@@ -1348,7 +1361,7 @@ static inline int
 rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
 {
-       if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+       if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
@@ -1362,7 +1375,7 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
 {
        WAKE_Q(wake_q);
 
-       if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
+       if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
                rt_mutex_deadlock_account_unlock(current);
 
        } else {
@@ -1484,7 +1497,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
                                   struct wake_q_head *wqh)
 {
-       if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
+       if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
                rt_mutex_deadlock_account_unlock(current);
                return false;
        }
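
The rtmutex fastpaths above now spell out their ordering: taking the lock (owner NULL -> current) uses ACQUIRE so the critical section cannot float above the cmpxchg, releasing it (current -> NULL) uses RELEASE so the critical section cannot float below it, and mark_rt_mutex_waiters() stays relaxed because it already runs under ->wait_lock. A compact illustration of that owner-word protocol, using a toy type defined only for this sketch (the real rt_mutex additionally tracks waiters and priority inheritance):

/* Illustrative owner-word fastpath; not the rt_mutex implementation. */
#include <linux/atomic.h>
#include <linux/sched.h>

struct toy_lock {
	struct task_struct *owner;	/* NULL when unlocked */
};

static inline bool toy_trylock(struct toy_lock *l)
{
	/* ACQUIRE pairs with the RELEASE in toy_unlock(). */
	return cmpxchg_acquire(&l->owner, NULL, current) == NULL;
}

static inline bool toy_unlock(struct toy_lock *l)
{
	/*
	 * RELEASE: everything done while holding the lock is visible
	 * to the next toy_trylock() that succeeds.
	 */
	return cmpxchg_release(&l->owner, current, NULL) == current;
}
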
index 0f189714e457016ba7801c788a44673907c7746b..a4d4de05b2d16707becf6179f8ae2ebed334bead 100644 (file)
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
         * to reduce unnecessary expensive cmpxchg() operations.
         */
        if (count == RWSEM_WAITING_BIAS &&
-           cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
+           cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS,
                    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
                if (!list_is_singular(&sem->wait_list))
                        rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
@@ -285,7 +285,8 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
                if (!(count == 0 || count == RWSEM_WAITING_BIAS))
                        return false;
 
-               old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS);
+               old = cmpxchg_acquire(&sem->count, count,
+                                     count + RWSEM_ACTIVE_WRITE_BIAS);
                if (old == count) {
                        rwsem_set_owner(sem);
                        return true;
index 72b0c66628b6b3db589ad0cd6ed6533edb68a9ea..9d6b55587eaa59a25c38776e43aeff51ba682c40 100644 (file)
@@ -24,6 +24,16 @@ __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
 }
 #endif
 
+static void *try_ram_remap(resource_size_t offset, size_t size)
+{
+       struct page *page = pfn_to_page(offset >> PAGE_SHIFT);
+
+       /* In the simple case just return the existing linear address */
+       if (!PageHighMem(page))
+               return __va(offset);
+       return NULL; /* fallback to ioremap_cache */
+}
+
 /**
  * memremap() - remap an iomem_resource as cacheable memory
  * @offset: iomem resource start address
@@ -66,8 +76,8 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
                 * the requested range is potentially in "System RAM"
                 */
                if (is_ram == REGION_INTERSECTS)
-                       addr = __va(offset);
-               else
+                       addr = try_ram_remap(offset, size);
+               if (!addr)
                        addr = ioremap_cache(offset, size);
        }
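
The hunk above makes memremap() reuse the kernel linear mapping for System RAM when the page is not highmem, falling back to ioremap_cache() otherwise; callers see no difference. A hypothetical caller-side sketch (the map_table() helper and its purpose are assumptions for illustration, not part of this patch):

/* Hypothetical use of memremap(); helper name and purpose are invented. */
#include <linux/io.h>

static void *map_table(resource_size_t start, size_t len)
{
	void *va = memremap(start, len, MEMREMAP_WB);	/* cacheable mapping */

	if (!va)
		return NULL;
	/* ... parse the table through va ... */
	return va;	/* caller releases it later with memunmap(va) */
}
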
 
index b86b7bf1be388d72fe92fb6038b4a67b4710df1f..8f051a106676fb8f2d5457a9104340cce81ce37a 100644 (file)
@@ -1063,11 +1063,15 @@ void symbol_put_addr(void *addr)
        if (core_kernel_text(a))
                return;
 
-       /* module_text_address is safe here: we're supposed to have reference
-        * to module from symbol_get, so it can't go away. */
+       /*
+        * Even though we hold a reference on the module, we still need to
+        * disable preemption in order to safely traverse the data structure.
+        */
+       preempt_disable();
        modaddr = __module_text_address(a);
        BUG_ON(!modaddr);
        module_put(modaddr);
+       preempt_enable();
 }
 EXPORT_SYMBOL_GPL(symbol_put_addr);
 
index 50a808424b06af45fdd0fd6ab3ae732e1917680d..61a16569ffbf53c2b19b9b67d908edcd225dc201 100644 (file)
@@ -1,4 +1,4 @@
-obj-y += update.o
+obj-y += update.o sync.o
 obj-$(CONFIG_SRCU) += srcu.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_TREE_RCU) += tree.o
index 77192953dee5f5a54b10131ee70684fafb18c2c6..d89328e260df6f4f953b649fea98587aacf12318 100644 (file)
@@ -252,7 +252,7 @@ struct rcu_torture_ops {
        void (*exp_sync)(void);
        unsigned long (*get_state)(void);
        void (*cond_sync)(unsigned long oldstate);
-       void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+       call_rcu_func_t call;
        void (*cb_barrier)(void);
        void (*fqs)(void);
        void (*stats)(void);
@@ -448,7 +448,7 @@ static void synchronize_rcu_busted(void)
 }
 
 static void
-call_rcu_busted(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
 {
        /* This is a deliberate bug for testing purposes only! */
        func(head);
@@ -523,7 +523,7 @@ static void srcu_torture_synchronize(void)
 }
 
 static void srcu_torture_call(struct rcu_head *head,
-                             void (*func)(struct rcu_head *head))
+                             rcu_callback_t func)
 {
        call_srcu(srcu_ctlp, head, func);
 }
@@ -695,7 +695,7 @@ static bool __maybe_unused torturing_tasks(void)
 
 #define RCUTORTURE_TASKS_OPS
 
-static bool torturing_tasks(void)
+static bool __maybe_unused torturing_tasks(void)
 {
        return false;
 }
@@ -768,7 +768,6 @@ static int rcu_torture_boost(void *arg)
                                }
                                call_rcu_time = jiffies;
                        }
-                       cond_resched_rcu_qs();
                        stutter_wait("rcu_torture_boost");
                        if (torture_must_stop())
                                goto checkwait;
@@ -1208,7 +1207,6 @@ rcu_torture_reader(void *arg)
                __this_cpu_inc(rcu_torture_batch[completed]);
                preempt_enable();
                cur_ops->readunlock(idx);
-               cond_resched_rcu_qs();
                stutter_wait("rcu_torture_reader");
        } while (!torture_must_stop());
        if (irqreader && cur_ops->irq_capable) {
@@ -1742,15 +1740,15 @@ rcu_torture_init(void)
                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
                        pr_alert(" %s", torture_ops[i]->name);
                pr_alert("\n");
-               torture_init_end();
-               return -EINVAL;
+               firsterr = -EINVAL;
+               goto unwind;
        }
        if (cur_ops->fqs == NULL && fqs_duration != 0) {
                pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
                fqs_duration = 0;
        }
        if (cur_ops->init)
-               cur_ops->init(); /* no "goto unwind" prior to this point!!! */
+               cur_ops->init();
 
        if (nreaders >= 0) {
                nrealreaders = nreaders;
index d3fcb2ec8536724615cbdd5ce5a891cbcc020934..a63a1ea5a41bf450f6b162674149a5928fca3530 100644 (file)
@@ -298,11 +298,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
        int idx;
 
        idx = READ_ONCE(sp->completed) & 0x1;
-       preempt_disable();
        __this_cpu_inc(sp->per_cpu_ref->c[idx]);
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
        __this_cpu_inc(sp->per_cpu_ref->seq[idx]);
-       preempt_enable();
        return idx;
 }
 EXPORT_SYMBOL_GPL(__srcu_read_lock);
@@ -387,7 +385,7 @@ static void srcu_flip(struct srcu_struct *sp)
  * srcu_struct structure.
  */
 void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
-               void (*func)(struct rcu_head *head))
+              rcu_callback_t func)
 {
        unsigned long flags;
 
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
new file mode 100644 (file)
index 0000000..be922c9
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * RCU-based infrastructure for lightweight reader-writer locking
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (c) 2015, Red Hat, Inc.
+ *
+ * Author: Oleg Nesterov <oleg@redhat.com>
+ */
+
+#include <linux/rcu_sync.h>
+#include <linux/sched.h>
+
+#ifdef CONFIG_PROVE_RCU
+#define __INIT_HELD(func)      .held = func,
+#else
+#define __INIT_HELD(func)
+#endif
+
+static const struct {
+       void (*sync)(void);
+       void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
+       void (*wait)(void);
+#ifdef CONFIG_PROVE_RCU
+       int  (*held)(void);
+#endif
+} gp_ops[] = {
+       [RCU_SYNC] = {
+               .sync = synchronize_rcu,
+               .call = call_rcu,
+               .wait = rcu_barrier,
+               __INIT_HELD(rcu_read_lock_held)
+       },
+       [RCU_SCHED_SYNC] = {
+               .sync = synchronize_sched,
+               .call = call_rcu_sched,
+               .wait = rcu_barrier_sched,
+               __INIT_HELD(rcu_read_lock_sched_held)
+       },
+       [RCU_BH_SYNC] = {
+               .sync = synchronize_rcu_bh,
+               .call = call_rcu_bh,
+               .wait = rcu_barrier_bh,
+               __INIT_HELD(rcu_read_lock_bh_held)
+       },
+};
+
+enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
+enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
+
+#define        rss_lock        gp_wait.lock
+
+#ifdef CONFIG_PROVE_RCU
+void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
+{
+       RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
+                        "suspicious rcu_sync_is_idle() usage");
+}
+#endif
+
+/**
+ * rcu_sync_init() - Initialize an rcu_sync structure
+ * @rsp: Pointer to rcu_sync structure to be initialized
+ * @type: Flavor of RCU with which to synchronize rcu_sync structure
+ */
+void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
+{
+       memset(rsp, 0, sizeof(*rsp));
+       init_waitqueue_head(&rsp->gp_wait);
+       rsp->gp_type = type;
+}
+
+/**
+ * rcu_sync_enter() - Force readers onto slowpath
+ * @rsp: Pointer to rcu_sync structure to use for synchronization
+ *
+ * This function is used by updaters who need readers to make use of
+ * a slowpath during the update.  After this function returns, all
+ * subsequent calls to rcu_sync_is_idle() will return false, which
+ * tells readers to stay off their fastpaths.  A later call to
+ * rcu_sync_exit() re-enables reader fastpaths.
+ *
+ * When called in isolation, rcu_sync_enter() must wait for a grace
+ * period, however, closely spaced calls to rcu_sync_enter() can
+ * optimize away the grace-period wait via a state machine implemented
+ * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
+ */
+void rcu_sync_enter(struct rcu_sync *rsp)
+{
+       bool need_wait, need_sync;
+
+       spin_lock_irq(&rsp->rss_lock);
+       need_wait = rsp->gp_count++;
+       need_sync = rsp->gp_state == GP_IDLE;
+       if (need_sync)
+               rsp->gp_state = GP_PENDING;
+       spin_unlock_irq(&rsp->rss_lock);
+
+       BUG_ON(need_wait && need_sync);
+
+       if (need_sync) {
+               gp_ops[rsp->gp_type].sync();
+               rsp->gp_state = GP_PASSED;
+               wake_up_all(&rsp->gp_wait);
+       } else if (need_wait) {
+               wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
+       } else {
+               /*
+                * Possible when there's a pending CB from a rcu_sync_exit().
+                * Nobody has yet been allowed the 'fast' path and thus we can
+                * avoid doing any sync(). The callback will get 'dropped'.
+                */
+               BUG_ON(rsp->gp_state != GP_PASSED);
+       }
+}
+
+/**
+ * rcu_sync_func() - Callback function managing reader access to fastpath
+ * @rsp: Pointer to rcu_sync structure to use for synchronization
+ *
+ * This function is passed to one of the call_rcu() functions by
+ * rcu_sync_exit(), so that it is invoked after a grace period following
+ * that invocation of rcu_sync_exit().  It takes action based on events that
+ * have taken place in the meantime, so that closely spaced rcu_sync_enter()
+ * and rcu_sync_exit() pairs need not wait for a grace period.
+ *
+ * If another rcu_sync_enter() is invoked before the grace period
+ * ended, reset state to allow the next rcu_sync_exit() to let the
+ * readers back onto their fastpaths (after a grace period).  If both
+ * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
+ * before the grace period ended, re-invoke call_rcu() on behalf of that
+ * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
+ * can again use their fastpaths.
+ */
+static void rcu_sync_func(struct rcu_head *rcu)
+{
+       struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head);
+       unsigned long flags;
+
+       BUG_ON(rsp->gp_state != GP_PASSED);
+       BUG_ON(rsp->cb_state == CB_IDLE);
+
+       spin_lock_irqsave(&rsp->rss_lock, flags);
+       if (rsp->gp_count) {
+               /*
+                * A new rcu_sync_enter() has happened; drop the callback.
+                */
+               rsp->cb_state = CB_IDLE;
+       } else if (rsp->cb_state == CB_REPLAY) {
+               /*
+                * A new rcu_sync_exit() has happened; requeue the callback
+                * to catch a later GP.
+                */
+               rsp->cb_state = CB_PENDING;
+               gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
+       } else {
+               /*
+                * We're at least a GP after rcu_sync_exit(); everybody will now
+                * have observed the write side critical section. Let 'em rip!
+                */
+               rsp->cb_state = CB_IDLE;
+               rsp->gp_state = GP_IDLE;
+       }
+       spin_unlock_irqrestore(&rsp->rss_lock, flags);
+}
+
+/**
+ * rcu_sync_exit() - Allow readers back onto fast path after grace period
+ * @rsp: Pointer to rcu_sync structure to use for synchronization
+ *
+ * This function is used by updaters who have completed, and can therefore
+ * now allow readers to make use of their fastpaths after a grace period
+ * has elapsed.  After this grace period has completed, all subsequent
+ * calls to rcu_sync_is_idle() will return true, which tells readers that
+ * they can once again use their fastpaths.
+ */
+void rcu_sync_exit(struct rcu_sync *rsp)
+{
+       spin_lock_irq(&rsp->rss_lock);
+       if (!--rsp->gp_count) {
+               if (rsp->cb_state == CB_IDLE) {
+                       rsp->cb_state = CB_PENDING;
+                       gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
+               } else if (rsp->cb_state == CB_PENDING) {
+                       rsp->cb_state = CB_REPLAY;
+               }
+       }
+       spin_unlock_irq(&rsp->rss_lock);
+}
+
+/**
+ * rcu_sync_dtor() - Clean up an rcu_sync structure
+ * @rsp: Pointer to rcu_sync structure to be cleaned up
+ */
+void rcu_sync_dtor(struct rcu_sync *rsp)
+{
+       int cb_state;
+
+       BUG_ON(rsp->gp_count);
+
+       spin_lock_irq(&rsp->rss_lock);
+       if (rsp->cb_state == CB_REPLAY)
+               rsp->cb_state = CB_PENDING;
+       cb_state = rsp->cb_state;
+       spin_unlock_irq(&rsp->rss_lock);
+
+       if (cb_state != CB_IDLE) {
+               gp_ops[rsp->gp_type].wait();
+               BUG_ON(rsp->cb_state != CB_IDLE);
+       }
+}
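
Taken together, rcu_sync lets an updater toggle readers between a fastpath and a slowpath while paying at most one grace period per burst of updaters, with the exit-side grace period deferred to a callback. A minimal usage sketch for the RCU-sched flavor; my_rss and the reader/updater bodies are hypothetical (percpu-rwsem above is the real in-tree user):

/* Usage sketch; my_rss is an example name, bodies are placeholders. */
#include <linux/rcu_sync.h>
#include <linux/preempt.h>

static struct rcu_sync my_rss;

static void example_init(void)
{
	rcu_sync_init(&my_rss, RCU_SCHED_SYNC);
}

static void example_reader(void)
{
	preempt_disable();		/* sched-RCU read-side section */
	if (rcu_sync_is_idle(&my_rss)) {
		/* fastpath: no updater is active or pending */
	} else {
		/* slowpath: fall back to heavier synchronization */
	}
	preempt_enable();
}

static void example_updater(void)
{
	rcu_sync_enter(&my_rss);	/* readers forced onto slowpath, GP waited */
	/* ... update whatever the fastpath depends on ... */
	rcu_sync_exit(&my_rss);		/* fastpath re-enabled one GP later */
}
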
index d0471056d0afac18ed1739981aaa415495414791..944b1b491ed84b3d2d1cf977a9838a30cf405121 100644 (file)
@@ -44,7 +44,7 @@ struct rcu_ctrlblk;
 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
 static void rcu_process_callbacks(struct softirq_action *unused);
 static void __call_rcu(struct rcu_head *head,
-                      void (*func)(struct rcu_head *rcu),
+                      rcu_callback_t func,
                       struct rcu_ctrlblk *rcp);
 
 #include "tiny_plugin.h"
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
  * Helper function for call_rcu() and call_rcu_bh().
  */
 static void __call_rcu(struct rcu_head *head,
-                      void (*func)(struct rcu_head *rcu),
+                      rcu_callback_t func,
                       struct rcu_ctrlblk *rcp)
 {
        unsigned long flags;
@@ -229,7 +229,7 @@ static void __call_rcu(struct rcu_head *head,
  * period.  But since we have but one CPU, that would be after any
  * quiescent state.
  */
-void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
 {
        __call_rcu(head, func, &rcu_sched_ctrlblk);
 }
@@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
  * Post an RCU bottom-half callback to be invoked after any subsequent
  * quiescent state.
  */
-void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
 {
        __call_rcu(head, func, &rcu_bh_ctrlblk);
 }
index 9f75f25cc5d92667c27d70dd1b1a6091b42fbceb..f07343b54fe5a29d4fcabe7e78b9c3ea79029414 100644 (file)
@@ -71,7 +71,6 @@ MODULE_ALIAS("rcutree");
 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
 static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS];
-static struct lock_class_key rcu_exp_sched_class[RCU_NUM_LVLS];
 
 /*
  * In order to export the rcu_state name to the tracing tools, it
@@ -98,7 +97,7 @@ struct rcu_state sname##_state = { \
        .level = { &sname##_state.node[0] }, \
        .rda = &sname##_data, \
        .call = cr, \
-       .fqs_state = RCU_GP_IDLE, \
+       .gp_state = RCU_GP_IDLE, \
        .gpnum = 0UL - 300UL, \
        .completed = 0UL - 300UL, \
        .orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
@@ -161,6 +160,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
+static void rcu_report_exp_rdp(struct rcu_state *rsp,
+                              struct rcu_data *rdp, bool wake);
 
 /* rcuc/rcub kthread realtime priority */
 #ifdef CONFIG_RCU_KTHREAD_PRIO
@@ -245,21 +246,33 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
  */
 void rcu_sched_qs(void)
 {
-       if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
+       unsigned long flags;
+
+       if (__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) {
                trace_rcu_grace_period(TPS("rcu_sched"),
                                       __this_cpu_read(rcu_sched_data.gpnum),
                                       TPS("cpuqs"));
-               __this_cpu_write(rcu_sched_data.passed_quiesce, 1);
+               __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
+               if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
+                       return;
+               local_irq_save(flags);
+               if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
+                       __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
+                       rcu_report_exp_rdp(&rcu_sched_state,
+                                          this_cpu_ptr(&rcu_sched_data),
+                                          true);
+               }
+               local_irq_restore(flags);
        }
 }
 
 void rcu_bh_qs(void)
 {
-       if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
+       if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
                trace_rcu_grace_period(TPS("rcu_bh"),
                                       __this_cpu_read(rcu_bh_data.gpnum),
                                       TPS("cpuqs"));
-               __this_cpu_write(rcu_bh_data.passed_quiesce, 1);
+               __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
        }
 }
 
@@ -337,12 +350,14 @@ static void rcu_momentary_dyntick_idle(void)
  */
 void rcu_note_context_switch(void)
 {
+       barrier(); /* Avoid RCU read-side critical sections leaking down. */
        trace_rcu_utilization(TPS("Start context switch"));
        rcu_sched_qs();
        rcu_preempt_note_context_switch();
        if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
                rcu_momentary_dyntick_idle();
        trace_rcu_utilization(TPS("End context switch"));
+       barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
@@ -353,12 +368,19 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
  * RCU flavors in desperate need of a quiescent state, which will normally
  * be none of them).  Either way, do a lightweight quiescent state for
  * all RCU flavors.
+ *
+ * The barrier() calls are redundant in the common case when this is
+ * called externally, but they matter in case this is called from within
+ * this file.
+ *
  */
 void rcu_all_qs(void)
 {
+       barrier(); /* Avoid RCU read-side critical sections leaking down. */
        if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
                rcu_momentary_dyntick_idle();
        this_cpu_inc(rcu_qs_ctr);
+       barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
 EXPORT_SYMBOL_GPL(rcu_all_qs);
 
@@ -1744,9 +1766,9 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                 */
                rdp->gpnum = rnp->gpnum;
                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
-               rdp->passed_quiesce = 0;
+               rdp->cpu_no_qs.b.norm = true;
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-               rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
+               rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
                zero_cpu_stall_ticks(rdp);
                WRITE_ONCE(rdp->gpwrap, false);
        }
@@ -1927,16 +1949,15 @@ static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
 /*
  * Do one round of quiescent-state forcing.
  */
-static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
+static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
 {
-       int fqs_state = fqs_state_in;
        bool isidle = false;
        unsigned long maxj;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
        WRITE_ONCE(rsp->gp_activity, jiffies);
        rsp->n_force_qs++;
-       if (fqs_state == RCU_SAVE_DYNTICK) {
+       if (first_time) {
                /* Collect dyntick-idle snapshots. */
                if (is_sysidle_rcu_state(rsp)) {
                        isidle = true;
@@ -1945,7 +1966,6 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
                force_qs_rnp(rsp, dyntick_save_progress_counter,
                             &isidle, &maxj);
                rcu_sysidle_report_gp(rsp, isidle, maxj);
-               fqs_state = RCU_FORCE_QS;
        } else {
                /* Handle dyntick-idle and offline CPUs. */
                isidle = true;
@@ -1959,7 +1979,6 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
                           READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
                raw_spin_unlock_irq(&rnp->lock);
        }
-       return fqs_state;
 }
 
 /*
@@ -2023,7 +2042,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        /* Declare grace period done. */
        WRITE_ONCE(rsp->completed, rsp->gpnum);
        trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
-       rsp->fqs_state = RCU_GP_IDLE;
+       rsp->gp_state = RCU_GP_IDLE;
        rdp = this_cpu_ptr(rsp->rda);
        /* Advance CBs to reduce false positives below. */
        needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
@@ -2041,7 +2060,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
  */
 static int __noreturn rcu_gp_kthread(void *arg)
 {
-       int fqs_state;
+       bool first_gp_fqs;
        int gf;
        unsigned long j;
        int ret;
@@ -2073,7 +2092,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                }
 
                /* Handle quiescent-state forcing. */
-               fqs_state = RCU_SAVE_DYNTICK;
+               first_gp_fqs = true;
                j = jiffies_till_first_fqs;
                if (j > HZ) {
                        j = HZ;
@@ -2101,7 +2120,8 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                trace_rcu_grace_period(rsp->name,
                                                       READ_ONCE(rsp->gpnum),
                                                       TPS("fqsstart"));
-                               fqs_state = rcu_gp_fqs(rsp, fqs_state);
+                               rcu_gp_fqs(rsp, first_gp_fqs);
+                               first_gp_fqs = false;
                                trace_rcu_grace_period(rsp->name,
                                                       READ_ONCE(rsp->gpnum),
                                                       TPS("fqsend"));
@@ -2337,7 +2357,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
        rnp = rdp->mynode;
        raw_spin_lock_irqsave(&rnp->lock, flags);
        smp_mb__after_unlock_lock();
-       if ((rdp->passed_quiesce == 0 &&
+       if ((rdp->cpu_no_qs.b.norm &&
             rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
            rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
            rdp->gpwrap) {
@@ -2348,7 +2368,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                 * We will instead need a new quiescent state that lies
                 * within the current grace period.
                 */
-               rdp->passed_quiesce = 0;        /* need qs for new gp. */
+               rdp->cpu_no_qs.b.norm = true;   /* need qs for new gp. */
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
@@ -2357,7 +2377,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
        if ((rnp->qsmask & mask) == 0) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        } else {
-               rdp->qs_pending = 0;
+               rdp->core_needs_qs = 0;
 
                /*
                 * This GP can't end until cpu checks in, so all of our
@@ -2388,14 +2408,14 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
         * Does this CPU still need to do its part for current grace period?
         * If no, return and let the other CPUs do their part as well.
         */
-       if (!rdp->qs_pending)
+       if (!rdp->core_needs_qs)
                return;
 
        /*
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
-       if (!rdp->passed_quiesce &&
+       if (rdp->cpu_no_qs.b.norm &&
            rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
                return;
 
@@ -3017,7 +3037,7 @@ static void rcu_leak_callback(struct rcu_head *rhp)
  * is expected to specify a CPU.
  */
 static void
-__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
+__call_rcu(struct rcu_head *head, rcu_callback_t func,
           struct rcu_state *rsp, int cpu, bool lazy)
 {
        unsigned long flags;
@@ -3088,7 +3108,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 /*
  * Queue an RCU-sched callback for invocation after a grace period.
  */
-void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
 {
        __call_rcu(head, func, &rcu_sched_state, -1, 0);
 }
@@ -3097,7 +3117,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
 /*
  * Queue an RCU callback for invocation after a quicker grace period.
  */
-void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
 {
        __call_rcu(head, func, &rcu_bh_state, -1, 0);
 }
@@ -3111,7 +3131,7 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
  * function may only be called from __kfree_rcu().
  */
 void kfree_call_rcu(struct rcu_head *head,
-                   void (*func)(struct rcu_head *rcu))
+                   rcu_callback_t func)
 {
        __call_rcu(head, func, rcu_state_p, -1, 1);
 }
@@ -3379,6 +3399,191 @@ static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
        return rcu_seq_done(&rsp->expedited_sequence, s);
 }
 
+/*
+ * Reset the ->expmaskinit values in the rcu_node tree to reflect any
+ * recent CPU-online activity.  Note that these masks are not cleared
+ * when CPUs go offline, so they reflect the union of all CPUs that have
+ * ever been online.  This means that this function normally takes its
+ * no-work-to-do fastpath.
+ */
+static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
+{
+       bool done;
+       unsigned long flags;
+       unsigned long mask;
+       unsigned long oldmask;
+       int ncpus = READ_ONCE(rsp->ncpus);
+       struct rcu_node *rnp;
+       struct rcu_node *rnp_up;
+
+       /* If no new CPUs onlined since last time, nothing to do. */
+       if (likely(ncpus == rsp->ncpus_snap))
+               return;
+       rsp->ncpus_snap = ncpus;
+
+       /*
+        * Each pass through the following loop propagates newly onlined
+        * CPUs for the current rcu_node structure up the rcu_node tree.
+        */
+       rcu_for_each_leaf_node(rsp, rnp) {
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               smp_mb__after_unlock_lock();
+               if (rnp->expmaskinit == rnp->expmaskinitnext) {
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       continue;  /* No new CPUs, nothing to do. */
+               }
+
+               /* Update this node's mask, track old value for propagation. */
+               oldmask = rnp->expmaskinit;
+               rnp->expmaskinit = rnp->expmaskinitnext;
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+
+               /* If the mask was already nonzero, nothing to propagate. */
+               if (oldmask)
+                       continue;
+
+               /* Propagate the new CPU up the tree. */
+               mask = rnp->grpmask;
+               rnp_up = rnp->parent;
+               done = false;
+               while (rnp_up) {
+                       raw_spin_lock_irqsave(&rnp_up->lock, flags);
+                       smp_mb__after_unlock_lock();
+                       if (rnp_up->expmaskinit)
+                               done = true;
+                       rnp_up->expmaskinit |= mask;
+                       raw_spin_unlock_irqrestore(&rnp_up->lock, flags);
+                       if (done)
+                               break;
+                       mask = rnp_up->grpmask;
+                       rnp_up = rnp_up->parent;
+               }
+       }
+}
+
+/*
+ * Reset the ->expmask values in the rcu_node tree in preparation for
+ * a new expedited grace period.
+ */
+static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
+{
+       unsigned long flags;
+       struct rcu_node *rnp;
+
+       sync_exp_reset_tree_hotplug(rsp);
+       rcu_for_each_node_breadth_first(rsp, rnp) {
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               smp_mb__after_unlock_lock();
+               WARN_ON_ONCE(rnp->expmask);
+               rnp->expmask = rnp->expmaskinit;
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       }
+}
+
+/*
+ * Return non-zero if there is no RCU expedited grace period in progress
+ * for the specified rcu_node structure, in other words, if all CPUs and
+ * tasks covered by the specified rcu_node structure have done their bit
+ * for the current expedited grace period.  Works only for preemptible
+ * RCU -- other RCU implementations use other means.
+ *
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
+ */
+static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
+{
+       return rnp->exp_tasks == NULL &&
+              READ_ONCE(rnp->expmask) == 0;
+}
+
+/*
+ * Report the exit from RCU read-side critical section for the last task
+ * that queued itself during or before the current expedited preemptible-RCU
+ * grace period.  This event is reported either to the rcu_node structure on
+ * which the task was queued or to one of that rcu_node structure's ancestors,
+ * recursively up the tree.  (Calm down, calm down, we do the recursion
+ * iteratively!)
+ *
+ * Caller must hold the root rcu_node's exp_funnel_mutex and the
+ * specified rcu_node structure's ->lock.
+ */
+static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+                                bool wake, unsigned long flags)
+       __releases(rnp->lock)
+{
+       unsigned long mask;
+
+       for (;;) {
+               if (!sync_rcu_preempt_exp_done(rnp)) {
+                       if (!rnp->expmask)
+                               rcu_initiate_boost(rnp, flags);
+                       else
+                               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       break;
+               }
+               if (rnp->parent == NULL) {
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       if (wake) {
+                               smp_mb(); /* EGP done before wake_up(). */
+                               wake_up(&rsp->expedited_wq);
+                       }
+                       break;
+               }
+               mask = rnp->grpmask;
+               raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
+               rnp = rnp->parent;
+               raw_spin_lock(&rnp->lock); /* irqs already disabled */
+               smp_mb__after_unlock_lock();
+               WARN_ON_ONCE(!(rnp->expmask & mask));
+               rnp->expmask &= ~mask;
+       }
+}
+
+/*
+ * Report expedited quiescent state for specified node.  This is a
+ * lock-acquisition wrapper function for __rcu_report_exp_rnp().
+ *
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
+ */
+static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
+                                             struct rcu_node *rnp, bool wake)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&rnp->lock, flags);
+       smp_mb__after_unlock_lock();
+       __rcu_report_exp_rnp(rsp, rnp, wake, flags);
+}
+
+/*
+ * Report expedited quiescent state for multiple CPUs, all covered by the
+ * specified leaf rcu_node structure.  Caller must hold the root
+ * rcu_node's exp_funnel_mutex.
+ */
+static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
+                                   unsigned long mask, bool wake)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&rnp->lock, flags);
+       smp_mb__after_unlock_lock();
+       if (!(rnp->expmask & mask)) {
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               return;
+       }
+       rnp->expmask &= ~mask;
+       __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
+}
+
+/*
+ * Report expedited quiescent state for specified rcu_data (CPU).
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
+ */
+static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
+                              bool wake)
+{
+       rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
+}
+
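
All of the reporting helpers above funnel into __rcu_report_exp_rnp(): clear the finished CPUs' bits in the leaf's ->expmask and, whenever a node's mask drops to zero (and it has no blocked tasks), clear that node's own bit in its parent, walking upward until a still-busy ancestor or the root is reached, where the waiter on expedited_wq is woken. A stripped-down illustration of that upward sweep, with toy types and no locking, blocked-task handling, or wakeups (not the kernel code):

/* Illustrative only: propagate one group's completion toward the root. */
struct toy_node {
	unsigned long expmask;		/* children still being waited on */
	unsigned long grpmask;		/* this node's bit in parent->expmask */
	struct toy_node *parent;
};

static void toy_report_exp(struct toy_node *rnp, unsigned long mask)
{
	for (;;) {
		rnp->expmask &= ~mask;
		if (rnp->expmask || !rnp->parent)
			return;			/* still busy, or hit the root */
		mask = rnp->grpmask;		/* node empty: clear it upstream */
		rnp = rnp->parent;
	}
}
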
 /* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
 static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
                               struct rcu_data *rdp,
@@ -3455,16 +3660,111 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 }
 
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
-static int synchronize_sched_expedited_cpu_stop(void *data)
+static void sync_sched_exp_handler(void *data)
 {
-       struct rcu_data *rdp = data;
-       struct rcu_state *rsp = rdp->rsp;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
+       struct rcu_state *rsp = data;
 
-       /* We are here: If we are last, do the wakeup. */
-       rdp->exp_done = true;
-       if (atomic_dec_and_test(&rsp->expedited_need_qs))
-               wake_up(&rsp->expedited_wq);
-       return 0;
+       rdp = this_cpu_ptr(rsp->rda);
+       rnp = rdp->mynode;
+       if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
+           __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
+               return;
+       __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
+       resched_cpu(smp_processor_id());
+}
+
+/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
+static void sync_sched_exp_online_cleanup(int cpu)
+{
+       struct rcu_data *rdp;
+       int ret;
+       struct rcu_node *rnp;
+       struct rcu_state *rsp = &rcu_sched_state;
+
+       rdp = per_cpu_ptr(rsp->rda, cpu);
+       rnp = rdp->mynode;
+       if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
+               return;
+       ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
+       WARN_ON_ONCE(ret);
+}
+
+/*
+ * Select the nodes that the upcoming expedited grace period needs
+ * to wait for.
+ */
+static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
+                                    smp_call_func_t func)
+{
+       int cpu;
+       unsigned long flags;
+       unsigned long mask;
+       unsigned long mask_ofl_test;
+       unsigned long mask_ofl_ipi;
+       int ret;
+       struct rcu_node *rnp;
+
+       sync_exp_reset_tree(rsp);
+       rcu_for_each_leaf_node(rsp, rnp) {
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               smp_mb__after_unlock_lock();
+
+               /* Each pass checks a CPU for identity, offline, and idle. */
+               mask_ofl_test = 0;
+               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
+                       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+                       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+                       if (raw_smp_processor_id() == cpu ||
+                           !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+                               mask_ofl_test |= rdp->grpmask;
+               }
+               mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
+
+               /*
+                * Need to wait for any blocked tasks as well.  Note that
+                * additional blocking tasks will also block the expedited
+                * GP until such time as the ->expmask bits are cleared.
+                */
+               if (rcu_preempt_has_tasks(rnp))
+                       rnp->exp_tasks = rnp->blkd_tasks.next;
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+
+               /* IPI the remaining CPUs for expedited quiescent state. */
+               mask = 1;
+               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
+                       if (!(mask_ofl_ipi & mask))
+                               continue;
+retry_ipi:
+                       ret = smp_call_function_single(cpu, func, rsp, 0);
+                       if (!ret) {
+                               mask_ofl_ipi &= ~mask;
+                       } else {
+                               /* Failed, raced with offline. */
+                               raw_spin_lock_irqsave(&rnp->lock, flags);
+                               if (cpu_online(cpu) &&
+                                   (rnp->expmask & mask)) {
+                                       raw_spin_unlock_irqrestore(&rnp->lock,
+                                                                  flags);
+                                       schedule_timeout_uninterruptible(1);
+                                       if (cpu_online(cpu) &&
+                                           (rnp->expmask & mask))
+                                               goto retry_ipi;
+                                       raw_spin_lock_irqsave(&rnp->lock,
+                                                             flags);
+                               }
+                               if (!(rnp->expmask & mask))
+                                       mask_ofl_ipi &= ~mask;
+                               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       }
+               }
+               /* Report quiescent states for those that went offline. */
+               mask_ofl_test |= mask_ofl_ipi;
+               if (mask_ofl_test)
+                       rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
+       }
 }
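/*
 * Illustrative sketch (editor-supplied, not part of the patch): the loop in
 * sync_rcu_exp_select_cpus() above skips the IPI for any CPU whose dynticks
 * counter is even, i.e. a CPU that is idle from RCU's point of view.  The
 * stand-alone program below models only that parity test with a plain C11
 * atomic counter; the names used here are made up for the demo.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int dynticks;                    /* even: idle, odd: non-idle */

static void enter_nonidle(void) { atomic_fetch_add(&dynticks, 1); } /* -> odd  */
static void exit_to_idle(void)  { atomic_fetch_add(&dynticks, 1); } /* -> even */

static bool counts_as_idle(void)
{
        /* Mirrors !(atomic_add_return(0, &rdtp->dynticks) & 0x1) above. */
        return !(atomic_load(&dynticks) & 0x1);
}

int main(void)
{
        printf("idle at start:  %d\n", counts_as_idle());  /* 1: no IPI needed */
        enter_nonidle();
        printf("idle when busy: %d\n", counts_as_idle());  /* 0: must be IPIed */
        exit_to_idle();
        printf("idle again:     %d\n", counts_as_idle());  /* 1 */
        return 0;
}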
 
 static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
@@ -3472,7 +3772,9 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
        int cpu;
        unsigned long jiffies_stall;
        unsigned long jiffies_start;
-       struct rcu_data *rdp;
+       unsigned long mask;
+       struct rcu_node *rnp;
+       struct rcu_node *rnp_root = rcu_get_root(rsp);
        int ret;
 
        jiffies_stall = rcu_jiffies_till_stall_check();
@@ -3481,33 +3783,43 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
        for (;;) {
                ret = wait_event_interruptible_timeout(
                                rsp->expedited_wq,
-                               !atomic_read(&rsp->expedited_need_qs),
+                               sync_rcu_preempt_exp_done(rnp_root),
                                jiffies_stall);
                if (ret > 0)
                        return;
                if (ret < 0) {
                        /* Hit a signal, disable CPU stall warnings. */
                        wait_event(rsp->expedited_wq,
-                                  !atomic_read(&rsp->expedited_need_qs));
+                                  sync_rcu_preempt_exp_done(rnp_root));
                        return;
                }
-               pr_err("INFO: %s detected expedited stalls on CPUs: {",
+               pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
                       rsp->name);
-               for_each_online_cpu(cpu) {
-                       rdp = per_cpu_ptr(rsp->rda, cpu);
-
-                       if (rdp->exp_done)
-                               continue;
-                       pr_cont(" %d", cpu);
+               rcu_for_each_leaf_node(rsp, rnp) {
+                       (void)rcu_print_task_exp_stall(rnp);
+                       mask = 1;
+                       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
+                               struct rcu_data *rdp;
+
+                               if (!(rnp->expmask & mask))
+                                       continue;
+                               rdp = per_cpu_ptr(rsp->rda, cpu);
+                               pr_cont(" %d-%c%c%c", cpu,
+                                       "O."[cpu_online(cpu)],
+                                       "o."[!!(rdp->grpmask & rnp->expmaskinit)],
+                                       "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
+                       }
+                       mask <<= 1;
                }
                pr_cont(" } %lu jiffies s: %lu\n",
                        jiffies - jiffies_start, rsp->expedited_sequence);
-               for_each_online_cpu(cpu) {
-                       rdp = per_cpu_ptr(rsp->rda, cpu);
-
-                       if (rdp->exp_done)
-                               continue;
-                       dump_cpu_task(cpu);
+               rcu_for_each_leaf_node(rsp, rnp) {
+                       mask = 1;
+                       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
+                               if (!(rnp->expmask & mask))
+                                       continue;
+                               dump_cpu_task(cpu);
+                       }
                }
                jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
        }
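/*
 * Illustrative sketch (editor-supplied, not part of the patch): the expedited
 * stall printout above builds its per-CPU flag characters by indexing a
 * two-character string literal with a 0-or-1 condition, e.g.
 * "O."[cpu_online(cpu)]: index 0 selects the flag letter, index 1 selects '.'.
 * A tiny demo with made-up flag values:
 */
#include <stdio.h>

int main(void)
{
        int online = 1, ever_online = 0;

        /* Prints "cpu 3-.o": online (so '.'), never seen online before ('o'). */
        printf("cpu 3-%c%c\n", "O."[!!online], "o."[!!ever_online]);
        return 0;
}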
@@ -3531,7 +3843,6 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
  */
 void synchronize_sched_expedited(void)
 {
-       int cpu;
        unsigned long s;
        struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_sched_state;
@@ -3539,48 +3850,16 @@ void synchronize_sched_expedited(void)
        /* Take a snapshot of the sequence number.  */
        s = rcu_exp_gp_seq_snap(rsp);
 
-       if (!try_get_online_cpus()) {
-               /* CPU hotplug operation in flight, fall back to normal GP. */
-               wait_rcu_gp(call_rcu_sched);
-               atomic_long_inc(&rsp->expedited_normal);
-               return;
-       }
-       WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
-
        rnp = exp_funnel_lock(rsp, s);
-       if (rnp == NULL) {
-               put_online_cpus();
+       if (rnp == NULL)
                return;  /* Someone else did our work for us. */
-       }
 
        rcu_exp_gp_seq_start(rsp);
-
-       /* Stop each CPU that is online, non-idle, and not us. */
-       init_waitqueue_head(&rsp->expedited_wq);
-       atomic_set(&rsp->expedited_need_qs, 1); /* Extra count avoids race. */
-       for_each_online_cpu(cpu) {
-               struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-               struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
-
-               rdp->exp_done = false;
-
-               /* Skip our CPU and any idle CPUs. */
-               if (raw_smp_processor_id() == cpu ||
-                   !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
-                       continue;
-               atomic_inc(&rsp->expedited_need_qs);
-               stop_one_cpu_nowait(cpu, synchronize_sched_expedited_cpu_stop,
-                                   rdp, &rdp->exp_stop_work);
-       }
-
-       /* Remove extra count and, if necessary, wait for CPUs to stop. */
-       if (!atomic_dec_and_test(&rsp->expedited_need_qs))
-               synchronize_sched_expedited_wait(rsp);
+       sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
+       synchronize_sched_expedited_wait(rsp);
 
        rcu_exp_gp_seq_end(rsp);
        mutex_unlock(&rnp->exp_funnel_mutex);
-
-       put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
 
@@ -3606,11 +3885,11 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
        /* Is the RCU core waiting for a quiescent state from this CPU? */
        if (rcu_scheduler_fully_active &&
-           rdp->qs_pending && !rdp->passed_quiesce &&
+           rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
            rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
-               rdp->n_rp_qs_pending++;
-       } else if (rdp->qs_pending &&
-                  (rdp->passed_quiesce ||
+               rdp->n_rp_core_needs_qs++;
+       } else if (rdp->core_needs_qs &&
+                  (!rdp->cpu_no_qs.b.norm ||
                    rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
                rdp->n_rp_report_qs++;
                return 1;
@@ -3901,7 +4180,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 
        /* Set up local state, ensuring consistent view of global state. */
        raw_spin_lock_irqsave(&rnp->lock, flags);
-       rdp->beenonline = 1;     /* We have now been online. */
        rdp->qlen_last_fqs_check = 0;
        rdp->n_force_qs_snap = rsp->n_force_qs;
        rdp->blimit = blimit;
@@ -3923,11 +4201,15 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        raw_spin_lock(&rnp->lock);              /* irqs already disabled. */
        smp_mb__after_unlock_lock();
        rnp->qsmaskinitnext |= mask;
+       rnp->expmaskinitnext |= mask;
+       if (!rdp->beenonline)
+               WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1);
+       rdp->beenonline = true;  /* We have now been online. */
        rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
        rdp->completed = rnp->completed;
-       rdp->passed_quiesce = false;
+       rdp->cpu_no_qs.b.norm = true;
        rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
-       rdp->qs_pending = false;
+       rdp->core_needs_qs = false;
        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -3960,6 +4242,7 @@ int rcu_cpu_notify(struct notifier_block *self,
                break;
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
+               sync_sched_exp_online_cleanup(cpu);
                rcu_boost_kthread_setaffinity(rnp, -1);
                break;
        case CPU_DOWN_PREPARE:
@@ -3971,6 +4254,12 @@ int rcu_cpu_notify(struct notifier_block *self,
                        rcu_cleanup_dying_cpu(rsp);
                break;
        case CPU_DYING_IDLE:
+               /* QS for any half-done expedited RCU-sched GP. */
+               preempt_disable();
+               rcu_report_exp_rdp(&rcu_sched_state,
+                                  this_cpu_ptr(rcu_sched_state.rda), true);
+               preempt_enable();
+
                for_each_rcu_flavor(rsp) {
                        rcu_cleanup_dying_idle_cpu(cpu, rsp);
                }
@@ -4102,7 +4391,6 @@ static void __init rcu_init_one(struct rcu_state *rsp,
        static const char * const buf[] = RCU_NODE_NAME_INIT;
        static const char * const fqs[] = RCU_FQS_NAME_INIT;
        static const char * const exp[] = RCU_EXP_NAME_INIT;
-       static const char * const exp_sched[] = RCU_EXP_SCHED_NAME_INIT;
        static u8 fl_mask = 0x1;
 
        int levelcnt[RCU_NUM_LVLS];             /* # nodes in each level. */
@@ -4162,18 +4450,13 @@ static void __init rcu_init_one(struct rcu_state *rsp,
                        INIT_LIST_HEAD(&rnp->blkd_tasks);
                        rcu_init_one_nocb(rnp);
                        mutex_init(&rnp->exp_funnel_mutex);
-                       if (rsp == &rcu_sched_state)
-                               lockdep_set_class_and_name(
-                                       &rnp->exp_funnel_mutex,
-                                       &rcu_exp_sched_class[i], exp_sched[i]);
-                       else
-                               lockdep_set_class_and_name(
-                                       &rnp->exp_funnel_mutex,
-                                       &rcu_exp_class[i], exp[i]);
+                       lockdep_set_class_and_name(&rnp->exp_funnel_mutex,
+                                                  &rcu_exp_class[i], exp[i]);
                }
        }
 
        init_waitqueue_head(&rsp->gp_wq);
+       init_waitqueue_head(&rsp->expedited_wq);
        rnp = rsp->level[rcu_num_lvls - 1];
        for_each_possible_cpu(i) {
                while (i > rnp->grphi)
@@ -4216,13 +4499,12 @@ static void __init rcu_init_geometry(void)
                rcu_fanout_leaf, nr_cpu_ids);
 
        /*
-        * The boot-time rcu_fanout_leaf parameter is only permitted
-        * to increase the leaf-level fanout, not decrease it.  Of course,
-        * the leaf-level fanout cannot exceed the number of bits in
-        * the rcu_node masks.  Complain and fall back to the compile-
-        * time values if these limits are exceeded.
+        * The boot-time rcu_fanout_leaf parameter must be at least two
+        * and cannot exceed the number of bits in the rcu_node masks.
+        * Complain and fall back to the compile-time values if this
+        * limit is exceeded.
         */
-       if (rcu_fanout_leaf < RCU_FANOUT_LEAF ||
+       if (rcu_fanout_leaf < 2 ||
            rcu_fanout_leaf > sizeof(unsigned long) * 8) {
                rcu_fanout_leaf = RCU_FANOUT_LEAF;
                WARN_ON(1);
@@ -4239,10 +4521,13 @@ static void __init rcu_init_geometry(void)
 
        /*
         * The tree must be able to accommodate the configured number of CPUs.
-        * If this limit is exceeded than we have a serious problem elsewhere.
+        * If this limit is exceeded, fall back to the compile-time values.
         */
-       if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1])
-               panic("rcu_init_geometry: rcu_capacity[] is too small");
+       if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
+               rcu_fanout_leaf = RCU_FANOUT_LEAF;
+               WARN_ON(1);
+               return;
+       }
 
        /* Calculate the number of levels in the tree. */
        for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
index 2e991f8361e4203a85d7b8e55afc6be370d7d7be..9fb4e238d4dcaaed1565dd7bf08a3c71a7f518bf 100644 (file)
@@ -70,8 +70,6 @@
 #  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0" }
-#  define RCU_EXP_SCHED_NAME_INIT \
-                             { "rcu_node_exp_sched_0" }
 #elif NR_CPUS <= RCU_FANOUT_2
 #  define RCU_NUM_LVLS       2
 #  define NUM_RCU_LVL_0              1
@@ -81,8 +79,6 @@
 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1" }
-#  define RCU_EXP_SCHED_NAME_INIT \
-                             { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1" }
 #elif NR_CPUS <= RCU_FANOUT_3
 #  define RCU_NUM_LVLS       3
 #  define NUM_RCU_LVL_0              1
@@ -93,8 +89,6 @@
 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
-#  define RCU_EXP_SCHED_NAME_INIT \
-                             { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1", "rcu_node_exp_sched_2" }
 #elif NR_CPUS <= RCU_FANOUT_4
 #  define RCU_NUM_LVLS       4
 #  define NUM_RCU_LVL_0              1
 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
-#  define RCU_EXP_SCHED_NAME_INIT \
-                             { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1", "rcu_node_exp_sched_2", "rcu_node_exp_sched_3" }
 #else
 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
 #endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
@@ -171,16 +163,21 @@ struct rcu_node {
                                /*  an rcu_data structure, otherwise, each */
                                /*  bit corresponds to a child rcu_node */
                                /*  structure. */
-       unsigned long expmask;  /* Groups that have ->blkd_tasks */
-                               /*  elements that need to drain to allow the */
-                               /*  current expedited grace period to */
-                               /*  complete (only for PREEMPT_RCU). */
        unsigned long qsmaskinit;
-                               /* Per-GP initial value for qsmask & expmask. */
+                               /* Per-GP initial value for qsmask. */
                                /*  Initialized from ->qsmaskinitnext at the */
                                /*  beginning of each grace period. */
        unsigned long qsmaskinitnext;
                                /* Online CPUs for next grace period. */
+       unsigned long expmask;  /* CPUs or groups that need to check in */
+                               /*  to allow the current expedited GP */
+                               /*  to complete. */
+       unsigned long expmaskinit;
+                               /* Per-GP initial values for expmask. */
+                               /*  Initialized from ->expmaskinitnext at the */
+                               /*  beginning of each expedited GP. */
+       unsigned long expmaskinitnext;
+                               /* Online CPUs for next expedited GP. */
        unsigned long grpmask;  /* Mask to apply to parent qsmask. */
                                /*  Only one bit will be set in this mask. */
        int     grplo;          /* lowest-numbered CPU or group here. */
@@ -281,6 +278,18 @@ struct rcu_node {
        for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
 
+/*
+ * Union to allow "aggregate OR" operation on the need for a quiescent
+ * state by the normal and expedited grace periods.
+ */
+union rcu_noqs {
+       struct {
+               u8 norm;
+               u8 exp;
+       } b; /* Bits. */
+       u16 s; /* Set of bits, aggregate OR here. */
+};
+
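/*
 * Illustrative sketch (editor-supplied, not part of the patch): the point of
 * the union above is that "does this CPU still owe any quiescent state?" can
 * be answered with one load of ->s instead of testing ->b.norm and ->b.exp
 * separately.  A stand-alone demo using the same layout:
 */
#include <stdint.h>
#include <stdio.h>

union rcu_noqs_demo {
        struct {
                uint8_t norm;
                uint8_t exp;
        } b;                    /* Bits. */
        uint16_t s;             /* Set of bits, aggregate OR. */
};

int main(void)
{
        union rcu_noqs_demo q = { .s = 0 };

        q.b.norm = 1;           /* still owes a normal quiescent state */
        printf("any QS owed? %s\n", q.s ? "yes" : "no");   /* yes */
        q.b.norm = 0;
        printf("any QS owed? %s\n", q.s ? "yes" : "no");   /* no  */
        return 0;
}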
 /* Index values for nxttail array in struct rcu_data. */
 #define RCU_DONE_TAIL          0       /* Also RCU_WAIT head. */
 #define RCU_WAIT_TAIL          1       /* Also RCU_NEXT_READY head. */
@@ -297,8 +306,8 @@ struct rcu_data {
                                        /*  is aware of having started. */
        unsigned long   rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
                                        /*  for rcu_all_qs() invocations. */
-       bool            passed_quiesce; /* User-mode/idle loop etc. */
-       bool            qs_pending;     /* Core waits for quiesc state. */
+       union rcu_noqs  cpu_no_qs;      /* No QSes yet for this CPU. */
+       bool            core_needs_qs;  /* Core waits for quiesc state. */
        bool            beenonline;     /* CPU online at least once. */
        bool            gpwrap;         /* Possible gpnum/completed wrap. */
        struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
@@ -307,9 +316,6 @@ struct rcu_data {
                                        /*  ticks this CPU has handled */
                                        /*  during and after the last grace */
                                        /* period it is aware of. */
-       struct cpu_stop_work exp_stop_work;
-                                       /* Expedited grace-period control */
-                                       /*  for CPU stopping. */
 
        /* 2) batch handling */
        /*
@@ -363,7 +369,7 @@ struct rcu_data {
 
        /* 5) __rcu_pending() statistics. */
        unsigned long n_rcu_pending;    /* rcu_pending() calls since boot. */
-       unsigned long n_rp_qs_pending;
+       unsigned long n_rp_core_needs_qs;
        unsigned long n_rp_report_qs;
        unsigned long n_rp_cb_ready;
        unsigned long n_rp_cpu_needs_gp;
@@ -378,7 +384,6 @@ struct rcu_data {
        struct rcu_head oom_head;
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
        struct mutex exp_funnel_mutex;
-       bool exp_done;                  /* Expedited QS for this CPU? */
 
        /* 7) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
@@ -412,13 +417,6 @@ struct rcu_data {
        struct rcu_state *rsp;
 };
 
-/* Values for fqs_state field in struct rcu_state. */
-#define RCU_GP_IDLE            0       /* No grace period in progress. */
-#define RCU_GP_INIT            1       /* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK       2       /* Need to scan dyntick state. */
-#define RCU_FORCE_QS           3       /* Need to force quiescent state. */
-#define RCU_SIGNAL_INIT                RCU_SAVE_DYNTICK
-
 /* Values for nocb_defer_wakeup field in struct rcu_data. */
 #define RCU_NOGP_WAKE_NOT      0
 #define RCU_NOGP_WAKE          1
@@ -464,14 +462,13 @@ struct rcu_state {
                                                /*  shut bogus gcc warning) */
        u8 flavor_mask;                         /* bit in flavor mask. */
        struct rcu_data __percpu *rda;          /* pointer to per-CPU rcu_data. */
-       void (*call)(struct rcu_head *head,     /* call_rcu() flavor. */
-                    void (*func)(struct rcu_head *head));
+       call_rcu_func_t call;                   /* call_rcu() flavor. */
+       int ncpus;                              /* # CPUs seen so far. */
 
        /* The following fields are guarded by the root rcu_node's lock. */
 
-       u8      fqs_state ____cacheline_internodealigned_in_smp;
-                                               /* Force QS state. */
-       u8      boost;                          /* Subject to priority boost. */
+       u8      boost ____cacheline_internodealigned_in_smp;
+                                               /* Subject to priority boost. */
        unsigned long gpnum;                    /* Current gp number. */
        unsigned long completed;                /* # of last completed gp. */
        struct task_struct *gp_kthread;         /* Task for grace periods. */
@@ -508,6 +505,7 @@ struct rcu_state {
        atomic_long_t expedited_normal;         /* # fallbacks to normal. */
        atomic_t expedited_need_qs;             /* # CPUs left to check in. */
        wait_queue_head_t expedited_wq;         /* Wait for check-ins. */
+       int ncpus_snap;                         /* # CPUs seen last time. */
 
        unsigned long jiffies_force_qs;         /* Time at which to invoke */
                                                /*  force_quiescent_state(). */
@@ -538,8 +536,8 @@ struct rcu_state {
 #define RCU_GP_FLAG_INIT 0x1   /* Need grace-period initialization. */
 #define RCU_GP_FLAG_FQS  0x2   /* Need grace-period quiescent-state forcing. */
 
-/* Values for rcu_state structure's gp_flags field. */
-#define RCU_GP_WAIT_INIT 0     /* Initial state. */
+/* Values for rcu_state structure's gp_state field. */
+#define RCU_GP_IDLE     0      /* Initial state and no GP in progress. */
 #define RCU_GP_WAIT_GPS  1     /* Wait for grace-period start. */
 #define RCU_GP_DONE_GPS  2     /* Wait done for grace-period start. */
 #define RCU_GP_WAIT_FQS  3     /* Wait for force-quiescent-state time. */
@@ -582,9 +580,10 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
+static int rcu_print_task_exp_stall(struct rcu_node *rnp);
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 static void rcu_preempt_check_callbacks(void);
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+void call_rcu(struct rcu_head *head, rcu_callback_t func);
 static void __init __rcu_init_preempt(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
index b2bf3963a0aee328d0d3bfc0d9e997b3a5ca106f..630c19772630cc0c2cac42e2cf751dec002e81e2 100644 (file)
@@ -101,7 +101,6 @@ RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
 static struct rcu_state *const rcu_state_p = &rcu_preempt_state;
 static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
 
-static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake);
 
@@ -114,6 +113,147 @@ static void __init rcu_bootup_announce(void)
        rcu_bootup_announce_oddness();
 }
 
+/* Flags for rcu_preempt_ctxt_queue() decision table. */
+#define RCU_GP_TASKS   0x8
+#define RCU_EXP_TASKS  0x4
+#define RCU_GP_BLKD    0x2
+#define RCU_EXP_BLKD   0x1
+
+/*
+ * Queues a task preempted within an RCU-preempt read-side critical
+ * section into the appropriate location within the ->blkd_tasks list,
+ * depending on the states of any ongoing normal and expedited grace
+ * periods.  The ->gp_tasks pointer indicates which element the normal
+ * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
+ * indicates which element the expedited grace period is waiting on (again,
+ * NULL if none).  If a grace period is waiting on a given element in the
+ * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
+ * adding a task to the tail of the list blocks any grace period that is
+ * already waiting on one of the elements.  In contrast, adding a task
+ * to the head of the list won't block any grace period that is already
+ * waiting on one of the elements.
+ *
+ * This queuing is imprecise, and can sometimes make an ongoing grace
+ * period wait for a task that is not strictly speaking blocking it.
+ * Given the choice, we needlessly block a normal grace period rather than
+ * blocking an expedited grace period.
+ *
+ * Note that an endless sequence of expedited grace periods still cannot
+ * indefinitely postpone a normal grace period.  Eventually, all of the
+ * fixed number of preempted tasks blocking the normal grace period that are
+ * not also blocking the expedited grace period will resume and complete
+ * their RCU read-side critical sections.  At that point, the ->gp_tasks
+ * pointer will equal the ->exp_tasks pointer, at which point the end of
+ * the corresponding expedited grace period will also be the end of the
+ * normal grace period.
+ */
+static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
+                                  unsigned long flags) __releases(rnp->lock)
+{
+       int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
+                        (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
+                        (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
+                        (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
+       struct task_struct *t = current;
+
+       /*
+        * Decide where to queue the newly blocked task.  In theory,
+        * this could be an if-statement.  In practice, when I tried
+        * that, it was quite messy.
+        */
+       switch (blkd_state) {
+       case 0:
+       case                RCU_EXP_TASKS:
+       case                RCU_EXP_TASKS + RCU_GP_BLKD:
+       case RCU_GP_TASKS:
+       case RCU_GP_TASKS + RCU_EXP_TASKS:
+
+               /*
+                * Blocking neither GP, or first task blocking the normal
+                * GP but not blocking the already-waiting expedited GP.
+                * Queue at the head of the list to avoid unnecessarily
+                * blocking the already-waiting GPs.
+                */
+               list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
+               break;
+
+       case                                              RCU_EXP_BLKD:
+       case                                RCU_GP_BLKD:
+       case                                RCU_GP_BLKD + RCU_EXP_BLKD:
+       case RCU_GP_TASKS +                               RCU_EXP_BLKD:
+       case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
+       case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
+
+               /*
+                * First task arriving that blocks either GP, or first task
+                * arriving that blocks the expedited GP (with the normal
+                * GP already waiting), or a task arriving that blocks
+                * both GPs with both GPs already waiting.  Queue at the
+                * tail of the list to avoid any GP waiting on any of the
+                * already queued tasks that are not blocking it.
+                */
+               list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
+               break;
+
+       case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
+       case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
+       case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:
+
+               /*
+                * Second or subsequent task blocking the expedited GP.
+                * The task either does not block the normal GP, or is the
+                * first task blocking the normal GP.  Queue just after
+                * the first task blocking the expedited GP.
+                */
+               list_add(&t->rcu_node_entry, rnp->exp_tasks);
+               break;
+
+       case RCU_GP_TASKS +                 RCU_GP_BLKD:
+       case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
+
+               /*
+                * Second or subsequent task blocking the normal GP.
+                * The task does not block the expedited GP. Queue just
+                * after the first task blocking the normal GP.
+                */
+               list_add(&t->rcu_node_entry, rnp->gp_tasks);
+               break;
+
+       default:
+
+               /* Yet another exercise in excessive paranoia. */
+               WARN_ON_ONCE(1);
+               break;
+       }
+
+       /*
+        * We have now queued the task.  If it was the first one to
+        * block either grace period, update the ->gp_tasks and/or
+        * ->exp_tasks pointers, respectively, to reference the newly
+        * blocked tasks.
+        */
+       if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
+               rnp->gp_tasks = &t->rcu_node_entry;
+       if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
+               rnp->exp_tasks = &t->rcu_node_entry;
+       raw_spin_unlock(&rnp->lock);
+
+       /*
+        * Report the quiescent state for the expedited GP.  This expedited
+        * GP should not be able to end until we report, so there should be
+        * no need to check for a subsequent expedited GP.  (Though we are
+        * still in a quiescent state in any case.)
+        */
+       if (blkd_state & RCU_EXP_BLKD &&
+           t->rcu_read_unlock_special.b.exp_need_qs) {
+               t->rcu_read_unlock_special.b.exp_need_qs = false;
+               rcu_report_exp_rdp(rdp->rsp, rdp, true);
+       } else {
+               WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
+       }
+       local_irq_restore(flags);
+}
+
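/*
 * Illustrative sketch (editor-supplied, not part of the patch): the decision
 * table in rcu_preempt_ctxt_queue() above boils down to choosing one of four
 * queueing positions from the four state bits.  The stand-alone program below
 * reproduces only that mapping and prints the chosen position; the list
 * splicing and locking are deliberately omitted.
 */
#include <stdio.h>

#define RCU_GP_TASKS   0x8     /* ->gp_tasks already points into the list  */
#define RCU_EXP_TASKS  0x4     /* ->exp_tasks already points into the list */
#define RCU_GP_BLKD    0x2     /* this task blocks the normal GP           */
#define RCU_EXP_BLKD   0x1     /* this task blocks the expedited GP        */

static const char *queue_position(int blkd_state)
{
        switch (blkd_state) {
        case 0:
        case RCU_EXP_TASKS:
        case RCU_EXP_TASKS + RCU_GP_BLKD:
        case RCU_GP_TASKS:
        case RCU_GP_TASKS + RCU_EXP_TASKS:
                return "head of ->blkd_tasks";
        case RCU_EXP_BLKD:
        case RCU_GP_BLKD:
        case RCU_GP_BLKD + RCU_EXP_BLKD:
        case RCU_GP_TASKS + RCU_EXP_BLKD:
        case RCU_GP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
        case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
                return "tail of ->blkd_tasks";
        case RCU_EXP_TASKS + RCU_EXP_BLKD:
        case RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
        case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_EXP_BLKD:
                return "just after ->exp_tasks";
        case RCU_GP_TASKS + RCU_GP_BLKD:
        case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
                return "just after ->gp_tasks";
        default:
                return "impossible state";
        }
}

int main(void)
{
        /* Blocks only an already-waiting expedited GP: queue behind ->exp_tasks. */
        printf("%s\n", queue_position(RCU_EXP_TASKS + RCU_EXP_BLKD));
        /* First task to block anything at all: queue at the tail. */
        printf("%s\n", queue_position(RCU_GP_BLKD));
        return 0;
}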
 /*
  * Record a preemptible-RCU quiescent state for the specified CPU.  Note
  * that this just means that the task currently running on the CPU is
@@ -125,11 +265,11 @@ static void __init rcu_bootup_announce(void)
  */
 static void rcu_preempt_qs(void)
 {
-       if (!__this_cpu_read(rcu_data_p->passed_quiesce)) {
+       if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
                trace_rcu_grace_period(TPS("rcu_preempt"),
                                       __this_cpu_read(rcu_data_p->gpnum),
                                       TPS("cpuqs"));
-               __this_cpu_write(rcu_data_p->passed_quiesce, 1);
+               __this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
                barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
                current->rcu_read_unlock_special.b.need_qs = false;
        }
@@ -167,42 +307,18 @@ static void rcu_preempt_note_context_switch(void)
                t->rcu_blocked_node = rnp;
 
                /*
-                * If this CPU has already checked in, then this task
-                * will hold up the next grace period rather than the
-                * current grace period.  Queue the task accordingly.
-                * If the task is queued for the current grace period
-                * (i.e., this CPU has not yet passed through a quiescent
-                * state for the current grace period), then as long
-                * as that task remains queued, the current grace period
-                * cannot end.  Note that there is some uncertainty as
-                * to exactly when the current grace period started.
-                * We take a conservative approach, which can result
-                * in unnecessarily waiting on tasks that started very
-                * slightly after the current grace period began.  C'est
-                * la vie!!!
-                *
-                * But first, note that the current CPU must still be
-                * on line!
+                * Verify the CPU's sanity, trace the preemption, and
+                * then queue the task as required based on the states
+                * of any ongoing and expedited grace periods.
                 */
                WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
-               if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
-                       list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
-                       rnp->gp_tasks = &t->rcu_node_entry;
-                       if (IS_ENABLED(CONFIG_RCU_BOOST) &&
-                           rnp->boost_tasks != NULL)
-                               rnp->boost_tasks = rnp->gp_tasks;
-               } else {
-                       list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
-                       if (rnp->qsmask & rdp->grpmask)
-                               rnp->gp_tasks = &t->rcu_node_entry;
-               }
                trace_rcu_preempt_task(rdp->rsp->name,
                                       t->pid,
                                       (rnp->qsmask & rdp->grpmask)
                                       ? rnp->gpnum
                                       : rnp->gpnum + 1);
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               rcu_preempt_ctxt_queue(rnp, rdp, flags);
        } else if (t->rcu_read_lock_nesting < 0 &&
                   t->rcu_read_unlock_special.s) {
 
@@ -272,6 +388,7 @@ void rcu_read_unlock_special(struct task_struct *t)
        unsigned long flags;
        struct list_head *np;
        bool drop_boost_mutex = false;
+       struct rcu_data *rdp;
        struct rcu_node *rnp;
        union rcu_special special;
 
@@ -282,8 +399,8 @@ void rcu_read_unlock_special(struct task_struct *t)
        local_irq_save(flags);
 
        /*
-        * If RCU core is waiting for this CPU to exit critical section,
-        * let it know that we have done so.  Because irqs are disabled,
+        * If RCU core is waiting for this CPU to exit its critical section,
+        * report the fact that it has exited.  Because irqs are disabled,
         * t->rcu_read_unlock_special cannot change.
         */
        special = t->rcu_read_unlock_special;
@@ -296,13 +413,32 @@ void rcu_read_unlock_special(struct task_struct *t)
                }
        }
 
+       /*
+        * Respond to a request for an expedited grace period, but only if
+        * we were not preempted, meaning that we were running on the same
+        * CPU throughout.  If we were preempted, the exp_need_qs flag
+        * would have been cleared at the time of the first preemption,
+        * and the quiescent state would be reported when we were dequeued.
+        */
+       if (special.b.exp_need_qs) {
+               WARN_ON_ONCE(special.b.blocked);
+               t->rcu_read_unlock_special.b.exp_need_qs = false;
+               rdp = this_cpu_ptr(rcu_state_p->rda);
+               rcu_report_exp_rdp(rcu_state_p, rdp, true);
+               if (!t->rcu_read_unlock_special.s) {
+                       local_irq_restore(flags);
+                       return;
+               }
+       }
+
        /* Hardware IRQ handlers cannot block, complain if they get here. */
        if (in_irq() || in_serving_softirq()) {
                lockdep_rcu_suspicious(__FILE__, __LINE__,
                                       "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
-               pr_alert("->rcu_read_unlock_special: %#x (b: %d, nq: %d)\n",
+               pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
                         t->rcu_read_unlock_special.s,
                         t->rcu_read_unlock_special.b.blocked,
+                        t->rcu_read_unlock_special.b.exp_need_qs,
                         t->rcu_read_unlock_special.b.need_qs);
                local_irq_restore(flags);
                return;
@@ -329,7 +465,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                }
                empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
-               empty_exp = !rcu_preempted_readers_exp(rnp);
+               empty_exp = sync_rcu_preempt_exp_done(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
@@ -353,7 +489,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
                 * so we must take a snapshot of the expedited state.
                 */
-               empty_exp_now = !rcu_preempted_readers_exp(rnp);
+               empty_exp_now = sync_rcu_preempt_exp_done(rnp);
                if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
                        trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
                                                         rnp->gpnum,
@@ -449,6 +585,27 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
        return ndetected;
 }
 
+/*
+ * Scan the current list of tasks blocked within RCU read-side critical
+ * sections, printing out the tid of each that is blocking the current
+ * expedited grace period.
+ */
+static int rcu_print_task_exp_stall(struct rcu_node *rnp)
+{
+       struct task_struct *t;
+       int ndetected = 0;
+
+       if (!rnp->exp_tasks)
+               return 0;
+       t = list_entry(rnp->exp_tasks->prev,
+                      struct task_struct, rcu_node_entry);
+       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+               pr_cont(" P%d", t->pid);
+               ndetected++;
+       }
+       return ndetected;
+}
+
 /*
  * Check that the list of blocked tasks for the newly completed grace
  * period is in fact empty.  It is a serious bug to complete a grace
@@ -483,8 +640,8 @@ static void rcu_preempt_check_callbacks(void)
                return;
        }
        if (t->rcu_read_lock_nesting > 0 &&
-           __this_cpu_read(rcu_data_p->qs_pending) &&
-           !__this_cpu_read(rcu_data_p->passed_quiesce))
+           __this_cpu_read(rcu_data_p->core_needs_qs) &&
+           __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
                t->rcu_read_unlock_special.b.need_qs = true;
 }
 
@@ -500,7 +657,7 @@ static void rcu_preempt_do_callbacks(void)
 /*
  * Queue a preemptible-RCU callback for invocation after a grace period.
  */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
        __call_rcu(head, func, rcu_state_p, -1, 0);
 }
@@ -535,155 +692,41 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
-static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-
-/*
- * Return non-zero if there are any tasks in RCU read-side critical
- * sections blocking the current preemptible-RCU expedited grace period.
- * If there is no preemptible-RCU expedited grace period currently in
- * progress, returns zero unconditionally.
- */
-static int rcu_preempted_readers_exp(struct rcu_node *rnp)
-{
-       return rnp->exp_tasks != NULL;
-}
-
-/*
- * return non-zero if there is no RCU expedited grace period in progress
- * for the specified rcu_node structure, in other words, if all CPUs and
- * tasks covered by the specified rcu_node structure have done their bit
- * for the current expedited grace period.  Works only for preemptible
- * RCU -- other RCU implementation use other means.
- *
- * Caller must hold the root rcu_node's exp_funnel_mutex.
- */
-static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
-{
-       return !rcu_preempted_readers_exp(rnp) &&
-              READ_ONCE(rnp->expmask) == 0;
-}
-
-/*
- * Report the exit from RCU read-side critical section for the last task
- * that queued itself during or before the current expedited preemptible-RCU
- * grace period.  This event is reported either to the rcu_node structure on
- * which the task was queued or to one of that rcu_node structure's ancestors,
- * recursively up the tree.  (Calm down, calm down, we do the recursion
- * iteratively!)
- *
- * Caller must hold the root rcu_node's exp_funnel_mutex.
- */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-                              bool wake)
-{
-       unsigned long flags;
-       unsigned long mask;
-
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       smp_mb__after_unlock_lock();
-       for (;;) {
-               if (!sync_rcu_preempt_exp_done(rnp)) {
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                       break;
-               }
-               if (rnp->parent == NULL) {
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                       if (wake) {
-                               smp_mb(); /* EGP done before wake_up(). */
-                               wake_up(&sync_rcu_preempt_exp_wq);
-                       }
-                       break;
-               }
-               mask = rnp->grpmask;
-               raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
-               rnp = rnp->parent;
-               raw_spin_lock(&rnp->lock); /* irqs already disabled */
-               smp_mb__after_unlock_lock();
-               rnp->expmask &= ~mask;
-       }
-}
-
 /*
- * Snapshot the tasks blocking the newly started preemptible-RCU expedited
- * grace period for the specified rcu_node structure, phase 1.  If there
- * are such tasks, set the ->expmask bits up the rcu_node tree and also
- * set the ->expmask bits on the leaf rcu_node structures to tell phase 2
- * that work is needed here.
- *
- * Caller must hold the root rcu_node's exp_funnel_mutex.
+ * Remote handler for smp_call_function_single().  If there is an
+ * RCU read-side critical section in effect, request that the
+ * next rcu_read_unlock() record the quiescent state up the
+ * ->expmask fields in the rcu_node tree.  Otherwise, immediately
+ * report the quiescent state.
  */
-static void
-sync_rcu_preempt_exp_init1(struct rcu_state *rsp, struct rcu_node *rnp)
+static void sync_rcu_exp_handler(void *info)
 {
-       unsigned long flags;
-       unsigned long mask;
-       struct rcu_node *rnp_up;
-
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       smp_mb__after_unlock_lock();
-       WARN_ON_ONCE(rnp->expmask);
-       WARN_ON_ONCE(rnp->exp_tasks);
-       if (!rcu_preempt_has_tasks(rnp)) {
-               /* No blocked tasks, nothing to do. */
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               return;
-       }
-       /* Call for Phase 2 and propagate ->expmask bits up the tree. */
-       rnp->expmask = 1;
-       rnp_up = rnp;
-       while (rnp_up->parent) {
-               mask = rnp_up->grpmask;
-               rnp_up = rnp_up->parent;
-               if (rnp_up->expmask & mask)
-                       break;
-               raw_spin_lock(&rnp_up->lock); /* irqs already off */
-               smp_mb__after_unlock_lock();
-               rnp_up->expmask |= mask;
-               raw_spin_unlock(&rnp_up->lock); /* irqs still off */
-       }
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-}
-
-/*
- * Snapshot the tasks blocking the newly started preemptible-RCU expedited
- * grace period for the specified rcu_node structure, phase 2.  If the
- * leaf rcu_node structure has its ->expmask field set, check for tasks.
- * If there are some, clear ->expmask and set ->exp_tasks accordingly,
- * then initiate RCU priority boosting.  Otherwise, clear ->expmask and
- * invoke rcu_report_exp_rnp() to clear out the upper-level ->expmask bits,
- * enabling rcu_read_unlock_special() to do the bit-clearing.
- *
- * Caller must hold the root rcu_node's exp_funnel_mutex.
- */
-static void
-sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
-{
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       smp_mb__after_unlock_lock();
-       if (!rnp->expmask) {
-               /* Phase 1 didn't do anything, so Phase 2 doesn't either. */
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               return;
-       }
-
-       /* Phase 1 is over. */
-       rnp->expmask = 0;
+       struct rcu_data *rdp;
+       struct rcu_state *rsp = info;
+       struct task_struct *t = current;
 
        /*
-        * If there are still blocked tasks, set up ->exp_tasks so that
-        * rcu_read_unlock_special() will wake us and then boost them.
+        * Within an RCU read-side critical section, request that the next
+        * rcu_read_unlock() report.  Unless this RCU read-side critical
+        * section has already blocked, in which case it is already set
+        * up for the expedited grace period to wait on it.
         */
-       if (rcu_preempt_has_tasks(rnp)) {
-               rnp->exp_tasks = rnp->blkd_tasks.next;
-               rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
+       if (t->rcu_read_lock_nesting > 0 &&
+           !t->rcu_read_unlock_special.b.blocked) {
+               t->rcu_read_unlock_special.b.exp_need_qs = true;
                return;
        }
 
-       /* No longer any blocked tasks, so undo bit setting. */
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-       rcu_report_exp_rnp(rsp, rnp, false);
+       /*
+        * We are either exiting an RCU read-side critical section (negative
+        * values of t->rcu_read_lock_nesting) or are not in one at all
+        * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
+        * read-side critical section that blocked before this expedited
+        * grace period started.  Either way, we can immediately report
+        * the quiescent state.
+        */
+       rdp = this_cpu_ptr(rsp->rda);
+       rcu_report_exp_rdp(rsp, rdp, true);
 }
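/*
 * Illustrative sketch (editor-supplied, not part of the patch): the IPI
 * handler above makes a single three-way decision.  The stand-alone model
 * below uses a plain nesting counter and "blocked" flag in place of
 * t->rcu_read_lock_nesting and t->rcu_read_unlock_special.b.blocked.
 */
#include <stdbool.h>
#include <stdio.h>

static const char *exp_handler_decision(int read_lock_nesting, bool blocked)
{
        if (read_lock_nesting > 0 && !blocked)
                return "defer: set exp_need_qs and let rcu_read_unlock() report";
        /* Not in a reader, exiting one, or already queued as a blocked reader. */
        return "report the expedited quiescent state immediately";
}

int main(void)
{
        printf("%s\n", exp_handler_decision(1, false)); /* running reader         */
        printf("%s\n", exp_handler_decision(0, false)); /* no reader at all       */
        printf("%s\n", exp_handler_decision(2, true));  /* reader already blocked */
        return 0;
}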
 
 /**
@@ -713,24 +756,12 @@ void synchronize_rcu_expedited(void)
 
        rcu_exp_gp_seq_start(rsp);
 
-       /* force all RCU readers onto ->blkd_tasks lists. */
-       synchronize_sched_expedited();
-
-       /*
-        * Snapshot current state of ->blkd_tasks lists into ->expmask.
-        * Phase 1 sets bits and phase 2 permits rcu_read_unlock_special()
-        * to start clearing them.  Doing this in one phase leads to
-        * strange races between setting and clearing bits, so just say "no"!
-        */
-       rcu_for_each_leaf_node(rsp, rnp)
-               sync_rcu_preempt_exp_init1(rsp, rnp);
-       rcu_for_each_leaf_node(rsp, rnp)
-               sync_rcu_preempt_exp_init2(rsp, rnp);
+       /* Initialize the rcu_node tree in preparation for the wait. */
+       sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
 
        /* Wait for snapshotted ->blkd_tasks lists to drain. */
        rnp = rcu_get_root(rsp);
-       wait_event(sync_rcu_preempt_exp_wq,
-                  sync_rcu_preempt_exp_done(rnp));
+       synchronize_sched_expedited_wait(rsp);
 
        /* Clean up and exit. */
        rcu_exp_gp_seq_end(rsp);
@@ -834,6 +865,16 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
        return 0;
 }
 
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * tasks blocked within RCU read-side critical sections that are
+ * blocking the current expedited grace period.
+ */
+static int rcu_print_task_exp_stall(struct rcu_node *rnp)
+{
+       return 0;
+}
+
 /*
  * Because there is no preemptible RCU, there can be no readers blocked,
  * so there is no need to check for blocked tasks.  So check only for
@@ -1702,8 +1743,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
                ticks_value = rsp->gpnum - rdp->gpnum;
        }
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-       pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
-              cpu, ticks_value, ticks_title,
+       pr_err("\t%d-%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
+              cpu,
+              "O."[!!cpu_online(cpu)],
+              "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
+              "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
+              ticks_value, ticks_title,
               atomic_read(&rdtp->dynticks) & 0xfff,
               rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
index 6fc4c5ff3bb5cf55925a1da6ba73d269d3349d49..ef7093cc9b5cd86c3f9f6c6cff6a33f4e0b62ea8 100644 (file)
@@ -117,13 +117,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 
        if (!rdp->beenonline)
                return;
-       seq_printf(m, "%3d%cc=%ld g=%ld pq=%d/%d qp=%d",
+       seq_printf(m, "%3d%cc=%ld g=%ld cnq=%d/%d:%d",
                   rdp->cpu,
                   cpu_is_offline(rdp->cpu) ? '!' : ' ',
                   ulong2long(rdp->completed), ulong2long(rdp->gpnum),
-                  rdp->passed_quiesce,
+                  rdp->cpu_no_qs.b.norm,
                   rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
-                  rdp->qs_pending);
+                  rdp->core_needs_qs);
        seq_printf(m, " dt=%d/%llx/%d df=%lu",
                   atomic_read(&rdp->dynticks->dynticks),
                   rdp->dynticks->dynticks_nesting,
@@ -268,7 +268,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
        gpnum = rsp->gpnum;
        seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x ",
                   ulong2long(rsp->completed), ulong2long(gpnum),
-                  rsp->fqs_state,
+                  rsp->gp_state,
                   (long)(rsp->jiffies_force_qs - jiffies),
                   (int)(jiffies & 0xffff));
        seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
@@ -361,7 +361,7 @@ static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
                   cpu_is_offline(rdp->cpu) ? '!' : ' ',
                   rdp->n_rcu_pending);
        seq_printf(m, "qsp=%ld rpq=%ld cbr=%ld cng=%ld ",
-                  rdp->n_rp_qs_pending,
+                  rdp->n_rp_core_needs_qs,
                   rdp->n_rp_report_qs,
                   rdp->n_rp_cb_ready,
                   rdp->n_rp_cpu_needs_gp);
index 7a0b3bc7c5ed147162bec6a600e0bfbded04257d..5f748c5a40f0756b2d6c97fa615cde551649db4a 100644 (file)
@@ -534,7 +534,7 @@ static void rcu_spawn_tasks_kthread(void);
  * Post an RCU-tasks callback.  First call must be from process context
  * after the scheduler is fully operational.
  */
-void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp))
+void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 {
        unsigned long flags;
        bool needwake;
index 2f9c9288481779c309f31c19dc3b74831ef70b3d..aa5973220ad213a960092012bf4493f296dab90b 100644 (file)
@@ -817,7 +817,7 @@ static void set_load_weight(struct task_struct *p)
        /*
         * SCHED_IDLE tasks get minimal weight:
         */
-       if (p->policy == SCHED_IDLE) {
+       if (idle_policy(p->policy)) {
                load->weight = scale_load(WEIGHT_IDLEPRIO);
                load->inv_weight = WMULT_IDLEPRIO;
                return;
@@ -827,17 +827,19 @@ static void set_load_weight(struct task_struct *p)
        load->inv_weight = prio_to_wmult[prio];
 }
 
-static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
+static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
        update_rq_clock(rq);
-       sched_info_queued(rq, p);
+       if (!(flags & ENQUEUE_RESTORE))
+               sched_info_queued(rq, p);
        p->sched_class->enqueue_task(rq, p, flags);
 }
 
-static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
+static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
        update_rq_clock(rq);
-       sched_info_dequeued(rq, p);
+       if (!(flags & DEQUEUE_SAVE))
+               sched_info_dequeued(rq, p);
        p->sched_class->dequeue_task(rq, p, flags);
 }
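/*
 * Illustrative sketch (editor-supplied, not part of the patch): with the
 * hunk above, a dequeue/enqueue pair that merely saves and restores a task
 * (as do_set_cpus_allowed() now does) skips the sched_info accounting.  The
 * demo below uses made-up flag values; only the flag test mirrors the patch.
 */
#include <stdio.h>

#define DEQUEUE_SAVE    0x08   /* illustrative value, not the kernel's */
#define ENQUEUE_RESTORE 0x08   /* illustrative value, not the kernel's */

static void dequeue_task_demo(int flags)
{
        if (!(flags & DEQUEUE_SAVE))
                printf("sched_info_dequeued()\n");  /* genuine dequeue: account */
}

static void enqueue_task_demo(int flags)
{
        if (!(flags & ENQUEUE_RESTORE))
                printf("sched_info_queued()\n");    /* genuine enqueue: account */
}

int main(void)
{
        dequeue_task_demo(DEQUEUE_SAVE);            /* save/restore pair: silent */
        enqueue_task_demo(ENQUEUE_RESTORE);
        dequeue_task_demo(0);                       /* real dequeue: accounted   */
        return 0;
}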
 
@@ -1178,7 +1180,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
                 * holding rq->lock.
                 */
                lockdep_assert_held(&rq->lock);
-               dequeue_task(rq, p, 0);
+               dequeue_task(rq, p, DEQUEUE_SAVE);
        }
        if (running)
                put_prev_task(rq, p);
@@ -1188,7 +1190,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
        if (running)
                p->sched_class->set_curr_task(rq);
        if (queued)
-               enqueue_task(rq, p, 0);
+               enqueue_task(rq, p, ENQUEUE_RESTORE);
 }
 
 /*
@@ -1292,7 +1294,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
        if (task_cpu(p) != new_cpu) {
                if (p->sched_class->migrate_task_rq)
-                       p->sched_class->migrate_task_rq(p, new_cpu);
+                       p->sched_class->migrate_task_rq(p);
                p->se.nr_migrations++;
                perf_event_task_migrate(p);
        }
@@ -1333,12 +1335,16 @@ static int migrate_swap_stop(void *data)
        struct rq *src_rq, *dst_rq;
        int ret = -EAGAIN;
 
+       if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
+               return -EAGAIN;
+
        src_rq = cpu_rq(arg->src_cpu);
        dst_rq = cpu_rq(arg->dst_cpu);
 
        double_raw_lock(&arg->src_task->pi_lock,
                        &arg->dst_task->pi_lock);
        double_rq_lock(src_rq, dst_rq);
+
        if (task_cpu(arg->dst_task) != arg->dst_cpu)
                goto unlock;
 
@@ -1574,13 +1580,15 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
                        goto out;
                }
 
+               /* No more Mr. Nice Guy. */
                switch (state) {
                case cpuset:
-                       /* No more Mr. Nice Guy. */
-                       cpuset_cpus_allowed_fallback(p);
-                       state = possible;
-                       break;
-
+                       if (IS_ENABLED(CONFIG_CPUSETS)) {
+                               cpuset_cpus_allowed_fallback(p);
+                               state = possible;
+                               break;
+                       }
+                       /* fall-through */
                case possible:
                        do_set_cpus_allowed(p, cpu_possible_mask);
                        state = fail;
@@ -1692,7 +1700,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 #endif /* CONFIG_SCHEDSTATS */
 }
 
-static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
+static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
        activate_task(rq, p, en_flags);
        p->on_rq = TASK_ON_RQ_QUEUED;
@@ -2114,23 +2122,17 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif /* CONFIG_NUMA_BALANCING */
 }
 
+DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
+
 #ifdef CONFIG_NUMA_BALANCING
-#ifdef CONFIG_SCHED_DEBUG
+
 void set_numabalancing_state(bool enabled)
 {
        if (enabled)
-               sched_feat_set("NUMA");
+               static_branch_enable(&sched_numa_balancing);
        else
-               sched_feat_set("NO_NUMA");
+               static_branch_disable(&sched_numa_balancing);
 }
-#else
-__read_mostly bool numabalancing_enabled;
-
-void set_numabalancing_state(bool enabled)
-{
-       numabalancing_enabled = enabled;
-}
-#endif /* CONFIG_SCHED_DEBUG */
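/*
 * Illustrative sketch (editor-supplied, not part of the patch): in the kernel,
 * DEFINE_STATIC_KEY_FALSE()/static_branch_enable()/static_branch_likely()
 * patch the hot-path branch at run time.  User space has no equivalent, so the
 * stand-in below uses an ordinary flag just to show the enable/test shape of
 * the new sched_numa_balancing control.
 */
#include <stdbool.h>
#include <stdio.h>

static bool numa_balancing_demo;          /* stands in for the static key */

static void set_numabalancing_state_demo(bool enabled)
{
        numa_balancing_demo = enabled;    /* static_branch_enable()/disable() */
}

int main(void)
{
        set_numabalancing_state_demo(true);
        if (numa_balancing_demo)          /* static_branch_likely() in hot paths */
                printf("NUMA balancing active\n");
        return 0;
}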
 
 #ifdef CONFIG_PROC_SYSCTL
 int sysctl_numa_balancing(struct ctl_table *table, int write,
@@ -2138,7 +2140,7 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
 {
        struct ctl_table t;
        int err;
-       int state = numabalancing_enabled;
+       int state = static_branch_likely(&sched_numa_balancing);
 
        if (write && !capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -2349,6 +2351,8 @@ void wake_up_new_task(struct task_struct *p)
        struct rq *rq;
 
        raw_spin_lock_irqsave(&p->pi_lock, flags);
+       /* Initialize new task's runnable average */
+       init_entity_runnable_average(&p->se);
 #ifdef CONFIG_SMP
        /*
         * Fork balancing, do it here and not earlier because:
@@ -2358,16 +2362,21 @@ void wake_up_new_task(struct task_struct *p)
        set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
 #endif
 
-       /* Initialize new task's runnable average */
-       init_entity_runnable_average(&p->se);
        rq = __task_rq_lock(p);
        activate_task(rq, p, 0);
        p->on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
-       if (p->sched_class->task_woken)
+       if (p->sched_class->task_woken) {
+               /*
+                * Nothing relies on rq->lock after this, so it's fine to
+                * drop it.
+                */
+               lockdep_unpin_lock(&rq->lock);
                p->sched_class->task_woken(rq, p);
+               lockdep_pin_lock(&rq->lock);
+       }
 #endif
        task_rq_unlock(rq, p, &flags);
 }
@@ -2476,7 +2485,6 @@ static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
                    struct task_struct *next)
 {
-       trace_sched_switch(prev, next);
        sched_info_switch(rq, prev, next);
        perf_event_task_sched_out(prev, next);
        fire_sched_out_preempt_notifiers(prev, next);
@@ -2510,6 +2518,22 @@ static struct rq *finish_task_switch(struct task_struct *prev)
        struct mm_struct *mm = rq->prev_mm;
        long prev_state;
 
+       /*
+        * The previous task will have left us with a preempt_count of 2
+        * because it left us after:
+        *
+        *      schedule()
+        *        preempt_disable();                    // 1
+        *        __schedule()
+        *          raw_spin_lock_irq(&rq->lock)        // 2
+        *
+        * Also, see FORK_PREEMPT_COUNT.
+        */
+       if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
+                     "corrupted preempt_count: %s/%d/0x%x\n",
+                     current->comm, current->pid, preempt_count()))
+               preempt_count_set(FORK_PREEMPT_COUNT);
+
        rq->prev_mm = NULL;
 
        /*
@@ -2517,11 +2541,11 @@ static struct rq *finish_task_switch(struct task_struct *prev)
         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
         * schedule one last time. The schedule call will never return, and
         * the scheduled task must drop that reference.
-        * The test for TASK_DEAD must occur while the runqueue locks are
-        * still held, otherwise prev could be scheduled on another cpu, die
-        * there before we look at prev->state, and then the reference would
-        * be dropped twice.
-        *              Manfred Spraul <manfred@colorfullife.com>
+        *
+        * We must observe prev->state before clearing prev->on_cpu (in
+        * finish_lock_switch), otherwise a concurrent wakeup can get prev
+        * running on another CPU and we could race with its RUNNING -> DEAD
+        * transition, resulting in a double drop.
         */
        prev_state = prev->state;
        vtime_task_switch(prev);
@@ -2594,8 +2618,15 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 {
        struct rq *rq;
 
-       /* finish_task_switch() drops rq->lock and enables preemtion */
-       preempt_disable();
+       /*
+        * New tasks start with FORK_PREEMPT_COUNT, see there and
+        * finish_task_switch() for details.
+        *
+        * finish_task_switch() will drop rq->lock() and lower preempt_count
+        * and the preempt_enable() will end up enabling preemption (on
+        * PREEMPT_COUNT kernels).
+        */
+
        rq = finish_task_switch(prev);
        balance_callback(rq);
        preempt_enable();
@@ -2953,15 +2984,13 @@ static noinline void __schedule_bug(struct task_struct *prev)
 static inline void schedule_debug(struct task_struct *prev)
 {
 #ifdef CONFIG_SCHED_STACK_END_CHECK
-       BUG_ON(unlikely(task_stack_end_corrupted(prev)));
+       BUG_ON(task_stack_end_corrupted(prev));
 #endif
-       /*
-        * Test if we are atomic. Since do_exit() needs to call into
-        * schedule() atomically, we ignore that path. Otherwise whine
-        * if we are scheduling when we should not.
-        */
-       if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
+
+       if (unlikely(in_atomic_preempt_off())) {
                __schedule_bug(prev);
+               preempt_count_set(PREEMPT_DISABLED);
+       }
        rcu_sleep_check();
 
        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -3047,7 +3076,7 @@ again:
  *
  * WARNING: must be called with preemption disabled!
  */
-static void __sched __schedule(void)
+static void __sched notrace __schedule(bool preempt)
 {
        struct task_struct *prev, *next;
        unsigned long *switch_count;
@@ -3059,6 +3088,17 @@ static void __sched __schedule(void)
        rcu_note_context_switch();
        prev = rq->curr;
 
+       /*
+        * do_exit() calls schedule() with preemption disabled as an exception;
+        * however we must fix that up, otherwise the next task will see an
+        * inconsistent (higher) preempt count.
+        *
+        * It also keeps the schedule_debug() test below from complaining
+        * about this.
+        */
+       if (unlikely(prev->state == TASK_DEAD))
+               preempt_enable_no_resched_notrace();
+
        schedule_debug(prev);
 
        if (sched_feat(HRTICK))
@@ -3076,7 +3116,7 @@ static void __sched __schedule(void)
        rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 
        switch_count = &prev->nivcsw;
-       if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+       if (!preempt && prev->state) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
                        prev->state = TASK_RUNNING;
                } else {
@@ -3112,6 +3152,7 @@ static void __sched __schedule(void)
                rq->curr = next;
                ++*switch_count;
 
+               trace_sched_switch(preempt, prev, next);
                rq = context_switch(rq, prev, next); /* unlocks the rq */
                cpu = cpu_of(rq);
        } else {
@@ -3141,7 +3182,7 @@ asmlinkage __visible void __sched schedule(void)
        sched_submit_work(tsk);
        do {
                preempt_disable();
-               __schedule();
+               __schedule(false);
                sched_preempt_enable_no_resched();
        } while (need_resched());
 }
@@ -3181,9 +3222,9 @@ void __sched schedule_preempt_disabled(void)
 static void __sched notrace preempt_schedule_common(void)
 {
        do {
-               preempt_active_enter();
-               __schedule();
-               preempt_active_exit();
+               preempt_disable_notrace();
+               __schedule(true);
+               preempt_enable_no_resched_notrace();
 
                /*
                 * Check again in case we missed a preemption opportunity
@@ -3234,24 +3275,17 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
                return;
 
        do {
-               /*
-                * Use raw __prempt_count() ops that don't call function.
-                * We can't call functions before disabling preemption which
-                * disarm preemption tracing recursions.
-                */
-               __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
-               barrier();
+               preempt_disable_notrace();
                /*
                 * Needs preempt disabled in case user_exit() is traced
                 * and the tracer calls preempt_enable_notrace() causing
                 * an infinite recursion.
                 */
                prev_ctx = exception_enter();
-               __schedule();
+               __schedule(true);
                exception_exit(prev_ctx);
 
-               barrier();
-               __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
+               preempt_enable_no_resched_notrace();
        } while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
@@ -3274,11 +3308,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
        prev_state = exception_enter();
 
        do {
-               preempt_active_enter();
+               preempt_disable();
                local_irq_enable();
-               __schedule();
+               __schedule(true);
                local_irq_disable();
-               preempt_active_exit();
+               sched_preempt_enable_no_resched();
        } while (need_resched());
 
        exception_exit(prev_state);
@@ -3306,7 +3340,7 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-       int oldprio, queued, running, enqueue_flag = 0;
+       int oldprio, queued, running, enqueue_flag = ENQUEUE_RESTORE;
        struct rq *rq;
        const struct sched_class *prev_class;
 
@@ -3338,7 +3372,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        queued = task_on_rq_queued(p);
        running = task_current(rq, p);
        if (queued)
-               dequeue_task(rq, p, 0);
+               dequeue_task(rq, p, DEQUEUE_SAVE);
        if (running)
                put_prev_task(rq, p);
 
@@ -3356,7 +3390,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
                if (!dl_prio(p->normal_prio) ||
                    (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
                        p->dl.dl_boosted = 1;
-                       enqueue_flag = ENQUEUE_REPLENISH;
+                       enqueue_flag |= ENQUEUE_REPLENISH;
                } else
                        p->dl.dl_boosted = 0;
                p->sched_class = &dl_sched_class;
@@ -3364,7 +3398,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
                if (dl_prio(oldprio))
                        p->dl.dl_boosted = 0;
                if (oldprio < prio)
-                       enqueue_flag = ENQUEUE_HEAD;
+                       enqueue_flag |= ENQUEUE_HEAD;
                p->sched_class = &rt_sched_class;
        } else {
                if (dl_prio(oldprio))
@@ -3416,7 +3450,7 @@ void set_user_nice(struct task_struct *p, long nice)
        }
        queued = task_on_rq_queued(p);
        if (queued)
-               dequeue_task(rq, p, 0);
+               dequeue_task(rq, p, DEQUEUE_SAVE);
 
        p->static_prio = NICE_TO_PRIO(nice);
        set_load_weight(p);
@@ -3425,7 +3459,7 @@ void set_user_nice(struct task_struct *p, long nice)
        delta = p->prio - old_prio;
 
        if (queued) {
-               enqueue_task(rq, p, 0);
+               enqueue_task(rq, p, ENQUEUE_RESTORE);
                /*
                 * If the task increased its priority or is running and
                 * lowered its priority, then reschedule its CPU:
@@ -3746,10 +3780,7 @@ recheck:
        } else {
                reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
 
-               if (policy != SCHED_DEADLINE &&
-                               policy != SCHED_FIFO && policy != SCHED_RR &&
-                               policy != SCHED_NORMAL && policy != SCHED_BATCH &&
-                               policy != SCHED_IDLE)
+               if (!valid_policy(policy))
                        return -EINVAL;
        }
 
@@ -3805,7 +3836,7 @@ recheck:
                 * Treat SCHED_IDLE as nice 20. Only allow a switch to
                 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
                 */
-               if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
+               if (idle_policy(p->policy) && !idle_policy(policy)) {
                        if (!can_nice(p, task_nice(p)))
                                return -EPERM;
                }
@@ -3930,7 +3961,7 @@ change:
        queued = task_on_rq_queued(p);
        running = task_current(rq, p);
        if (queued)
-               dequeue_task(rq, p, 0);
+               dequeue_task(rq, p, DEQUEUE_SAVE);
        if (running)
                put_prev_task(rq, p);
 
@@ -3940,11 +3971,15 @@ change:
        if (running)
                p->sched_class->set_curr_task(rq);
        if (queued) {
+               int enqueue_flags = ENQUEUE_RESTORE;
                /*
                 * We enqueue to tail when the priority of a task is
                 * increased (user space view).
                 */
-               enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
+               if (oldprio <= p->prio)
+                       enqueue_flags |= ENQUEUE_HEAD;
+
+               enqueue_task(rq, p, enqueue_flags);
        }
 
        check_class_changed(rq, p, prev_class, oldprio);
@@ -4022,6 +4057,7 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy,
 {
        return _sched_setscheduler(p, policy, param, false);
 }
+EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
 
 static int
 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
@@ -4934,7 +4970,15 @@ void init_idle(struct task_struct *idle, int cpu)
        idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
-       do_set_cpus_allowed(idle, cpumask_of(cpu));
+#ifdef CONFIG_SMP
+       /*
+        * It's possible that init_idle() gets called multiple times on a task;
+        * in that case do_set_cpus_allowed() will not do the right thing.
+        *
+        * And since this is boot, we can forgo the serialization.
+        */
+       set_cpus_allowed_common(idle, cpumask_of(cpu));
+#endif
        /*
         * We're having a chicken and egg problem, even though we are
         * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -4951,7 +4995,7 @@ void init_idle(struct task_struct *idle, int cpu)
 
        rq->curr = rq->idle = idle;
        idle->on_rq = TASK_ON_RQ_QUEUED;
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
        idle->on_cpu = 1;
 #endif
        raw_spin_unlock(&rq->lock);
@@ -4966,7 +5010,7 @@ void init_idle(struct task_struct *idle, int cpu)
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
        vtime_init_idle(idle, cpu);
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
 }
@@ -5085,7 +5129,7 @@ void sched_setnuma(struct task_struct *p, int nid)
        running = task_current(rq, p);
 
        if (queued)
-               dequeue_task(rq, p, 0);
+               dequeue_task(rq, p, DEQUEUE_SAVE);
        if (running)
                put_prev_task(rq, p);
 
@@ -5094,7 +5138,7 @@ void sched_setnuma(struct task_struct *p, int nid)
        if (running)
                p->sched_class->set_curr_task(rq);
        if (queued)
-               enqueue_task(rq, p, 0);
+               enqueue_task(rq, p, ENQUEUE_RESTORE);
        task_rq_unlock(rq, p, &flags);
 }
 #endif /* CONFIG_NUMA_BALANCING */
@@ -5515,21 +5559,27 @@ static void set_cpu_rq_start_time(void)
 static int sched_cpu_active(struct notifier_block *nfb,
                                      unsigned long action, void *hcpu)
 {
+       int cpu = (long)hcpu;
+
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_STARTING:
                set_cpu_rq_start_time();
                return NOTIFY_OK;
+
        case CPU_ONLINE:
                /*
                 * At this point a starting CPU has marked itself as online via
                 * set_cpu_online(). But it might not yet have marked itself
                 * as active, which is essential from here on.
-                *
-                * Thus, fall-through and help the starting CPU along.
                 */
+               set_cpu_active(cpu, true);
+               stop_machine_unpark(cpu);
+               return NOTIFY_OK;
+
        case CPU_DOWN_FAILED:
-               set_cpu_active((long)hcpu, true);
+               set_cpu_active(cpu, true);
                return NOTIFY_OK;
+
        default:
                return NOTIFY_DONE;
        }
@@ -6461,7 +6511,8 @@ static struct sched_domain_topology_level default_topology[] = {
        { NULL, },
 };
 
-struct sched_domain_topology_level *sched_domain_topology = default_topology;
+static struct sched_domain_topology_level *sched_domain_topology =
+       default_topology;
 
 #define for_each_sd_topology(tl)                       \
        for (tl = sched_domain_topology; tl->mask; tl++)
@@ -7230,9 +7281,6 @@ void __init sched_init_smp(void)
        alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
        alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
-       /* nohz_full won't take effect without isolating the cpus. */
-       tick_nohz_full_add_cpus_to(cpu_isolated_map);
-
        sched_init_numa();
 
        /*
@@ -7465,7 +7513,7 @@ void __init sched_init(void)
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 static inline int preempt_count_equals(int preempt_offset)
 {
-       int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
+       int nested = preempt_count() + rcu_preempt_depth();
 
        return (nested == preempt_offset);
 }
@@ -7712,7 +7760,7 @@ void sched_move_task(struct task_struct *tsk)
        queued = task_on_rq_queued(tsk);
 
        if (queued)
-               dequeue_task(rq, tsk, 0);
+               dequeue_task(rq, tsk, DEQUEUE_SAVE);
        if (unlikely(running))
                put_prev_task(rq, tsk);
 
@@ -7728,7 +7776,7 @@ void sched_move_task(struct task_struct *tsk)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->task_move_group)
-               tsk->sched_class->task_move_group(tsk, queued);
+               tsk->sched_class->task_move_group(tsk);
        else
 #endif
                set_task_rq(tsk, task_cpu(tsk));
@@ -7736,7 +7784,7 @@ void sched_move_task(struct task_struct *tsk)
        if (unlikely(running))
                tsk->sched_class->set_curr_task(rq);
        if (queued)
-               enqueue_task(rq, tsk, 0);
+               enqueue_task(rq, tsk, ENQUEUE_RESTORE);
 
        task_rq_unlock(rq, tsk, &flags);
 }
@@ -8200,14 +8248,6 @@ static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
                            struct cgroup_subsys_state *old_css,
                            struct task_struct *task)
 {
-       /*
-        * cgroup_exit() is called in the copy_process() failure path.
-        * Ignore this case since the task hasn't ran yet, this avoids
-        * trying to poke a half freed task state from generic code.
-        */
-       if (!(task->flags & PF_EXITING))
-               return;
-
        sched_move_task(task);
 }
 
index c6acb07466bb82b1143af4aba1da5e483f628e4f..5a75b08cfd8576d830adf9fc9df52d807c052be9 100644 (file)
@@ -31,11 +31,6 @@ static inline int right_child(int i)
        return (i << 1) + 2;
 }
 
-static inline int dl_time_before(u64 a, u64 b)
-{
-       return (s64)(a - b) < 0;
-}
-
 static void cpudl_exchange(struct cpudl *cp, int a, int b)
 {
        int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;
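
The private dl_time_before() copy is dropped here (and again from sched/sched.h further
down) in favour of the shared definition that cpudl.h now pulls in via
<linux/sched/deadline.h>. The helper compares deadlines through a signed difference, which
stays correct across u64 wraparound; a standalone check of that property, assuming the same
(s64)(a - b) < 0 definition:

#include <stdio.h>
#include <stdint.h>

/* same comparison as the kernel helper, with u64/s64 spelled out */
static inline int dl_time_before(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

int main(void)
{
        uint64_t near_wrap = UINT64_MAX - 10;   /* deadline just before the counter wraps */
        uint64_t wrapped   = 5;                 /* deadline 16 ticks later, after the wrap */

        printf("naive a < b:       %d\n", near_wrap < wrapped);                /* 0: misses the wrap */
        printf("dl_time_before:    %d\n", dl_time_before(near_wrap, wrapped)); /* 1: correct */
        printf("reverse direction: %d\n", dl_time_before(wrapped, near_wrap)); /* 0 */
        return 0;
}
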
index 1a0a6ef2fbe1be030e32895571a1d267e26579e4..fcbdf83fed7e31afc4ee916f94d272d03117a2c2 100644 (file)
@@ -2,6 +2,7 @@
 #define _LINUX_CPUDL_H
 
 #include <linux/sched.h>
+#include <linux/sched/deadline.h>
 
 #define IDX_INVALID     -1
 
index fc8f01083527a570af73a8f8b4c6e586e7e332f9..8b0a15e285f9121ccd5540fa11eef49c94f017c1 100644 (file)
@@ -668,8 +668,15 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
         * Queueing this task back might have overloaded rq, check if we need
         * to kick someone away.
         */
-       if (has_pushable_dl_tasks(rq))
+       if (has_pushable_dl_tasks(rq)) {
+               /*
+                * Nothing relies on rq->lock after this, so it's safe to drop
+                * rq->lock.
+                */
+               lockdep_unpin_lock(&rq->lock);
                push_dl_task(rq);
+               lockdep_pin_lock(&rq->lock);
+       }
 #endif
 
 unlock:
@@ -1066,8 +1073,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
                int target = find_later_rq(p);
 
                if (target != -1 &&
-                               dl_time_before(p->dl.deadline,
-                                       cpu_rq(target)->dl.earliest_dl.curr))
+                               (dl_time_before(p->dl.deadline,
+                                       cpu_rq(target)->dl.earliest_dl.curr) ||
+                               (cpu_rq(target)->dl.dl_nr_running == 0)))
                        cpu = target;
        }
        rcu_read_unlock();
@@ -1417,7 +1425,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 
                later_rq = cpu_rq(cpu);
 
-               if (!dl_time_before(task->dl.deadline,
+               if (later_rq->dl.dl_nr_running &&
+                   !dl_time_before(task->dl.deadline,
                                        later_rq->dl.earliest_dl.curr)) {
                        /*
                         * Target rq has tasks of equal or earlier deadline,
index 6e2e3483b1ecff588e76103e0b7b7d673f16d2a5..824aa9f501a3b2a183a5a700dee57c984c51f89e 100644 (file)
@@ -661,11 +661,12 @@ static unsigned long task_h_load(struct task_struct *p);
 
 /*
  * We choose a half-life close to 1 scheduling period.
- * Note: The tables below are dependent on this value.
+ * Note: The tables runnable_avg_yN_inv and runnable_avg_yN_sum are
+ * dependent on this value.
  */
 #define LOAD_AVG_PERIOD 32
 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
-#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */
+#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
 
 /* Give new sched_entity start runnable values to heavy its load in infant time */
 void init_entity_runnable_average(struct sched_entity *se)
@@ -682,7 +683,7 @@ void init_entity_runnable_average(struct sched_entity *se)
        sa->load_avg = scale_load_down(se->load.weight);
        sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
        sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
-       sa->util_sum = LOAD_AVG_MAX;
+       sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
        /* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
 }
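
The clarified comment above points at the decay constant behind those tables: every 1024us
period a contribution is multiplied by y, with y chosen so that y^32 = 1/2
(LOAD_AVG_PERIOD), and the geometric sum of 1024*y^n stops growing after roughly
LOAD_AVG_MAX_N = 345 periods, saturating near LOAD_AVG_MAX. A rough floating-point check of
those constants (the kernel uses precomputed integer tables, which put the limit at 47742;
build with -lm):

#include <stdio.h>
#include <math.h>

#define LOAD_AVG_PERIOD 32      /* half-life, in 1024us periods */
#define LOAD_AVG_MAX_N  345     /* periods after which the sum stops growing */

int main(void)
{
        double y = pow(0.5, 1.0 / LOAD_AVG_PERIOD);     /* y^32 == 0.5 */
        double sum = 0.0;
        int n;

        for (n = 0; n <= LOAD_AVG_MAX_N; n++)
                sum += 1024.0 * pow(y, n);

        printf("y    = %.6f\n", y);             /* ~0.978572 */
        printf("y^32 = %.6f\n", pow(y, 32));    /* 0.500000 */
        printf("sum  = %.0f\n", sum);           /* ~47762; the integer tables give 47742 */
        return 0;
}
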
 
@@ -2069,7 +2070,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
        int local = !!(flags & TNF_FAULT_LOCAL);
        int priv;
 
-       if (!numabalancing_enabled)
+       if (!static_branch_likely(&sched_numa_balancing))
                return;
 
        /* for example, ksmd faulting in a user's mm */
@@ -2157,7 +2158,7 @@ void task_numa_work(struct callback_head *work)
        struct vm_area_struct *vma;
        unsigned long start, end;
        unsigned long nr_pte_updates = 0;
-       long pages;
+       long pages, virtpages;
 
        WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
 
@@ -2203,9 +2204,11 @@ void task_numa_work(struct callback_head *work)
        start = mm->numa_scan_offset;
        pages = sysctl_numa_balancing_scan_size;
        pages <<= 20 - PAGE_SHIFT; /* MB in pages */
+       virtpages = pages * 8;     /* Scan up to this much virtual space */
        if (!pages)
                return;
 
+
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, start);
        if (!vma) {
@@ -2240,18 +2243,22 @@ void task_numa_work(struct callback_head *work)
                        start = max(start, vma->vm_start);
                        end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
                        end = min(end, vma->vm_end);
-                       nr_pte_updates += change_prot_numa(vma, start, end);
+                       nr_pte_updates = change_prot_numa(vma, start, end);
 
                        /*
-                        * Scan sysctl_numa_balancing_scan_size but ensure that
-                        * at least one PTE is updated so that unused virtual
-                        * address space is quickly skipped.
+                        * Try to scan sysctl_numa_balancing_scan_size worth of
+                        * hpages that have at least one present PTE that
+                        * is not already pte-numa. If the VMA contains
+                        * areas that are unused or already full of prot_numa
+                        * PTEs, scan up to virtpages, to skip through those
+                        * areas faster.
                         */
                        if (nr_pte_updates)
                                pages -= (end - start) >> PAGE_SHIFT;
+                       virtpages -= (end - start) >> PAGE_SHIFT;
 
                        start = end;
-                       if (pages <= 0)
+                       if (pages <= 0 || virtpages <= 0)
                                goto out;
 
                        cond_resched();
@@ -2363,7 +2370,7 @@ static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
         */
        tg_weight = atomic_long_read(&tg->load_avg);
        tg_weight -= cfs_rq->tg_load_avg_contrib;
-       tg_weight += cfs_rq_load_avg(cfs_rq);
+       tg_weight += cfs_rq->load.weight;
 
        return tg_weight;
 }
@@ -2373,7 +2380,7 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
        long tg_weight, load, shares;
 
        tg_weight = calc_tg_weight(tg, cfs_rq);
-       load = cfs_rq_load_avg(cfs_rq);
+       load = cfs_rq->load.weight;
 
        shares = (tg->shares * load);
        if (tg_weight)
@@ -2515,6 +2522,12 @@ static u32 __compute_runnable_contrib(u64 n)
        return contrib + runnable_avg_yN_sum[n];
 }
 
+#if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10
+#error "load tracking assumes 2^10 as unit"
+#endif
+
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+
 /*
  * We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series.  To do this we sub-divide our runnable
@@ -2547,10 +2560,10 @@ static __always_inline int
 __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
                  unsigned long weight, int running, struct cfs_rq *cfs_rq)
 {
-       u64 delta, periods;
+       u64 delta, scaled_delta, periods;
        u32 contrib;
-       int delta_w, decayed = 0;
-       unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
+       unsigned int delta_w, scaled_delta_w, decayed = 0;
+       unsigned long scale_freq, scale_cpu;
 
        delta = now - sa->last_update_time;
        /*
@@ -2571,6 +2584,9 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
                return 0;
        sa->last_update_time = now;
 
+       scale_freq = arch_scale_freq_capacity(NULL, cpu);
+       scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+
        /* delta_w is the amount already accumulated against our next period */
        delta_w = sa->period_contrib;
        if (delta + delta_w >= 1024) {
@@ -2585,13 +2601,16 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
                 * period and accrue it.
                 */
                delta_w = 1024 - delta_w;
+               scaled_delta_w = cap_scale(delta_w, scale_freq);
                if (weight) {
-                       sa->load_sum += weight * delta_w;
-                       if (cfs_rq)
-                               cfs_rq->runnable_load_sum += weight * delta_w;
+                       sa->load_sum += weight * scaled_delta_w;
+                       if (cfs_rq) {
+                               cfs_rq->runnable_load_sum +=
+                                               weight * scaled_delta_w;
+                       }
                }
                if (running)
-                       sa->util_sum += delta_w * scale_freq >> SCHED_CAPACITY_SHIFT;
+                       sa->util_sum += scaled_delta_w * scale_cpu;
 
                delta -= delta_w;
 
@@ -2608,23 +2627,25 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 
                /* Efficiently calculate \sum (1..n_period) 1024*y^i */
                contrib = __compute_runnable_contrib(periods);
+               contrib = cap_scale(contrib, scale_freq);
                if (weight) {
                        sa->load_sum += weight * contrib;
                        if (cfs_rq)
                                cfs_rq->runnable_load_sum += weight * contrib;
                }
                if (running)
-                       sa->util_sum += contrib * scale_freq >> SCHED_CAPACITY_SHIFT;
+                       sa->util_sum += contrib * scale_cpu;
        }
 
        /* Remainder of delta accrued against u_0` */
+       scaled_delta = cap_scale(delta, scale_freq);
        if (weight) {
-               sa->load_sum += weight * delta;
+               sa->load_sum += weight * scaled_delta;
                if (cfs_rq)
-                       cfs_rq->runnable_load_sum += weight * delta;
+                       cfs_rq->runnable_load_sum += weight * scaled_delta;
        }
        if (running)
-               sa->util_sum += delta * scale_freq >> SCHED_CAPACITY_SHIFT;
+               sa->util_sum += scaled_delta * scale_cpu;
 
        sa->period_contrib += delta;
 
@@ -2634,7 +2655,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
                        cfs_rq->runnable_load_avg =
                                div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
                }
-               sa->util_avg = (sa->util_sum << SCHED_LOAD_SHIFT) / LOAD_AVG_MAX;
+               sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
        }
 
        return decayed;
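
With cap_scale(), every delta accrued above is first scaled by the current frequency
capacity, and the utilization contribution additionally by the CPU's capacity, so the sums
become invariant to DVFS and to asymmetric (big.LITTLE) CPUs. A toy run of the arithmetic
only, with SCHED_CAPACITY_SHIFT = 10 as the #error above requires and made-up capacity
values:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT    10
#define SCHED_CAPACITY_SCALE    (1UL << SCHED_CAPACITY_SHIFT)  /* 1024 */

/* the scaling helper the patch introduces */
#define cap_scale(v, s) ((v) * (s) >> SCHED_CAPACITY_SHIFT)

int main(void)
{
        unsigned long delta      = 1024;        /* ran for one full period */
        unsigned long scale_freq = 512;         /* CPU at half of its maximum frequency */
        unsigned long scale_cpu  = 768;         /* little core at 75% of a big core */

        unsigned long scaled_delta = cap_scale(delta, scale_freq);       /* 512 */
        unsigned long util         = cap_scale(scaled_delta, scale_cpu); /* 384 */

        printf("scaled_delta = %lu\n", scaled_delta);
        printf("util         = %lu of %lu\n", util, SCHED_CAPACITY_SCALE);
        return 0;
}

384 out of 1024 is roughly what util_avg converges to for a task that runs continuously
under those scales: half frequency times three quarters of a big core's capacity.
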
@@ -2664,20 +2685,20 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
-       int decayed;
        struct sched_avg *sa = &cfs_rq->avg;
+       int decayed, removed = 0;
 
        if (atomic_long_read(&cfs_rq->removed_load_avg)) {
                long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
                sa->load_avg = max_t(long, sa->load_avg - r, 0);
                sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+               removed = 1;
        }
 
        if (atomic_long_read(&cfs_rq->removed_util_avg)) {
                long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
                sa->util_avg = max_t(long, sa->util_avg - r, 0);
-               sa->util_sum = max_t(s32, sa->util_sum -
-                       ((r * LOAD_AVG_MAX) >> SCHED_LOAD_SHIFT), 0);
+               sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
        }
 
        decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
@@ -2688,40 +2709,77 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
        cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-       return decayed;
+       return decayed || removed;
 }
 
 /* Update task and its cfs_rq load average */
 static inline void update_load_avg(struct sched_entity *se, int update_tg)
 {
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
-       int cpu = cpu_of(rq_of(cfs_rq));
        u64 now = cfs_rq_clock_task(cfs_rq);
+       int cpu = cpu_of(rq_of(cfs_rq));
 
        /*
         * Track task load average for carrying it to new CPU after migrated, and
         * track group sched_entity load average for task_h_load calc in migration
         */
        __update_load_avg(now, cpu, &se->avg,
-               se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
+                         se->on_rq * scale_load_down(se->load.weight),
+                         cfs_rq->curr == se, NULL);
 
        if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
                update_tg_load_avg(cfs_rq, 0);
 }
 
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       if (!sched_feat(ATTACH_AGE_LOAD))
+               goto skip_aging;
+
+       /*
+        * If we got migrated (either between CPUs or between cgroups) we'll
+        * have aged the average right before clearing @last_update_time.
+        */
+       if (se->avg.last_update_time) {
+               __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
+                                 &se->avg, 0, 0, NULL);
+
+               /*
+                * XXX: we could have just aged the entire load away if we've been
+                * absent from the fair class for too long.
+                */
+       }
+
+skip_aging:
+       se->avg.last_update_time = cfs_rq->avg.last_update_time;
+       cfs_rq->avg.load_avg += se->avg.load_avg;
+       cfs_rq->avg.load_sum += se->avg.load_sum;
+       cfs_rq->avg.util_avg += se->avg.util_avg;
+       cfs_rq->avg.util_sum += se->avg.util_sum;
+}
+
+static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
+                         &se->avg, se->on_rq * scale_load_down(se->load.weight),
+                         cfs_rq->curr == se, NULL);
+
+       cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
+       cfs_rq->avg.load_sum = max_t(s64,  cfs_rq->avg.load_sum - se->avg.load_sum, 0);
+       cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
+       cfs_rq->avg.util_sum = max_t(s32,  cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+}
+
 /* Add the load generated by se into cfs_rq's load average */
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        struct sched_avg *sa = &se->avg;
        u64 now = cfs_rq_clock_task(cfs_rq);
-       int migrated = 0, decayed;
+       int migrated, decayed;
 
-       if (sa->last_update_time == 0) {
-               sa->last_update_time = now;
-               migrated = 1;
-       }
-       else {
+       migrated = !sa->last_update_time;
+       if (!migrated) {
                __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
                        se->on_rq * scale_load_down(se->load.weight),
                        cfs_rq->curr == se, NULL);
@@ -2732,12 +2790,8 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
        cfs_rq->runnable_load_avg += sa->load_avg;
        cfs_rq->runnable_load_sum += sa->load_sum;
 
-       if (migrated) {
-               cfs_rq->avg.load_avg += sa->load_avg;
-               cfs_rq->avg.load_sum += sa->load_sum;
-               cfs_rq->avg.util_avg += sa->util_avg;
-               cfs_rq->avg.util_sum += sa->util_sum;
-       }
+       if (migrated)
+               attach_entity_load_avg(cfs_rq, se);
 
        if (decayed || migrated)
                update_tg_load_avg(cfs_rq, 0);
@@ -2752,7 +2806,7 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
        cfs_rq->runnable_load_avg =
                max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
        cfs_rq->runnable_load_sum =
-               max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
+               max_t(s64,  cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
 }
 
 /*
@@ -2820,6 +2874,11 @@ static inline void
 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
+static inline void
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+
 static inline int idle_balance(struct rq *rq)
 {
        return 0;
@@ -4816,32 +4875,39 @@ next:
 done:
        return target;
 }
+
 /*
- * get_cpu_usage returns the amount of capacity of a CPU that is used by CFS
+ * cpu_util returns the amount of capacity of a CPU that is used by CFS
  * tasks. The unit of the return value must be the one of capacity so we can
- * compare the usage with the capacity of the CPU that is available for CFS
- * task (ie cpu_capacity).
- * cfs.avg.util_avg is the sum of running time of runnable tasks on a
- * CPU. It represents the amount of utilization of a CPU in the range
- * [0..SCHED_LOAD_SCALE].  The usage of a CPU can't be higher than the full
- * capacity of the CPU because it's about the running time on this CPU.
- * Nevertheless, cfs.avg.util_avg can be higher than SCHED_LOAD_SCALE
- * because of unfortunate rounding in util_avg or just
- * after migrating tasks until the average stabilizes with the new running
- * time. So we need to check that the usage stays into the range
- * [0..cpu_capacity_orig] and cap if necessary.
- * Without capping the usage, a group could be seen as overloaded (CPU0 usage
- * at 121% + CPU1 usage at 80%) whereas CPU1 has 20% of available capacity
+ * compare the utilization with the capacity of the CPU that is available for
+ * CFS task (ie cpu_capacity).
+ *
+ * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
+ * recent utilization of currently non-runnable tasks on a CPU. It represents
+ * the amount of utilization of a CPU in the range [0..capacity_orig] where
+ * capacity_orig is the cpu_capacity available at the highest frequency
+ * (arch_scale_freq_capacity()).
+ * The utilization of a CPU converges towards a sum equal to or less than the
+ * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
+ * the running time on this CPU scaled by capacity_curr.
+ *
+ * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
+ * higher than capacity_orig because of unfortunate rounding in
+ * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
+ * the average stabilizes with the new running time. We need to check that the
+ * utilization stays within the range of [0..capacity_orig] and cap it if
+ * necessary. Without utilization capping, a group could be seen as overloaded
+ * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
+ * available capacity. We allow utilization to overshoot capacity_curr (but not
+ * capacity_orig) as it is useful for predicting the capacity required after task
+ * migrations (scheduler-driven DVFS).
  */
-static int get_cpu_usage(int cpu)
+static int cpu_util(int cpu)
 {
-       unsigned long usage = cpu_rq(cpu)->cfs.avg.util_avg;
+       unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
        unsigned long capacity = capacity_orig_of(cpu);
 
-       if (usage >= SCHED_LOAD_SCALE)
-               return capacity;
-
-       return (usage * capacity) >> SCHED_LOAD_SHIFT;
+       return (util >= capacity) ? capacity : util;
 }
 
 /*
@@ -4944,7 +5010,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
  * previous cpu.  However, the caller only guarantees p->pi_lock is held; no
  * other assumptions, including the state of rq->lock, should be made.
  */
-static void migrate_task_rq_fair(struct task_struct *p, int next_cpu)
+static void migrate_task_rq_fair(struct task_struct *p)
 {
        /*
         * We are supposed to update the task to "current" time, then its up to date
@@ -5524,10 +5590,10 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
        unsigned long src_faults, dst_faults;
        int src_nid, dst_nid;
 
-       if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
+       if (!static_branch_likely(&sched_numa_balancing))
                return -1;
 
-       if (!sched_feat(NUMA))
+       if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
                return -1;
 
        src_nid = cpu_to_node(env->src_cpu);
@@ -5933,7 +5999,7 @@ struct sg_lb_stats {
        unsigned long sum_weighted_load; /* Weighted load of group's tasks */
        unsigned long load_per_task;
        unsigned long group_capacity;
-       unsigned long group_usage; /* Total usage of the group */
+       unsigned long group_util; /* Total utilization of the group */
        unsigned int sum_nr_running; /* Nr tasks running in the group */
        unsigned int idle_cpus;
        unsigned int group_weight;
@@ -6009,19 +6075,6 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
        return load_idx;
 }
 
-static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
-       if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
-               return sd->smt_gain / sd->span_weight;
-
-       return SCHED_CAPACITY_SCALE;
-}
-
-unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
-       return default_scale_cpu_capacity(sd, cpu);
-}
-
 static unsigned long scale_rt_capacity(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
@@ -6051,16 +6104,9 @@ static unsigned long scale_rt_capacity(int cpu)
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       unsigned long capacity = SCHED_CAPACITY_SCALE;
+       unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
        struct sched_group *sdg = sd->groups;
 
-       if (sched_feat(ARCH_CAPACITY))
-               capacity *= arch_scale_cpu_capacity(sd, cpu);
-       else
-               capacity *= default_scale_cpu_capacity(sd, cpu);
-
-       capacity >>= SCHED_CAPACITY_SHIFT;
-
        cpu_rq(cpu)->cpu_capacity_orig = capacity;
 
        capacity *= scale_rt_capacity(cpu);
@@ -6186,8 +6232,8 @@ static inline int sg_imbalanced(struct sched_group *group)
  * group_has_capacity returns true if the group has spare capacity that could
  * be used by some tasks.
  * We consider that a group has spare capacity if the number of tasks is
- * smaller than the number of CPUs or if the usage is lower than the available
- * capacity for CFS tasks.
+ * smaller than the number of CPUs or if the utilization is lower than the
+ * available capacity for CFS tasks.
  * For the latter, we use a threshold to stabilize the state, to take into
  * account the variance of the tasks' load and to return true if the available
  * capacity is meaningful for the load balancer.
@@ -6201,7 +6247,7 @@ group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
                return true;
 
        if ((sgs->group_capacity * 100) >
-                       (sgs->group_usage * env->sd->imbalance_pct))
+                       (sgs->group_util * env->sd->imbalance_pct))
                return true;
 
        return false;
@@ -6222,15 +6268,15 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
                return false;
 
        if ((sgs->group_capacity * 100) <
-                       (sgs->group_usage * env->sd->imbalance_pct))
+                       (sgs->group_util * env->sd->imbalance_pct))
                return true;
 
        return false;
 }
 
-static enum group_type group_classify(struct lb_env *env,
-               struct sched_group *group,
-               struct sg_lb_stats *sgs)
+static inline enum
+group_type group_classify(struct sched_group *group,
+                         struct sg_lb_stats *sgs)
 {
        if (sgs->group_no_capacity)
                return group_overloaded;
@@ -6270,7 +6316,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                        load = source_load(i, load_idx);
 
                sgs->group_load += load;
-               sgs->group_usage += get_cpu_usage(i);
+               sgs->group_util += cpu_util(i);
                sgs->sum_nr_running += rq->cfs.h_nr_running;
 
                if (rq->nr_running > 1)
@@ -6295,7 +6341,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
        sgs->group_weight = group->group_weight;
 
        sgs->group_no_capacity = group_is_overloaded(env, sgs);
-       sgs->group_type = group_classify(env, group, sgs);
+       sgs->group_type = group_classify(group, sgs);
 }
 
 /**
@@ -6429,7 +6475,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
                    group_has_capacity(env, &sds->local_stat) &&
                    (sgs->sum_nr_running > 1)) {
                        sgs->group_no_capacity = 1;
-                       sgs->group_type = group_overloaded;
+                       sgs->group_type = group_classify(sg, sgs);
                }
 
                if (update_sd_pick_busiest(env, sds, sg, sgs)) {
@@ -7609,8 +7655,22 @@ out:
         * When the cpu is attached to null domain for ex, it will not be
         * updated.
         */
-       if (likely(update_next_balance))
+       if (likely(update_next_balance)) {
                rq->next_balance = next_balance;
+
+#ifdef CONFIG_NO_HZ_COMMON
+               /*
+                * If this CPU has been elected to perform the nohz idle
+                * balance. Other idle CPUs have already rebalanced with
+                * nohz_idle_balance() and nohz.next_balance has been
+                * updated accordingly. This CPU is now running the idle load
+                * balance for itself and we need to update the
+                * nohz.next_balance accordingly.
+                */
+               if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
+                       nohz.next_balance = rq->next_balance;
+#endif
+       }
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -7623,6 +7683,9 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
        int this_cpu = this_rq->cpu;
        struct rq *rq;
        int balance_cpu;
+       /* Earliest time when we have to do rebalance again */
+       unsigned long next_balance = jiffies + 60*HZ;
+       int update_next_balance = 0;
 
        if (idle != CPU_IDLE ||
            !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
@@ -7654,10 +7717,19 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
                        rebalance_domains(rq, CPU_IDLE);
                }
 
-               if (time_after(this_rq->next_balance, rq->next_balance))
-                       this_rq->next_balance = rq->next_balance;
+               if (time_after(next_balance, rq->next_balance)) {
+                       next_balance = rq->next_balance;
+                       update_next_balance = 1;
+               }
        }
-       nohz.next_balance = this_rq->next_balance;
+
+       /*
+        * next_balance will be updated only when there is a need.
+        * When the CPU is attached to null domain for ex, it will not be
+        * updated.
+        */
+       if (likely(update_next_balance))
+               nohz.next_balance = next_balance;
 end:
        clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
@@ -7810,7 +7882,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
                entity_tick(cfs_rq, se, queued);
        }
 
-       if (numabalancing_enabled)
+       if (static_branch_unlikely(&sched_numa_balancing))
                task_tick_numa(rq, curr);
 }
 
@@ -7886,21 +7958,39 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
                check_preempt_curr(rq, p, 0);
 }
 
-static void switched_from_fair(struct rq *rq, struct task_struct *p)
+static inline bool vruntime_normalized(struct task_struct *p)
 {
        struct sched_entity *se = &p->se;
-       struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
        /*
-        * Ensure the task's vruntime is normalized, so that when it's
-        * switched back to the fair class the enqueue_entity(.flags=0) will
-        * do the right thing.
+        * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
+        * the dequeue_entity(.flags=0) will already have normalized the
+        * vruntime.
+        */
+       if (p->on_rq)
+               return true;
+
+       /*
+        * When !on_rq, vruntime of the task has usually NOT been normalized.
+        * But there are some cases where it has already been normalized:
         *
-        * If it's queued, then the dequeue_entity(.flags=0) will already
-        * have normalized the vruntime, if it's !queued, then only when
-        * the task is sleeping will it still have non-normalized vruntime.
+        * - A forked child which is waiting for being woken up by
+        *   wake_up_new_task().
+        * - A task which has been woken up by try_to_wake_up() and
+        *   waiting for actually being woken up by sched_ttwu_pending().
         */
-       if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
+       if (!se->sum_exec_runtime || p->state == TASK_WAKING)
+               return true;
+
+       return false;
+}
+
+static void detach_task_cfs_rq(struct task_struct *p)
+{
+       struct sched_entity *se = &p->se;
+       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+       if (!vruntime_normalized(p)) {
                /*
                 * Fix up our vruntime so that the current sleep doesn't
                 * cause 'unlimited' sleep bonus.
@@ -7909,28 +7999,14 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
                se->vruntime -= cfs_rq->min_vruntime;
        }
 
-#ifdef CONFIG_SMP
        /* Catch up with the cfs_rq and remove our load when we leave */
-       __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq), &se->avg,
-               se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
-
-       cfs_rq->avg.load_avg =
-               max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
-       cfs_rq->avg.load_sum =
-               max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
-       cfs_rq->avg.util_avg =
-               max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
-       cfs_rq->avg.util_sum =
-               max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
-#endif
+       detach_entity_load_avg(cfs_rq, se);
 }
 
-/*
- * We switched to the sched_fair class.
- */
-static void switched_to_fair(struct rq *rq, struct task_struct *p)
+static void attach_task_cfs_rq(struct task_struct *p)
 {
        struct sched_entity *se = &p->se;
+       struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        /*
@@ -7940,31 +8016,33 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
        se->depth = se->parent ? se->parent->depth + 1 : 0;
 #endif
 
-       if (!task_on_rq_queued(p)) {
+       /* Synchronize task with its cfs_rq */
+       attach_entity_load_avg(cfs_rq, se);
+
+       if (!vruntime_normalized(p))
+               se->vruntime += cfs_rq->min_vruntime;
+}
+
+static void switched_from_fair(struct rq *rq, struct task_struct *p)
+{
+       detach_task_cfs_rq(p);
+}
+
+static void switched_to_fair(struct rq *rq, struct task_struct *p)
+{
+       attach_task_cfs_rq(p);
 
+       if (task_on_rq_queued(p)) {
                /*
-                * Ensure the task has a non-normalized vruntime when it is switched
-                * back to the fair class with !queued, so that enqueue_entity() at
-                * wake-up time will do the right thing.
-                *
-                * If it's queued, then the enqueue_entity(.flags=0) makes the task
-                * has non-normalized vruntime, if it's !queued, then it still has
-                * normalized vruntime.
+                * We were most likely switched from sched_rt, so
+                * kick off the schedule if running, otherwise just see
+                * if we can still preempt the current task.
                 */
-               if (p->state != TASK_RUNNING)
-                       se->vruntime += cfs_rq_of(se)->min_vruntime;
-               return;
+               if (rq->curr == p)
+                       resched_curr(rq);
+               else
+                       check_preempt_curr(rq, p, 0);
        }
-
-       /*
-        * We were most likely switched from sched_rt, so
-        * kick off the schedule if running, otherwise just see
-        * if we can still preempt the current task.
-        */
-       if (rq->curr == p)
-               resched_curr(rq);
-       else
-               check_preempt_curr(rq, p, 0);
 }
 
 /* Account for a task changing its policy or group.
@@ -7999,56 +8077,16 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void task_move_group_fair(struct task_struct *p, int queued)
+static void task_move_group_fair(struct task_struct *p)
 {
-       struct sched_entity *se = &p->se;
-       struct cfs_rq *cfs_rq;
-
-       /*
-        * If the task was not on the rq at the time of this cgroup movement
-        * it must have been asleep, sleeping tasks keep their ->vruntime
-        * absolute on their old rq until wakeup (needed for the fair sleeper
-        * bonus in place_entity()).
-        *
-        * If it was on the rq, we've just 'preempted' it, which does convert
-        * ->vruntime to a relative base.
-        *
-        * Make sure both cases convert their relative position when migrating
-        * to another cgroup's rq. This does somewhat interfere with the
-        * fair sleeper stuff for the first placement, but who cares.
-        */
-       /*
-        * When !queued, vruntime of the task has usually NOT been normalized.
-        * But there are some cases where it has already been normalized:
-        *
-        * - Moving a forked child which is waiting for being woken up by
-        *   wake_up_new_task().
-        * - Moving a task which has been woken up by try_to_wake_up() and
-        *   waiting for actually being woken up by sched_ttwu_pending().
-        *
-        * To prevent boost or penalty in the new cfs_rq caused by delta
-        * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
-        */
-       if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
-               queued = 1;
-
-       if (!queued)
-               se->vruntime -= cfs_rq_of(se)->min_vruntime;
+       detach_task_cfs_rq(p);
        set_task_rq(p, task_cpu(p));
-       se->depth = se->parent ? se->parent->depth + 1 : 0;
-       if (!queued) {
-               cfs_rq = cfs_rq_of(se);
-               se->vruntime += cfs_rq->min_vruntime;
 
 #ifdef CONFIG_SMP
-               /* Virtually synchronize task with its new cfs_rq */
-               p->se.avg.last_update_time = cfs_rq->avg.last_update_time;
-               cfs_rq->avg.load_avg += p->se.avg.load_avg;
-               cfs_rq->avg.load_sum += p->se.avg.load_sum;
-               cfs_rq->avg.util_avg += p->se.avg.util_avg;
-               cfs_rq->avg.util_sum += p->se.avg.util_sum;
+       /* Tell se's cfs_rq has been changed -- migrated */
+       p->se.avg.last_update_time = 0;
 #endif
-       }
+       attach_task_cfs_rq(p);
 }
 
 void free_fair_sched_group(struct task_group *tg)
index 83a50e7ca53315695f779e2c5ac7177c423b646d..69631fa46c2f84fecd3e15599cba0e5935c1148e 100644 (file)
@@ -36,11 +36,6 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true)
  */
 SCHED_FEAT(WAKEUP_PREEMPTION, true)
 
-/*
- * Use arch dependent cpu capacity functions
- */
-SCHED_FEAT(ARCH_CAPACITY, true)
-
 SCHED_FEAT(HRTICK, false)
 SCHED_FEAT(DOUBLE_TICK, false)
 SCHED_FEAT(LB_BIAS, true)
@@ -72,19 +67,5 @@ SCHED_FEAT(RT_PUSH_IPI, true)
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
 SCHED_FEAT(LB_MIN, false)
+SCHED_FEAT(ATTACH_AGE_LOAD, true)
 
-/*
- * Apply the automatic NUMA scheduling policy. Enabled automatically
- * at runtime if running on a NUMA machine. Can be controlled via
- * numa_balancing=
- */
-#ifdef CONFIG_NUMA_BALANCING
-
-/*
- * NUMA will favor moving tasks towards nodes where a higher number of
- * hinting faults are recorded during active load balancing. It will
- * resist moving tasks towards nodes where a lower number of hinting
- * faults have been recorded.
- */
-SCHED_FEAT(NUMA,       true)
-#endif
index 8f177c73ae199ba41878fea8de9a5b0a7196620a..4a2ef5a02fd3f91d7c4228378c23d5606bb73812 100644 (file)
@@ -57,9 +57,11 @@ static inline int cpu_idle_poll(void)
        rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
+       stop_critical_timings();
        while (!tif_need_resched() &&
                (cpu_idle_force_poll || tick_check_broadcast_expired()))
                cpu_relax();
+       start_critical_timings();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();
        return 1;
index d2ea59364a1c8f7e66d72e96c7d170088424eec0..e3cc16312046689fd04db8748304210c0d9fc8df 100644 (file)
@@ -635,11 +635,11 @@ bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.
  */
-static int do_balance_runtime(struct rt_rq *rt_rq)
+static void do_balance_runtime(struct rt_rq *rt_rq)
 {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
-       int i, weight, more = 0;
+       int i, weight;
        u64 rt_period;
 
        weight = cpumask_weight(rd->span);
@@ -673,7 +673,6 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
-                       more = 1;
                        if (rt_rq->rt_runtime == rt_period) {
                                raw_spin_unlock(&iter->rt_runtime_lock);
                                break;
@@ -683,8 +682,6 @@ next:
                raw_spin_unlock(&iter->rt_runtime_lock);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
-
-       return more;
 }
 
 /*
@@ -796,26 +793,19 @@ static void __enable_runtime(struct rq *rq)
        }
 }
 
-static int balance_runtime(struct rt_rq *rt_rq)
+static void balance_runtime(struct rt_rq *rt_rq)
 {
-       int more = 0;
-
        if (!sched_feat(RT_RUNTIME_SHARE))
-               return more;
+               return;
 
        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
-               more = do_balance_runtime(rt_rq);
+               do_balance_runtime(rt_rq);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
        }
-
-       return more;
 }
 #else /* !CONFIG_SMP */
-static inline int balance_runtime(struct rt_rq *rt_rq)
-{
-       return 0;
-}
+static inline void balance_runtime(struct rt_rq *rt_rq) {}
 #endif /* CONFIG_SMP */
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
index 68cda117574c3aed0e1849a6c07eb28e1f3369c9..efd3bfc7e34722883e2f08ca82f91cffde812963 100644 (file)
@@ -84,6 +84,10 @@ static inline void update_cpu_load_active(struct rq *this_rq) { }
  */
 #define RUNTIME_INF    ((u64)~0ULL)
 
+static inline int idle_policy(int policy)
+{
+       return policy == SCHED_IDLE;
+}
 static inline int fair_policy(int policy)
 {
        return policy == SCHED_NORMAL || policy == SCHED_BATCH;
@@ -98,6 +102,11 @@ static inline int dl_policy(int policy)
 {
        return policy == SCHED_DEADLINE;
 }
+static inline bool valid_policy(int policy)
+{
+       return idle_policy(policy) || fair_policy(policy) ||
+               rt_policy(policy) || dl_policy(policy);
+}
 
 static inline int task_has_rt_policy(struct task_struct *p)
 {
@@ -109,11 +118,6 @@ static inline int task_has_dl_policy(struct task_struct *p)
        return dl_policy(p->policy);
 }
 
-static inline bool dl_time_before(u64 a, u64 b)
-{
-       return (s64)(a - b) < 0;
-}
-
 /*
  * Tells if entity @a should preempt entity @b.
  */
@@ -1003,17 +1007,7 @@ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
 #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
 
-#ifdef CONFIG_NUMA_BALANCING
-#define sched_feat_numa(x) sched_feat(x)
-#ifdef CONFIG_SCHED_DEBUG
-#define numabalancing_enabled sched_feat_numa(NUMA)
-#else
-extern bool numabalancing_enabled;
-#endif /* CONFIG_SCHED_DEBUG */
-#else
-#define sched_feat_numa(x) (0)
-#define numabalancing_enabled (0)
-#endif /* CONFIG_NUMA_BALANCING */
+extern struct static_key_false sched_numa_balancing;
 
 static inline u64 global_rt_period(void)
 {
@@ -1078,9 +1072,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
         * After ->on_cpu is cleared, the task can be moved to a different CPU.
         * We must ensure this doesn't happen until the switch is completely
         * finished.
+        *
+        * Pairs with the control dependency and rmb in try_to_wake_up().
         */
-       smp_wmb();
-       prev->on_cpu = 0;
+       smp_store_release(&prev->on_cpu, 0);
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
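A hedged userspace sketch of the release/acquire pairing described in the comment above, using C11 atomics instead of the kernel's smp_store_release(); the names prev_cpu, waker and task_state are invented for the illustration. The point is only that every store made before the release of on_cpu is visible to a thread that observes on_cpu == 0 with an acquire load.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int task_state;                 /* stand-in for the task's scheduler state */
static atomic_int on_cpu = 1;

static void *prev_cpu(void *arg)       /* the CPU the task is leaving */
{
        (void)arg;
        task_state = 42;               /* last updates while still "on cpu" */
        atomic_store_explicit(&on_cpu, 0, memory_order_release);
        return NULL;
}

static void *waker(void *arg)          /* the CPU trying to wake/migrate the task */
{
        (void)arg;
        while (atomic_load_explicit(&on_cpu, memory_order_acquire))
                ;                      /* spin until the previous owner lets go */
        printf("task_state = %d\n", task_state);   /* guaranteed to print 42 */
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, prev_cpu, NULL);
        pthread_create(&b, NULL, waker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;                      /* build with -pthread */
}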
@@ -1156,16 +1151,18 @@ static const u32 prio_to_wmult[40] = {
  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
 
-#define ENQUEUE_WAKEUP         1
-#define ENQUEUE_HEAD           2
+#define ENQUEUE_WAKEUP         0x01
+#define ENQUEUE_HEAD           0x02
 #ifdef CONFIG_SMP
-#define ENQUEUE_WAKING         4       /* sched_class::task_waking was called */
+#define ENQUEUE_WAKING         0x04    /* sched_class::task_waking was called */
 #else
-#define ENQUEUE_WAKING         0
+#define ENQUEUE_WAKING         0x00
 #endif
-#define ENQUEUE_REPLENISH      8
+#define ENQUEUE_REPLENISH      0x08
+#define ENQUEUE_RESTORE        0x10
 
-#define DEQUEUE_SLEEP          1
+#define DEQUEUE_SLEEP          0x01
+#define DEQUEUE_SAVE           0x02
 
 #define RETRY_TASK             ((void *)-1UL)
 
@@ -1193,7 +1190,7 @@ struct sched_class {
 
 #ifdef CONFIG_SMP
        int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
-       void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
+       void (*migrate_task_rq)(struct task_struct *p);
 
        void (*task_waking) (struct task_struct *task);
        void (*task_woken) (struct rq *this_rq, struct task_struct *task);
@@ -1226,7 +1223,7 @@ struct sched_class {
        void (*update_curr) (struct rq *rq);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-       void (*task_move_group) (struct task_struct *p, int on_rq);
+       void (*task_move_group) (struct task_struct *p);
 #endif
 };
 
@@ -1404,6 +1401,17 @@ unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
 }
 #endif
 
+#ifndef arch_scale_cpu_capacity
+static __always_inline
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+       if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
+               return sd->smt_gain / sd->span_weight;
+
+       return SCHED_CAPACITY_SCALE;
+}
+#endif
+
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
        rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
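As a worked example of the arch_scale_cpu_capacity() fallback added above (assuming the default smt_gain of 1178 the scheduler used at the time): on a two-thread SMT sibling domain, SD_SHARE_CPUCAPACITY is set and span_weight is 2, so each logical CPU is rated at 1178 / 2 = 589, whereas a CPU without SMT reports the full SCHED_CAPACITY_SCALE of 1024; the sibling pair together therefore counts as roughly 1.15 CPUs of capacity.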
index a818cbc73e147382488cb0ea5bb5c490c6a46e15..d264f59bff56cb128edc68c5746280a2499752d5 100644 (file)
@@ -222,9 +222,8 @@ static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cp
 {
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
 
-       if (ht->pre_unpark)
-               ht->pre_unpark(cpu);
-       kthread_unpark(tsk);
+       if (!ht->selfparking)
+               kthread_unpark(tsk);
 }
 
 void smpboot_unpark_threads(unsigned int cpu)
index 12484e5d5c88769058610aca529924ea9e882aff..867bc20e1ef142a63349c345932af24b26a1adfc 100644 (file)
@@ -73,21 +73,24 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
        }
 }
 
+static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
+                                       struct cpu_stop_work *work)
+{
+       list_add_tail(&work->list, &stopper->works);
+       wake_up_process(stopper->thread);
+}
+
 /* queue @work to @stopper.  if offline, @work is completed immediately */
 static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-
        unsigned long flags;
 
        spin_lock_irqsave(&stopper->lock, flags);
-
-       if (stopper->enabled) {
-               list_add_tail(&work->list, &stopper->works);
-               wake_up_process(stopper->thread);
-       } else
+       if (stopper->enabled)
+               __cpu_stop_queue_work(stopper, work);
+       else
                cpu_stop_signal_done(work->done, false);
-
        spin_unlock_irqrestore(&stopper->lock, flags);
 }
 
@@ -213,6 +216,31 @@ static int multi_cpu_stop(void *data)
        return err;
 }
 
+static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
+                                   int cpu2, struct cpu_stop_work *work2)
+{
+       struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
+       struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
+       int err;
+
+       lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
+       spin_lock_irq(&stopper1->lock);
+       spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+
+       err = -ENOENT;
+       if (!stopper1->enabled || !stopper2->enabled)
+               goto unlock;
+
+       err = 0;
+       __cpu_stop_queue_work(stopper1, work1);
+       __cpu_stop_queue_work(stopper2, work2);
+unlock:
+       spin_unlock(&stopper2->lock);
+       spin_unlock_irq(&stopper1->lock);
+       lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
+
+       return err;
+}
 /**
  * stop_two_cpus - stops two cpus
  * @cpu1: the cpu to stop
@@ -247,24 +275,13 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
        cpu_stop_init_done(&done, 2);
        set_state(&msdata, MULTI_STOP_PREPARE);
 
-       /*
-        * If we observe both CPUs active we know _cpu_down() cannot yet have
-        * queued its stop_machine works and therefore ours will get executed
-        * first. Or its not either one of our CPUs that's getting unplugged,
-        * in which case we don't care.
-        *
-        * This relies on the stopper workqueues to be FIFO.
-        */
-       if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
+       if (cpu1 > cpu2)
+               swap(cpu1, cpu2);
+       if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) {
                preempt_enable();
                return -ENOENT;
        }
 
-       lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
-       cpu_stop_queue_work(cpu1, &work1);
-       cpu_stop_queue_work(cpu2, &work2);
-       lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
-
        preempt_enable();
 
        wait_for_completion(&done.completion);
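The new cpu_stop_queue_two_works() above, together with the swap() in stop_two_cpus(), relies on a classic deadlock-avoidance rule: when two per-CPU locks must be held at once, always take the lower-numbered one first, so two racing callers can never each hold one lock while waiting for the other. A standalone userspace sketch of the same rule (pthread mutexes, invented names):

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

static pthread_mutex_t stopper_lock[NR_CPUS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void queue_two(int cpu1, int cpu2)
{
        if (cpu1 > cpu2) {              /* canonical order, like the swap() in the patch */
                int tmp = cpu1;
                cpu1 = cpu2;
                cpu2 = tmp;
        }

        pthread_mutex_lock(&stopper_lock[cpu1]);
        pthread_mutex_lock(&stopper_lock[cpu2]);

        printf("queued work on cpus %d and %d\n", cpu1, cpu2);

        pthread_mutex_unlock(&stopper_lock[cpu2]);
        pthread_mutex_unlock(&stopper_lock[cpu1]);
}

int main(void)
{
        queue_two(3, 1);                /* takes lock 1 then lock 3, as queue_two(1, 3) would */
        return 0;
}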
@@ -452,6 +469,18 @@ repeat:
        }
 }
 
+void stop_machine_park(int cpu)
+{
+       struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+       /*
+        * Lockless. cpu_stopper_thread() will take stopper->lock and flush
+        * the pending works before it parks, until then it is fine to queue
+        * the new works.
+        */
+       stopper->enabled = false;
+       kthread_park(stopper->thread);
+}
+
 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 
 static void cpu_stop_create(unsigned int cpu)
@@ -462,26 +491,16 @@ static void cpu_stop_create(unsigned int cpu)
 static void cpu_stop_park(unsigned int cpu)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-       struct cpu_stop_work *work, *tmp;
-       unsigned long flags;
 
-       /* drain remaining works */
-       spin_lock_irqsave(&stopper->lock, flags);
-       list_for_each_entry_safe(work, tmp, &stopper->works, list) {
-               list_del_init(&work->list);
-               cpu_stop_signal_done(work->done, false);
-       }
-       stopper->enabled = false;
-       spin_unlock_irqrestore(&stopper->lock, flags);
+       WARN_ON(!list_empty(&stopper->works));
 }
 
-static void cpu_stop_unpark(unsigned int cpu)
+void stop_machine_unpark(int cpu)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 
-       spin_lock_irq(&stopper->lock);
        stopper->enabled = true;
-       spin_unlock_irq(&stopper->lock);
+       kthread_unpark(stopper->thread);
 }
 
 static struct smp_hotplug_thread cpu_stop_threads = {
@@ -490,9 +509,7 @@ static struct smp_hotplug_thread cpu_stop_threads = {
        .thread_fn              = cpu_stopper_thread,
        .thread_comm            = "migration/%u",
        .create                 = cpu_stop_create,
-       .setup                  = cpu_stop_unpark,
        .park                   = cpu_stop_park,
-       .pre_unpark             = cpu_stop_unpark,
        .selfparking            = true,
 };
 
@@ -508,6 +525,7 @@ static int __init cpu_stop_init(void)
        }
 
        BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
+       stop_machine_unpark(raw_smp_processor_id());
        stop_machine_initialized = true;
        return 0;
 }
index 841b72f720e88041a99ded8852381555c307fb43..0d8fe8b8f72772e0899662c29011d1d22c261c8f 100644 (file)
@@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data)
                        continue;
 
                /* Check the deviation from the watchdog clocksource. */
-               if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
+               if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
                        pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n",
                                cs->name);
                        pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
@@ -479,7 +479,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
  * return half the number of nanoseconds the hardware counter can technically
  * cover. This is done so that we can potentially detect problems caused by
  * delayed timers or bad hardware, which might result in time intervals that
- * are larger then what the math used can handle without overflows.
+ * are larger than what the math used can handle without overflows.
  */
 u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
 {
@@ -595,16 +595,15 @@ static void __clocksource_select(bool skipcur)
  */
 static void clocksource_select(void)
 {
-       return __clocksource_select(false);
+       __clocksource_select(false);
 }
 
 static void clocksource_select_fallback(void)
 {
-       return __clocksource_select(true);
+       __clocksource_select(true);
 }
 
 #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
-
 static inline void clocksource_select(void) { }
 static inline void clocksource_select_fallback(void) { }
 
index 457a373e21812be2d06d52e500e7b490da8a4c67..435b8850dd80a300c11ad128bc0cf51e3c23e15a 100644 (file)
@@ -59,7 +59,7 @@
 /*
  * The timer bases:
  *
- * There are more clockids then hrtimer bases. Thus, we index
+ * There are more clockids than hrtimer bases. Thus, we index
  * into the timer bases by the hrtimer_base_type enum. When trying
  * to reach a base using a clockid, hrtimer_clockid_to_base()
  * is used to convert from clockid to the proper hrtimer_base_type.
index df68cb87524885dd561b615300137a68d7ecfbdb..149cc8086aea16bbd811d3af37e187fd08bb366f 100644 (file)
@@ -99,7 +99,7 @@ static time64_t                       ntp_next_leap_sec = TIME64_MAX;
 static int pps_valid;          /* signal watchdog counter */
 static long pps_tf[3];         /* phase median filter */
 static long pps_jitter;                /* current jitter (ns) */
-static struct timespec pps_fbase; /* beginning of the last freq interval */
+static struct timespec64 pps_fbase; /* beginning of the last freq interval */
 static int pps_shift;          /* current interval duration (s) (shift) */
 static int pps_intcnt;         /* interval counter */
 static s64 pps_freq;           /* frequency offset (scaled ns/s) */
@@ -509,7 +509,7 @@ static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
 static void sync_cmos_clock(struct work_struct *work)
 {
        struct timespec64 now;
-       struct timespec next;
+       struct timespec64 next;
        int fail = 1;
 
        /*
@@ -559,7 +559,7 @@ static void sync_cmos_clock(struct work_struct *work)
                next.tv_nsec -= NSEC_PER_SEC;
        }
        queue_delayed_work(system_power_efficient_wq,
-                          &sync_cmos_work, timespec_to_jiffies(&next));
+                          &sync_cmos_work, timespec64_to_jiffies(&next));
 }
 
 void ntp_notify_cmos_timer(void)
@@ -773,13 +773,13 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
  * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
  * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */
 struct pps_normtime {
-       __kernel_time_t sec;    /* seconds */
+       s64             sec;    /* seconds */
        long            nsec;   /* nanoseconds */
 };
 
 /* normalize the timestamp so that nsec is in the
    ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */
-static inline struct pps_normtime pps_normalize_ts(struct timespec ts)
+static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts)
 {
        struct pps_normtime norm = {
                .sec = ts.tv_sec,
@@ -861,7 +861,7 @@ static long hardpps_update_freq(struct pps_normtime freq_norm)
                pps_errcnt++;
                pps_dec_freq_interval();
                printk_deferred(KERN_ERR
-                       "hardpps: PPSERROR: interval too long - %ld s\n",
+                       "hardpps: PPSERROR: interval too long - %lld s\n",
                        freq_norm.sec);
                return 0;
        }
@@ -948,7 +948,7 @@ static void hardpps_update_phase(long error)
  * This code is based on David Mills's reference nanokernel
  * implementation. It was mostly rewritten but keeps the same idea.
  */
-void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
+void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
 {
        struct pps_normtime pts_norm, freq_norm;
 
@@ -969,7 +969,7 @@ void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
        }
 
        /* ok, now we have a base for frequency calculation */
-       freq_norm = pps_normalize_ts(timespec_sub(*raw_ts, pps_fbase));
+       freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, pps_fbase));
 
        /* check that the signal is in the range
         * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
index 65430504ca2630c31d185429e6a2573c817b43ae..af924470eac04c046e3481750a32833cef9acc13 100644 (file)
@@ -9,5 +9,5 @@ extern ktime_t ntp_get_next_leap(void);
 extern int second_overflow(unsigned long secs);
 extern int ntp_validate_timex(struct timex *);
 extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
-extern void __hardpps(const struct timespec *, const struct timespec *);
+extern void __hardpps(const struct timespec64 *, const struct timespec64 *);
 #endif /* _LINUX_NTP_INTERNAL_H */
index 892e3dae0aac41199e9ebbbdef8b73f6b2d57afd..f5e86d282d520a7881557de76096c1cf46f58fa7 100644 (file)
@@ -249,7 +249,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
                 * but barriers are not required because update_gt_cputime()
                 * can handle concurrent updates.
                 */
-               WRITE_ONCE(cputimer->running, 1);
+               WRITE_ONCE(cputimer->running, true);
        }
        sample_cputime_atomic(times, &cputimer->cputime_atomic);
 }
@@ -864,6 +864,13 @@ static void check_thread_timers(struct task_struct *tsk,
        unsigned long long expires;
        unsigned long soft;
 
+       /*
+        * If cputime_expires is zero, then there are no active
+        * per thread CPU timers.
+        */
+       if (task_cputime_zero(&tsk->cputime_expires))
+               return;
+
        expires = check_timers_list(timers, firing, prof_ticks(tsk));
        tsk_expires->prof_exp = expires_to_cputime(expires);
 
@@ -911,7 +918,7 @@ static inline void stop_process_timers(struct signal_struct *sig)
        struct thread_group_cputimer *cputimer = &sig->cputimer;
 
        /* Turn off cputimer->running. This is done without locking. */
-       WRITE_ONCE(cputimer->running, 0);
+       WRITE_ONCE(cputimer->running, false);
 }
 
 static u32 onecputick;
@@ -961,6 +968,19 @@ static void check_process_timers(struct task_struct *tsk,
        struct task_cputime cputime;
        unsigned long soft;
 
+       /*
+        * If cputimer is not running, then there are no active
+        * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
+        */
+       if (!READ_ONCE(tsk->signal->cputimer.running))
+               return;
+
+        /*
+        * Signify that a thread is checking for process timers.
+        * Write access to this field is protected by the sighand lock.
+        */
+       sig->cputimer.checking_timer = true;
+
        /*
         * Collect the current process totals.
         */
@@ -1015,6 +1035,8 @@ static void check_process_timers(struct task_struct *tsk,
        sig->cputime_expires.sched_exp = sched_expires;
        if (task_cputime_zero(&sig->cputime_expires))
                stop_process_timers(sig);
+
+       sig->cputimer.checking_timer = false;
 }
 
 /*
@@ -1117,24 +1139,33 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
 static inline int fastpath_timer_check(struct task_struct *tsk)
 {
        struct signal_struct *sig;
-       cputime_t utime, stime;
-
-       task_cputime(tsk, &utime, &stime);
 
        if (!task_cputime_zero(&tsk->cputime_expires)) {
-               struct task_cputime task_sample = {
-                       .utime = utime,
-                       .stime = stime,
-                       .sum_exec_runtime = tsk->se.sum_exec_runtime
-               };
+               struct task_cputime task_sample;
 
+               task_cputime(tsk, &task_sample.utime, &task_sample.stime);
+               task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }
 
        sig = tsk->signal;
-       /* Check if cputimer is running. This is accessed without locking. */
-       if (READ_ONCE(sig->cputimer.running)) {
+       /*
+        * Check if thread group timers expired when the cputimer is
+        * running and no other thread in the group is already checking
+        * for thread group cputimers. These fields are read without the
+        * sighand lock. However, this is fine because this is meant to
+        * be a fastpath heuristic to determine whether we should try to
+        * acquire the sighand lock to check/handle timers.
+        *
+        * In the worst case scenario, if 'running' or 'checking_timer' gets
+        * set but the current thread doesn't see the change yet, we'll wait
+        * until the next thread in the group gets a scheduler interrupt to
+        * handle the timer. This isn't an issue in practice because these
+        * types of delays with signals actually getting sent are expected.
+        */
+       if (READ_ONCE(sig->cputimer.running) &&
+           !READ_ONCE(sig->cputimer.checking_timer)) {
                struct task_cputime group_sample;
 
                sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
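A minimal userspace sketch of the lockless fast-path heuristic the comment above describes (invented names; C11 atomics standing in for the kernel's READ_ONCE()/WRITE_ONCE()): the two flags are read without the lock purely as a hint, and a stale read merely postpones the work to a later tick.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool running = true;      /* group-wide timers are armed */
static atomic_bool checking_timer;      /* another thread already took the slow path */
static pthread_mutex_t sighand_lock = PTHREAD_MUTEX_INITIALIZER;

static void scheduler_tick(void)
{
        /* Fast path: skip the lock when there is nothing to do, or when
         * someone else is already expiring the group timers. */
        if (!atomic_load(&running) || atomic_load(&checking_timer))
                return;

        pthread_mutex_lock(&sighand_lock);
        atomic_store(&checking_timer, true);
        /* ... walk and expire the group timers under the lock ... */
        atomic_store(&checking_timer, false);
        pthread_mutex_unlock(&sighand_lock);
}

int main(void)
{
        scheduler_tick();
        puts("tick handled");
        return 0;
}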
@@ -1174,12 +1205,8 @@ void run_posix_cpu_timers(struct task_struct *tsk)
         * put them on the firing list.
         */
        check_thread_timers(tsk, &firing);
-       /*
-        * If there are any active process wide timers (POSIX 1.b, itimers,
-        * RLIMIT_CPU) cputimer must be running.
-        */
-       if (READ_ONCE(tsk->signal->cputimer.running))
-               check_process_timers(tsk, &firing);
+
+       check_process_timers(tsk, &firing);
 
        /*
         * We must release these locks before taking any timer's lock.
index c7388dee86358ae46967a20f39ffab6f44080d16..c48688904f9fec9dc033239e28c0bdcbc761e59d 100644 (file)
@@ -39,7 +39,7 @@ define fmuls(b,n,d) {
 }
 
 define timeconst(hz) {
-       print "/* Automatically generated by kernel/timeconst.bc */\n"
+       print "/* Automatically generated by kernel/time/timeconst.bc */\n"
        print "/* Time conversion constants for HZ == ", hz, " */\n"
        print "\n"
 
index 3739ac6aa47355e7234cf0ee2fc60ebc3adce979..b1356b7ae57057139acf46c2c68cfa27688bcb63 100644 (file)
@@ -849,7 +849,7 @@ EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
 #ifdef CONFIG_NTP_PPS
 
 /**
- * getnstime_raw_and_real - get day and raw monotonic time in timespec format
+ * ktime_get_raw_and_real_ts64 - get day and raw monotonic time in timespec format
  * @ts_raw:    pointer to the timespec to be set to raw monotonic time
  * @ts_real:   pointer to the timespec to be set to the time of day
  *
@@ -857,7 +857,7 @@ EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
  * same time atomically and stores the resulting timestamps in timespec
  * format.
  */
-void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
+void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, struct timespec64 *ts_real)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
@@ -868,7 +868,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
        do {
                seq = read_seqcount_begin(&tk_core.seq);
 
-               *ts_raw = timespec64_to_timespec(tk->raw_time);
+               *ts_raw = tk->raw_time;
                ts_real->tv_sec = tk->xtime_sec;
                ts_real->tv_nsec = 0;
 
@@ -877,10 +877,10 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
 
        } while (read_seqcount_retry(&tk_core.seq, seq));
 
-       timespec_add_ns(ts_raw, nsecs_raw);
-       timespec_add_ns(ts_real, nsecs_real);
+       timespec64_add_ns(ts_raw, nsecs_raw);
+       timespec64_add_ns(ts_real, nsecs_real);
 }
-EXPORT_SYMBOL(getnstime_raw_and_real);
+EXPORT_SYMBOL(ktime_get_raw_and_real_ts64);
 
 #endif /* CONFIG_NTP_PPS */
 
@@ -1251,7 +1251,7 @@ void __init timekeeping_init(void)
        set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
        tk_set_wall_to_mono(tk, tmp);
 
-       timekeeping_update(tk, TK_MIRROR);
+       timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
@@ -1674,7 +1674,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 /**
  * accumulate_nsecs_to_secs - Accumulates nsecs into secs
  *
- * Helper function that accumulates a the nsecs greater then a second
+ * Helper function that accumulates the nsecs greater than a second
  * from the xtime_nsec field to the xtime_secs field.
  * It also calls into the NTP code to handle leapsecond processing.
  *
@@ -1726,7 +1726,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
        cycle_t interval = tk->cycle_interval << shift;
        u64 raw_nsecs;
 
-       /* If the offset is smaller then a shifted interval, do nothing */
+       /* If the offset is smaller than a shifted interval, do nothing */
        if (offset < interval)
                return offset;
 
@@ -2025,7 +2025,7 @@ int do_adjtimex(struct timex *txc)
 /**
  * hardpps() - Accessor function to NTP __hardpps function
  */
-void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
+void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
 {
        unsigned long flags;
 
index 84190f02b521c9fc77cee6e6a6722000939ee470..74591ba9474f4dff4e0c3d7b7acedbe14b5d57a8 100644 (file)
@@ -461,10 +461,17 @@ void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
 
 static void timer_stats_account_timer(struct timer_list *timer)
 {
-       if (likely(!timer->start_site))
+       void *site;
+
+       /*
+        * start_site can be concurrently reset by
+        * timer_stats_timer_clear_start_info()
+        */
+       site = READ_ONCE(timer->start_site);
+       if (likely(!site))
                return;
 
-       timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+       timer_stats_update_stats(timer, timer->start_pid, site,
                                 timer->function, timer->start_comm,
                                 timer->flags);
 }
@@ -867,7 +874,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
        if (mask == 0)
                return expires;
 
-       bit = find_last_bit(&mask, BITS_PER_LONG);
+       bit = __fls(mask);
 
        mask = (1UL << bit) - 1;
 
index 3e4840633d3ee7bd926f1fe67f8b0a4b324514da..44aa462d033f700a86e1439d1b24053daca4094a 100644 (file)
@@ -523,6 +523,7 @@ static int stutter;
  */
 void stutter_wait(const char *title)
 {
+       cond_resched_rcu_qs();
        while (READ_ONCE(stutter_pause_test) ||
               (torture_runnable && !READ_ONCE(*torture_runnable))) {
                if (stutter_pause_test)
index b0623ac785a22287526327a021d81fe3eaf5fafb..00611e95a8ee00bb91e7ccbd41c3ebcbc580e4f8 100644 (file)
@@ -5697,7 +5697,7 @@ free:
 }
 
 static void
-ftrace_graph_probe_sched_switch(void *ignore,
+ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
                        struct task_struct *prev, struct task_struct *next)
 {
        unsigned long long timestamp;
index f270088e9929aa2e085a15960828929140ae1037..4c896a0101bdcb231581a3cced5f2ef4a3556d08 100644 (file)
@@ -16,7 +16,8 @@ static int                    sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
 
 static void
-probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
+probe_sched_switch(void *ignore, bool preempt,
+                  struct task_struct *prev, struct task_struct *next)
 {
        if (unlikely(!sched_ref))
                return;
index 12cbe77b413620cb80436ab55d3758237e31a5b9..4bcfbac289ff9e9e6ab4d39772ad2dffd89509a8 100644 (file)
@@ -420,7 +420,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 }
 
 static void notrace
-probe_wakeup_sched_switch(void *ignore,
+probe_wakeup_sched_switch(void *ignore, bool preempt,
                          struct task_struct *prev, struct task_struct *next)
 {
        struct trace_array_cpu *data;
index b746399ab59c01e422da63468aa370b1b642a860..8abf1ba18085742af78176dbc514095a47643c9c 100644 (file)
@@ -85,9 +85,19 @@ check_stack(unsigned long ip, unsigned long *stack)
        if (!object_is_on_stack(stack))
                return;
 
+       /* Can't do this from NMI context (can cause deadlocks) */
+       if (in_nmi())
+               return;
+
        local_irq_save(flags);
        arch_spin_lock(&max_stack_lock);
 
+       /*
+        * RCU may not be watching, make it see us.
+        * The stack trace code uses rcu_sched.
+        */
+       rcu_irq_enter();
+
        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;
@@ -169,6 +179,7 @@ check_stack(unsigned long ip, unsigned long *stack)
        }
 
  out:
+       rcu_irq_exit();
        arch_spin_unlock(&max_stack_lock);
        local_irq_restore(flags);
 }
index ca71582fcfab29ec708746eab636c325b9caef15..bcb14cafe007148b15edb5cfed5adc041a6d966c 100644 (file)
@@ -1458,13 +1458,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
        timer_stats_timer_set_start_info(&dwork->timer);
 
        dwork->wq = wq;
+       /* timer isn't guaranteed to run in this cpu, record earlier */
+       if (cpu == WORK_CPU_UNBOUND)
+               cpu = raw_smp_processor_id();
        dwork->cpu = cpu;
        timer->expires = jiffies + delay;
 
-       if (unlikely(cpu != WORK_CPU_UNBOUND))
-               add_timer_on(timer, cpu);
-       else
-               add_timer(timer);
+       add_timer_on(timer, cpu);
 }
 
 /**
index 2e491ac15622a559c88ba12a4067eeb5ca704115..f0df318104e7272ef97641421c938069a047a85e 100644 (file)
@@ -220,6 +220,7 @@ config ZLIB_INFLATE
 
 config ZLIB_DEFLATE
        tristate
+       select BITREVERSE
 
 config LZO_COMPRESS
        tristate
index ab76b99adc857fb38c2e4677b6ab34d6a01fa786..1d1521c26302a2510e873d49a8b6bb525f399d66 100644 (file)
@@ -197,6 +197,7 @@ config ENABLE_MUST_CHECK
 config FRAME_WARN
        int "Warn for stack frames larger than (needs gcc 4.4)"
        range 0 8192
+       default 0 if KASAN
        default 1024 if !64BIT
        default 2048 if 64BIT
        help
index f1cdeb024d172a488e1ed44f70ce1e090ee5e541..6a823a53e357bc83e84476d93cc2c155f0adfee5 100644 (file)
@@ -44,7 +44,7 @@ static void fail_dump(struct fault_attr *attr)
                printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
                       "name %pd, interval %lu, probability %lu, "
                       "space %d, times %d\n", attr->dname,
-                      attr->probability, attr->interval,
+                      attr->interval, attr->probability,
                       atomic_read(&attr->space),
                       atomic_read(&attr->times));
                if (attr->verbose > 1)
index 88d3d32e59236bc1127d88c781bc014d70920652..6019c53c669e176bcf36cf3e8bffb35c9b893432 100644 (file)
@@ -43,6 +43,12 @@ static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
        printk("%.*s", (end - start) + 1, buf);
 }
 
+/*
+ * When raise() is called it will be passed a pointer to the
+ * backtrace_mask. Architectures that call nmi_cpu_backtrace()
+ * directly from their raise() functions may rely on the mask
+ * they are passed being updated as a side effect of this call.
+ */
 void nmi_trigger_all_cpu_backtrace(bool include_self,
                                   void (*raise)(cpumask_t *mask))
 {
@@ -149,7 +155,10 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
                /* Replace printk to write into the NMI seq */
                this_cpu_write(printk_func, nmi_vprintk);
                pr_warn("NMI backtrace for cpu %d\n", cpu);
-               show_regs(regs);
+               if (regs)
+                       show_regs(regs);
+               else
+                       dump_stack();
                this_cpu_write(printk_func, printk_func_save);
 
                cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
index 13d1e84ddb80e983a325011fb8dd8b6ff9ce0600..84775ba873b9efd978fa006be56e58057b34031f 100644 (file)
 #include <linux/bug.h>
 #include <linux/errno.h>
 
+#include <asm/byteorder.h>
+#include <asm/word-at-a-time.h>
+#include <asm/page.h>
+
 #ifndef __HAVE_ARCH_STRNCASECMP
 /**
  * strncasecmp - Case insensitive, length-limited string comparison
@@ -146,6 +150,91 @@ size_t strlcpy(char *dest, const char *src, size_t size)
 EXPORT_SYMBOL(strlcpy);
 #endif
 
+#ifndef __HAVE_ARCH_STRSCPY
+/**
+ * strscpy - Copy a C-string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @count: Size of destination buffer
+ *
+ * Copy the string, or as much of it as fits, into the dest buffer.
+ * The routine returns the number of characters copied (not including
+ * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough.
+ * The behavior is undefined if the string buffers overlap.
+ * The destination buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * Preferred to strlcpy() since the API doesn't require reading memory
+ * from the src string beyond the specified "count" bytes, and since
+ * the return value is easier to error-check than strlcpy()'s.
+ * In addition, the implementation is robust to the string changing out
+ * from underneath it, unlike the current strlcpy() implementation.
+ *
+ * Preferred to strncpy() since it always returns a valid string, and
+ * doesn't unnecessarily force the tail of the destination buffer to be
+ * zeroed.  If the zeroing is desired, it's likely cleaner to use strscpy()
+ * with an overflow test, then just memset() the tail of the dest buffer.
+ */
+ssize_t strscpy(char *dest, const char *src, size_t count)
+{
+       const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       size_t max = count;
+       long res = 0;
+
+       if (count == 0)
+               return -E2BIG;
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       /*
+        * If src is unaligned, don't cross a page boundary,
+        * since we don't know if the next page is mapped.
+        */
+       if ((long)src & (sizeof(long) - 1)) {
+               size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1));
+               if (limit < max)
+                       max = limit;
+       }
+#else
+       /* If src or dest is unaligned, don't do word-at-a-time. */
+       if (((long) dest | (long) src) & (sizeof(long) - 1))
+               max = 0;
+#endif
+
+       while (max >= sizeof(unsigned long)) {
+               unsigned long c, data;
+
+               c = *(unsigned long *)(src+res);
+               if (has_zero(c, &data, &constants)) {
+                       data = prep_zero_mask(c, data, &constants);
+                       data = create_zero_mask(data);
+                       *(unsigned long *)(dest+res) = c & zero_bytemask(data);
+                       return res + find_zero(data);
+               }
+               *(unsigned long *)(dest+res) = c;
+               res += sizeof(unsigned long);
+               count -= sizeof(unsigned long);
+               max -= sizeof(unsigned long);
+       }
+
+       while (count) {
+               char c;
+
+               c = src[res];
+               dest[res] = c;
+               if (!c)
+                       return res;
+               res++;
+               count--;
+       }
+
+       /* Hit buffer length without finding a NUL; force NUL-termination. */
+       if (res)
+               dest[res-1] = '\0';
+
+       return -E2BIG;
+}
+EXPORT_SYMBOL(strscpy);
+#endif
+
 #ifndef __HAVE_ARCH_STRCAT
 /**
  * strcat - Append one %NUL-terminated string to another
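A userspace model of the calling contract documented for the new strscpy() above: the return value is the copied length excluding the NUL, or -E2BIG when the source had to be truncated, and the destination is always NUL-terminated for a non-zero count. This is a deliberately simple byte-at-a-time sketch; the kernel version above adds the word-at-a-time fast path.

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

static ssize_t strscpy_model(char *dest, const char *src, size_t count)
{
        size_t res = 0;

        if (count == 0)
                return -E2BIG;

        while (res + 1 < count && src[res]) {
                dest[res] = src[res];
                res++;
        }
        dest[res] = '\0';                       /* always terminate */

        return src[res] ? -E2BIG : (ssize_t)res;
}

int main(void)
{
        char buf[8];
        ssize_t n;

        n = strscpy_model(buf, "short", sizeof(buf));
        printf("copied %zd bytes: \"%s\"\n", n, buf);   /* 5, "short" */

        n = strscpy_model(buf, "definitely too long", sizeof(buf));
        if (n == -E2BIG)                                /* truncated but still NUL-terminated */
                printf("truncated to \"%s\"\n", buf);
        return 0;
}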
index 2df8ddcb0ca0a7f7a055456de4b46a8c55bbfdf1..619984fc07ec32792349c7fe8aec7c8b56e3d2a3 100644 (file)
@@ -480,6 +480,10 @@ static void cgwb_release_workfn(struct work_struct *work)
                                                release_work);
        struct backing_dev_info *bdi = wb->bdi;
 
+       spin_lock_irq(&cgwb_lock);
+       list_del_rcu(&wb->bdi_node);
+       spin_unlock_irq(&cgwb_lock);
+
        wb_shutdown(wb);
 
        css_put(wb->memcg_css);
@@ -575,6 +579,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
                ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
                if (!ret) {
                        atomic_inc(&bdi->usage_cnt);
+                       list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
                        list_add(&wb->memcg_node, memcg_cgwb_list);
                        list_add(&wb->blkcg_node, blkcg_cgwb_list);
                        css_get(memcg_css);
@@ -676,7 +681,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 {
        struct radix_tree_iter iter;
-       struct bdi_writeback_congested *congested, *congested_n;
+       struct rb_node *rbn;
        void **slot;
 
        WARN_ON(test_bit(WB_registered, &bdi->wb.state));
@@ -686,9 +691,11 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);
 
-       rbtree_postorder_for_each_entry_safe(congested, congested_n,
-                                       &bdi->cgwb_congested_tree, rb_node) {
-               rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree);
+       while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
+               struct bdi_writeback_congested *congested =
+                       rb_entry(rbn, struct bdi_writeback_congested, rb_node);
+
+               rb_erase(rbn, &bdi->cgwb_congested_tree);
                congested->bdi = NULL;  /* mark @congested unlinked */
        }
 
@@ -764,15 +771,22 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
 
 int bdi_init(struct backing_dev_info *bdi)
 {
+       int ret;
+
        bdi->dev = NULL;
 
        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = FPROP_FRAC_BASE;
        INIT_LIST_HEAD(&bdi->bdi_list);
+       INIT_LIST_HEAD(&bdi->wb_list);
        init_waitqueue_head(&bdi->wb_waitq);
 
-       return cgwb_bdi_init(bdi);
+       ret = cgwb_bdi_init(bdi);
+
+       list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
+
+       return ret;
 }
 EXPORT_SYMBOL(bdi_init);
 
@@ -823,7 +837,7 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
        synchronize_rcu_expedited();
 }
 
-void bdi_destroy(struct backing_dev_info *bdi)
+void bdi_unregister(struct backing_dev_info *bdi)
 {
        /* make sure nobody finds us on the bdi_list anymore */
        bdi_remove_from_list(bdi);
@@ -835,9 +849,19 @@ void bdi_destroy(struct backing_dev_info *bdi)
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }
+}
 
+void bdi_exit(struct backing_dev_info *bdi)
+{
+       WARN_ON_ONCE(bdi->dev);
        wb_exit(&bdi->wb);
 }
+
+void bdi_destroy(struct backing_dev_info *bdi)
+{
+       bdi_unregister(bdi);
+       bdi_exit(bdi);
+}
 EXPORT_SYMBOL(bdi_destroy);
 
 /*
index e7d1db5330254da4a8d265b8784d1eb645693447..4eb56badf37e60e63a5a1badd093d1934a21ad35 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -361,7 +361,7 @@ err:
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
-struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
+struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
 {
        unsigned long mask, offset, pfn, start = 0;
        unsigned long bitmap_maxno, bitmap_no, bitmap_count;
@@ -371,7 +371,7 @@ struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
        if (!cma || !cma->count)
                return NULL;
 
-       pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+       pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
                 count, align);
 
        if (!count)
index 71a8998cd03a6b8b0d2dbfe36a24ce70766047e8..312a716fa14c2ef0d2780832bc378c05a3d08d16 100644 (file)
@@ -394,7 +394,7 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
        list_for_each_entry(page, &pool->page_list, page_list) {
                if (dma < page->dma)
                        continue;
-               if (dma < (page->dma + pool->allocation))
+               if ((dma - page->dma) < pool->allocation)
                        return page;
        }
        return NULL;
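A small userspace illustration of why the comparison in pool_find_page() was changed: when the block base sits near the top of the address range, base + size wraps around and the old test matches unrelated low addresses, while the subtraction form does not. The 32-bit types and concrete numbers are invented for the example; the kernel code has already established dma >= page->dma via the preceding continue.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t base = 0xfffff000u;    /* block base 4 KiB below the top of the range */
        uint32_t size = 0x2000u;        /* 8 KiB block, so base + size wraps past zero */
        uint32_t addr = 0x00000800u;    /* an unrelated low address */
        uint32_t end  = base + size;    /* wraps to 0x1000 */

        /* Old-style test: false positive, addr looks below the wrapped end. */
        printf("addr < base + size : %d\n", addr < end);                           /* 1 */

        /* Form used by the patch, valid once addr >= base is known. */
        printf("addr - base < size : %d\n", addr >= base && (addr - base) < size); /* 0 */
        return 0;
}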
index 72940fb38666811b80c146bc085a1c84fc0e7ecc..327910c2400c6ce36f440383147fdc768cf14692 100644 (file)
@@ -2473,6 +2473,26 @@ ssize_t generic_perform_write(struct file *file,
                                                iov_iter_count(i));
 
 again:
+               /*
+                * Bring in the user page that we will copy from _first_.
+                * Otherwise there's a nasty deadlock on copying from the
+                * same page as we're writing to, without it being marked
+                * up-to-date.
+                *
+                * Not only is this an optimisation, but it is also required
+                * to check that the address is actually valid, when atomic
+                * usercopies are used, below.
+                */
+               if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+                       status = -EFAULT;
+                       break;
+               }
+
+               if (fatal_signal_pending(current)) {
+                       status = -EINTR;
+                       break;
+               }
+
                status = a_ops->write_begin(file, mapping, pos, bytes, flags,
                                                &page, &fsdata);
                if (unlikely(status < 0))
@@ -2480,17 +2500,8 @@ again:
 
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);
-               /*
-                * 'page' is now locked.  If we are trying to copy from a
-                * mapping of 'page' in userspace, the copy might fault and
-                * would need PageUptodate() to complete.  But, page can not be
-                * made Uptodate without acquiring the page lock, which we hold.
-                * Deadlock.  Avoid with pagefault_disable().  Fix up below with
-                * iov_iter_fault_in_readable().
-                */
-               pagefault_disable();
+
                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-               pagefault_enable();
                flush_dcache_page(page);
 
                status = a_ops->write_end(file, mapping, pos, bytes, copied,
@@ -2513,24 +2524,12 @@ again:
                         */
                        bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
                                                iov_iter_single_seg_count(i));
-                       /*
-                        * This is the fallback to recover if the copy from
-                        * userspace above faults.
-                        */
-                       if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
-                               status = -EFAULT;
-                               break;
-                       }
                        goto again;
                }
                pos += copied;
                written += copied;
 
                balance_dirty_pages_ratelimited(mapping);
-               if (fatal_signal_pending(current)) {
-                       status = -EINTR;
-                       break;
-               }
        } while (iov_iter_count(i));
 
        return written ? written : status;
index 4b06b8db9df23c8f33406586507bbaecf7f5444c..3fd0311c3ba70fa2e6238237c1afe58f0c47e75d 100644 (file)
@@ -1880,7 +1880,7 @@ static int __split_huge_page_map(struct page *page,
                 * here). But it is generally safer to never allow
                 * small and huge TLB entries for the same virtual
                 * address to be loaded simultaneously. So instead of
-                * doing "pmd_populate(); flush_tlb_range();" we first
+                * doing "pmd_populate(); flush_pmd_tlb_range();" we first
                 * mark the current pmd notpresent (atomically because
                 * here the pmd_trans_huge and pmd_trans_splitting
                 * must remain set at all times on the pmd until the
@@ -2206,7 +2206,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
             _pte++, address += PAGE_SIZE) {
                pte_t pteval = *_pte;
-               if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+               if (pte_none(pteval) || (pte_present(pteval) &&
+                               is_zero_pfn(pte_pfn(pteval)))) {
                        if (!userfaultfd_armed(vma) &&
                            ++none_or_zero <= khugepaged_max_ptes_none)
                                continue;
index 999fb0aef8f16f9a126579e54fca79ad8e4f6487..9cc773483624e4cbb1592ddde74f9c8faa21ef87 100644 (file)
@@ -3201,6 +3201,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                if (iter_vma == vma)
                        continue;
 
+               /*
+                * Shared VMAs have their own reserves and do not affect
+                * MAP_PRIVATE accounting but it is possible that a shared
+                * VMA is using the same page so check and skip such VMAs.
+                */
+               if (iter_vma->vm_flags & VM_MAYSHARE)
+                       continue;
+
                /*
                 * Unmap the page from other VMAs without their own reserves.
                 * They get marked to be SIGKILLed if they fault in these
index 6ddaeba34e097a7553d33b8add26e26a27d3c81a..c57c4423c68837d14816c5ff230435e1567e7c20 100644 (file)
@@ -644,12 +644,14 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 }
 
 /*
+ * Return page count for single (non recursive) @memcg.
+ *
  * Implementation Note: reading percpu statistics for memcg.
  *
  * Both of vmstat[] and percpu_counter has threshold and do periodic
  * synchronization to implement "quick" read. There are trade-off between
  * reading cost and precision of value. Then, we may have a chance to implement
- * a periodic synchronizion of counter in memcg's counter.
+ * a periodic synchronization of counter in memcg's counter.
  *
  * But this _read() function is used for user interface now. The user accounts
  * memory usage by memory cgroup and he _always_ requires exact value because
@@ -659,17 +661,24 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
  *
  * If there are kernel internal actions which can make use of some not-exact
  * value, and reading all cpu value can be performance bottleneck in some
- * common workload, threashold and synchonization as vmstat[] should be
+ * common workload, threshold and synchronization as vmstat[] should be
  * implemented.
  */
-static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-                                enum mem_cgroup_stat_index idx)
+static unsigned long
+mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
 {
        long val = 0;
        int cpu;
 
+       /* Per-cpu values can be negative, use a signed accumulator */
        for_each_possible_cpu(cpu)
                val += per_cpu(memcg->stat->count[idx], cpu);
+       /*
+        * Summing races with updates, so val may be negative.  Avoid exposing
+        * transient negative values.
+        */
+       if (val < 0)
+               val = 0;
        return val;
 }
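As a worked example of the clamped summation above (numbers invented): if the per-cpu deltas for one counter are +3, -7, +2 and +1 because uncharges raced onto a different CPU than the charges, the signed sum is -1. Returned through the new unsigned long interface without the clamp, that would show up as a near-ULONG_MAX value in memory.stat, so transient negatives are reported as 0 instead.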
 
@@ -1254,7 +1263,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
                for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
                        if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                                continue;
-                       pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
+                       pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
                                K(mem_cgroup_read_stat(iter, i)));
                }
 
@@ -2819,14 +2828,11 @@ static unsigned long tree_stat(struct mem_cgroup *memcg,
                               enum mem_cgroup_stat_index idx)
 {
        struct mem_cgroup *iter;
-       long val = 0;
+       unsigned long val = 0;
 
-       /* Per-cpu values can be negative, use a signed accumulator */
        for_each_mem_cgroup_tree(iter, memcg)
                val += mem_cgroup_read_stat(iter, idx);
 
-       if (val < 0) /* race ? */
-               val = 0;
        return val;
 }
 
@@ -3169,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
                if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                        continue;
-               seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
+               seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
                           mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
        }
 
@@ -3194,13 +3200,13 @@ static int memcg_stat_show(struct seq_file *m, void *v)
                           (u64)memsw * PAGE_SIZE);
 
        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-               long long val = 0;
+               unsigned long long val = 0;
 
                if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                        continue;
                for_each_mem_cgroup_tree(mi, memcg)
                        val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
-               seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
+               seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
        }
 
        for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
@@ -3381,6 +3387,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
        ret = page_counter_memparse(args, "-1", &threshold);
        if (ret)
                return ret;
+       threshold <<= PAGE_SHIFT;
 
        mutex_lock(&memcg->thresholds_lock);
 
@@ -3734,44 +3741,43 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
 /**
  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
  * @wb: bdi_writeback in question
- * @pavail: out parameter for number of available pages
+ * @pfilepages: out parameter for number of file pages
+ * @pheadroom: out parameter for number of allocatable pages according to memcg
  * @pdirty: out parameter for number of dirty pages
  * @pwriteback: out parameter for number of pages under writeback
  *
- * Determine the numbers of available, dirty, and writeback pages in @wb's
- * memcg.  Dirty and writeback are self-explanatory.  Available is a bit
- * more involved.
+ * Determine the numbers of file, headroom, dirty, and writeback pages in
+ * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
+ * is a bit more involved.
  *
- * A memcg's headroom is "min(max, high) - used".  The available memory is
- * calculated as the lowest headroom of itself and the ancestors plus the
- * number of pages already being used for file pages.  Note that this
- * doesn't consider the actual amount of available memory in the system.
- * The caller should further cap *@pavail accordingly.
+ * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
+ * headroom is calculated as the lowest headroom of itself and the
+ * ancestors.  Note that this doesn't consider the actual amount of
+ * available memory in the system.  The caller should further cap
+ * *@pheadroom accordingly.
  */
-void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail,
-                        unsigned long *pdirty, unsigned long *pwriteback)
+void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
+                        unsigned long *pheadroom, unsigned long *pdirty,
+                        unsigned long *pwriteback)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
        struct mem_cgroup *parent;
-       unsigned long head_room = PAGE_COUNTER_MAX;
-       unsigned long file_pages;
 
        *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
 
        /* this should eventually include NR_UNSTABLE_NFS */
        *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+       *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
+                                                    (1 << LRU_ACTIVE_FILE));
+       *pheadroom = PAGE_COUNTER_MAX;
 
-       file_pages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
-                                                   (1 << LRU_ACTIVE_FILE));
        while ((parent = parent_mem_cgroup(memcg))) {
                unsigned long ceiling = min(memcg->memory.limit, memcg->high);
                unsigned long used = page_counter_read(&memcg->memory);
 
-               head_room = min(head_room, ceiling - min(ceiling, used));
+               *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
                memcg = parent;
        }
-
-       *pavail = file_pages + head_room;
 }
 
 #else  /* CONFIG_CGROUP_WRITEBACK */
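Worked example for the hierarchical headroom above (numbers invented): if @wb's memcg is limited to 1 GiB with 600 MiB in use, its own headroom is 400 MiB; if its parent is limited to 2 GiB with 1.8 GiB in use, the parent contributes 200 MiB; *pheadroom therefore ends up as the minimum, 200 MiB, while *pfilepages reports the memcg's own file LRU size, and combining the two is left to the caller (mdtc_calc_avail() in mm/page-writeback.c below).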
@@ -4179,7 +4185,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        if (memcg_wb_domain_init(memcg, GFP_KERNEL))
                goto out_free_stat;
 
-       spin_lock_init(&memcg->pcp_counter_lock);
        return memcg;
 
 out_free_stat:
index 9cb27470fee991cb874676bb0cbc0f694b5e1d36..deb679c31f2ab897cafebf72643aec4f66233308 100644 (file)
@@ -2426,6 +2426,8 @@ void unmap_mapping_range(struct address_space *mapping,
        if (details.last_index < details.first_index)
                details.last_index = ULONG_MAX;
 
+
+       /* DAX uses i_mmap_lock to serialise file truncate vs page fault */
        i_mmap_lock_write(mapping);
        if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
index 7452a00bbb50c134b529c1d024dfc53fcfca093b..842ecd7aaf7fa6ac1371f6137dc155c91851505c 100644 (file)
@@ -740,6 +740,15 @@ static int move_to_new_page(struct page *newpage, struct page *page,
        if (PageSwapBacked(page))
                SetPageSwapBacked(newpage);
 
+       /*
+        * Indirectly called below, migrate_page_copy() copies PG_dirty and thus
+        * needs newpage's memcg set to transfer memcg dirty page accounting.
+        * So perform memcg migration in two steps:
+        * 1. set newpage->mem_cgroup (here)
+        * 2. clear page->mem_cgroup (below)
+        */
+       set_page_memcg(newpage, page_memcg(page));
+
        mapping = page_mapping(page);
        if (!mapping)
                rc = migrate_page(mapping, newpage, page, mode);
@@ -756,9 +765,10 @@ static int move_to_new_page(struct page *newpage, struct page *page,
                rc = fallback_migrate_page(mapping, newpage, page, mode);
 
        if (rc != MIGRATEPAGE_SUCCESS) {
+               set_page_memcg(newpage, NULL);
                newpage->mapping = NULL;
        } else {
-               mem_cgroup_migrate(page, newpage, false);
+               set_page_memcg(page, NULL);
                if (page_was_mapped)
                        remove_migration_ptes(page, newpage);
                page->mapping = NULL;
index 0a931cdd4f6baaa96cdfab2e0dd668c0abef8809..2c90357c34ea4c4d81521b7bf14d669d6565cc07 100644 (file)
@@ -145,9 +145,6 @@ struct dirty_throttle_control {
        unsigned long           pos_ratio;
 };
 
-#define DTC_INIT_COMMON(__wb)  .wb = (__wb),                           \
-                               .wb_completions = &(__wb)->completions
-
 /*
  * Length of period for aging writeout fractions of bdis. This is an
  * arbitrarily chosen number. The longer the period, the slower fractions will
@@ -157,12 +154,16 @@ struct dirty_throttle_control {
 
 #ifdef CONFIG_CGROUP_WRITEBACK
 
-#define GDTC_INIT(__wb)                .dom = &global_wb_domain,               \
-                               DTC_INIT_COMMON(__wb)
+#define GDTC_INIT(__wb)                .wb = (__wb),                           \
+                               .dom = &global_wb_domain,               \
+                               .wb_completions = &(__wb)->completions
+
 #define GDTC_INIT_NO_WB                .dom = &global_wb_domain
-#define MDTC_INIT(__wb, __gdtc)        .dom = mem_cgroup_wb_domain(__wb),      \
-                               .gdtc = __gdtc,                         \
-                               DTC_INIT_COMMON(__wb)
+
+#define MDTC_INIT(__wb, __gdtc)        .wb = (__wb),                           \
+                               .dom = mem_cgroup_wb_domain(__wb),      \
+                               .wb_completions = &(__wb)->memcg_completions, \
+                               .gdtc = __gdtc
 
 static bool mdtc_valid(struct dirty_throttle_control *dtc)
 {
@@ -213,7 +214,8 @@ static void wb_min_max_ratio(struct bdi_writeback *wb,
 
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
-#define GDTC_INIT(__wb)                DTC_INIT_COMMON(__wb)
+#define GDTC_INIT(__wb)                .wb = (__wb),                           \
+                               .wb_completions = &(__wb)->completions
 #define GDTC_INIT_NO_WB
 #define MDTC_INIT(__wb, __gdtc)
 
@@ -682,13 +684,19 @@ static unsigned long hard_dirty_limit(struct wb_domain *dom,
        return max(thresh, dom->dirty_limit);
 }
 
-/* memory available to a memcg domain is capped by system-wide clean memory */
-static void mdtc_cap_avail(struct dirty_throttle_control *mdtc)
+/*
+ * Memory which can be further allocated to a memcg domain is capped by
+ * system-wide clean memory excluding the amount being used in the domain.
+ */
+static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
+                           unsigned long filepages, unsigned long headroom)
 {
        struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
-       unsigned long clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
+       unsigned long clean = filepages - min(filepages, mdtc->dirty);
+       unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
+       unsigned long other_clean = global_clean - min(global_clean, clean);
 
-       mdtc->avail = min(mdtc->avail, clean);
+       mdtc->avail = filepages + min(headroom, other_clean);
 }
 
 /**
@@ -1562,16 +1570,16 @@ static void balance_dirty_pages(struct address_space *mapping,
                }
 
                if (mdtc) {
-                       unsigned long writeback;
+                       unsigned long filepages, headroom, writeback;
 
                        /*
                         * If @wb belongs to !root memcg, repeat the same
                         * basic calculations for the memcg domain.
                         */
-                       mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty,
-                                           &writeback);
-                       mdtc_cap_avail(mdtc);
+                       mem_cgroup_wb_stats(wb, &filepages, &headroom,
+                                           &mdtc->dirty, &writeback);
                        mdtc->dirty += writeback;
+                       mdtc_calc_avail(mdtc, filepages, headroom);
 
                        domain_dirty_limits(mdtc);
 
@@ -1893,10 +1901,11 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
                return true;
 
        if (mdtc) {
-               unsigned long writeback;
+               unsigned long filepages, headroom, writeback;
 
-               mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, &writeback);
-               mdtc_cap_avail(mdtc);
+               mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
+                                   &writeback);
+               mdtc_calc_avail(mdtc, filepages, headroom);
                domain_dirty_limits(mdtc);      /* ditto, ignore writeback */
 
                if (mdtc->dirty > mdtc->bg_thresh)
@@ -1956,7 +1965,6 @@ void laptop_mode_timer_fn(unsigned long data)
        int nr_pages = global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS);
        struct bdi_writeback *wb;
-       struct wb_iter iter;
 
        /*
         * We want to write everything out, not just down to the dirty
@@ -1965,10 +1973,12 @@ void laptop_mode_timer_fn(unsigned long data)
        if (!bdi_has_dirty_io(&q->backing_dev_info))
                return;
 
-       bdi_for_each_wb(wb, &q->backing_dev_info, &iter, 0)
+       rcu_read_lock();
+       list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
                if (wb_has_dirty_io(wb))
                        wb_start_writeback(wb, nr_pages, true,
                                           WB_REASON_LAPTOP_TIMER);
+       rcu_read_unlock();
 }
 
 /*
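As a rough illustration of the mdtc_calc_avail() arithmetic introduced above, here is a small sketch with hypothetical numbers (not taken from the patch): the memcg domain may use its own file pages plus whatever part of its headroom still fits into the clean memory left over by the rest of the system.

	/* Hypothetical values, purely to illustrate the computation above. */
	unsigned long filepages = 1000, headroom = 4000, mdtc_dirty = 200;
	unsigned long gdtc_avail = 10000, gdtc_dirty = 7000;

	unsigned long clean        = filepages - min(filepages, mdtc_dirty);   /* 800  */
	unsigned long global_clean = gdtc_avail - min(gdtc_avail, gdtc_dirty); /* 3000 */
	unsigned long other_clean  = global_clean - min(global_clean, clean);  /* 2200 */
	unsigned long avail        = filepages + min(headroom, other_clean);   /* 3200 */
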
index 6b674e00153cea664ecadcec49f7c4c35bb843c0..7d3db0247983b22b121290c2203ba2c2fb544ec0 100644 (file)
@@ -57,35 +57,59 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 }
 #endif
 
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+                          unsigned long address, pte_t *ptep)
+{
+       int young;
+       young = ptep_test_and_clear_young(vma, address, ptep);
+       if (young)
+               flush_tlb_page(vma, address);
+       return young;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
+                      pte_t *ptep)
+{
+       struct mm_struct *mm = (vma)->vm_mm;
+       pte_t pte;
+       pte = ptep_get_and_clear(mm, address, ptep);
+       if (pte_accessible(mm, pte))
+               flush_tlb_page(vma, address);
+       return pte;
+}
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Otherwise it can also help optimize the normal TLB flush
+ * in the THP regime: the stock flush_tlb_range() typically has an
+ * optimization to nuke the entire TLB if the flush span is greater than a
+ * threshold, which will likely be true for a single huge page. Thus a
+ * single THP flush would invalidate the entire TLB, which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end)    flush_tlb_range(vma, addr, end)
+#endif
+
 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
 int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
 {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        int changed = !pmd_same(*pmdp, entry);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (changed) {
                set_pmd_at(vma->vm_mm, address, pmdp, entry);
-               flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+               flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-       BUG();
-       return 0;
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-int ptep_clear_flush_young(struct vm_area_struct *vma,
-                          unsigned long address, pte_t *ptep)
-{
-       int young;
-       young = ptep_test_and_clear_young(vma, address, ptep);
-       if (young)
-               flush_tlb_page(vma, address);
-       return young;
 }
 #endif
 
@@ -94,33 +118,15 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
 {
        int young;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-#else
-       BUG();
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
-               flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+               flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return young;
 }
 #endif
 
-#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
-                      pte_t *ptep)
-{
-       struct mm_struct *mm = (vma)->vm_mm;
-       pte_t pte;
-       pte = ptep_get_and_clear(mm, address, ptep);
-       if (pte_accessible(mm, pte))
-               flush_tlb_page(vma, address);
-       return pte;
-}
-#endif
-
 #ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp)
 {
@@ -128,14 +134,12 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(!pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
-       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+       flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
 {
@@ -143,13 +147,11 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        set_pmd_at(vma->vm_mm, address, pmdp, pmd);
        /* tlb flush only to serialize against gup-fast */
-       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+       flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
 {
@@ -162,11 +164,9 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
        pmd_huge_pte(mm, pmdp) = pgtable;
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* no "address" argument so destroys page coloring of some arch */
 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 {
@@ -185,23 +185,19 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
        }
        return pgtable;
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_INVALIDATE
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
 {
        pmd_t entry = *pmdp;
        set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
-       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+       flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef pmdp_collapse_flush
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
 {
@@ -214,8 +210,8 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
-       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+       flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
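For reference, an architecture that wants the THP-specific flush path can opt out of the generic fallback above. A hypothetical sketch of the arch-side header is shown here (modeled loosely on the arch/arc example the comment cites; the declarations are assumptions, not verbatim kernel code):

	/* In the architecture's pgtable/tlbflush header (sketch): */
	#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
	extern void flush_pmd_tlb_range(struct vm_area_struct *vma,
					unsigned long start, unsigned long end);
	/* The arch can then flush only the PMD-sized span instead of the whole TLB. */
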
index 60cd846a9a4401f73a6a51539507ecc22fb308d4..24682f6f4cfd1d84d7245faea0da78e7fd17e716 100644 (file)
@@ -89,8 +89,8 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->lru);
-               if (add_to_page_cache_lru(page, mapping,
-                                       page->index, GFP_KERNEL)) {
+               if (add_to_page_cache_lru(page, mapping, page->index,
+                               GFP_KERNEL & mapping_gfp_mask(mapping))) {
                        read_cache_pages_invalidate_page(mapping, page);
                        continue;
                }
@@ -127,8 +127,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_to_page(pages);
                list_del(&page->lru);
-               if (!add_to_page_cache_lru(page, mapping,
-                                       page->index, GFP_KERNEL)) {
+               if (!add_to_page_cache_lru(page, mapping, page->index,
+                               GFP_KERNEL & mapping_gfp_mask(mapping))) {
                        mapping->a_ops->readpage(filp, page);
                }
                page_cache_release(page);
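The masking added in the two hunks above keeps readahead allocations within whatever the mapping allows; a brief sketch of the idea (the flag example is illustrative, not from the patch):

	/* mapping_gfp_mask() carries the filesystem's allocation constraints;
	 * AND-ing with GFP_KERNEL clears anything the fs has opted out of
	 * (e.g. __GFP_FS for mappings that must not recurse into the fs).
	 */
	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
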
index c77ebe6cc87cd3066f24fd9e3682699448689fa8..4fcc5dd8d5a6c2776ac2f88d9011ac23c78fb769 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
                        size += BYTES_PER_WORD;
        }
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-       if (size >= kmalloc_size(INDEX_NODE + 1)
-           && cachep->object_size > cache_line_size()
-           && ALIGN(size, cachep->align) < PAGE_SIZE) {
+       /*
+        * To activate debug pagealloc, off-slab management is a necessary
+        * requirement. In the early phase of initialization, small sized
+        * slabs are not yet set up, so it would not be possible there. Hence
+        * the size >= 256 check: it guarantees that all the necessary small
+        * sized slabs have been initialized by this point in the sequence.
+        */
+       if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+               size >= 256 && cachep->object_size > cache_line_size() &&
+               ALIGN(size, cachep->align) < PAGE_SIZE) {
                cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
                size = PAGE_SIZE;
        }
index 2faaa2976447a104ac5017ce7f4ad1e52b808b0b..af3a519e40c2ba9e08a0ff41eadeae945e813dbb 100644 (file)
@@ -2688,52 +2688,5 @@ static int __init proc_vmalloc_init(void)
 }
 module_init(proc_vmalloc_init);
 
-void get_vmalloc_info(struct vmalloc_info *vmi)
-{
-       struct vmap_area *va;
-       unsigned long free_area_size;
-       unsigned long prev_end;
-
-       vmi->used = 0;
-       vmi->largest_chunk = 0;
-
-       prev_end = VMALLOC_START;
-
-       rcu_read_lock();
-
-       if (list_empty(&vmap_area_list)) {
-               vmi->largest_chunk = VMALLOC_TOTAL;
-               goto out;
-       }
-
-       list_for_each_entry_rcu(va, &vmap_area_list, list) {
-               unsigned long addr = va->va_start;
-
-               /*
-                * Some archs keep another range for modules in vmalloc space
-                */
-               if (addr < VMALLOC_START)
-                       continue;
-               if (addr >= VMALLOC_END)
-                       break;
-
-               if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
-                       continue;
-
-               vmi->used += (va->va_end - va->va_start);
-
-               free_area_size = addr - prev_end;
-               if (vmi->largest_chunk < free_area_size)
-                       vmi->largest_chunk = free_area_size;
-
-               prev_end = va->va_end;
-       }
-
-       if (VMALLOC_END - prev_end > vmi->largest_chunk)
-               vmi->largest_chunk = VMALLOC_END - prev_end;
-
-out:
-       rcu_read_unlock();
-}
 #endif
 
index 4f5cd974e11a0adbb8a601cc92b9866ab6d67d55..fbf14485a0498bf181e81f43bc69a0522e67afd5 100644 (file)
@@ -1363,15 +1363,16 @@ static cpumask_var_t cpu_stat_off;
 
 static void vmstat_update(struct work_struct *w)
 {
-       if (refresh_cpu_vm_stats())
+       if (refresh_cpu_vm_stats()) {
                /*
                 * Counters were updated so we expect more updates
                 * to occur in the future. Keep on running the
                 * update worker thread.
                 */
-               schedule_delayed_work(this_cpu_ptr(&vmstat_work),
+               schedule_delayed_work_on(smp_processor_id(),
+                       this_cpu_ptr(&vmstat_work),
                        round_jiffies_relative(sysctl_stat_interval));
-       else {
+       } else {
                /*
                 * We did not update any counters so the app may be in
                 * a mode where it does not cause counter updates.
index b4548c739a6475446d643bd5b01ab8627ef1f08e..2dda439c8cb83b7fa72b238dcf93023088762001 100644 (file)
@@ -91,10 +91,50 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
         * autoconnect action, remove them completely. If they are, just unmark
         * them as waiting for connection, by clearing explicit_connect field.
         */
-       if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+       params->explicit_connect = false;
+
+       list_del_init(&params->action);
+
+       switch (params->auto_connect) {
+       case HCI_AUTO_CONN_EXPLICIT:
                hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
-       else
-               params->explicit_connect = false;
+               /* return instead of break to avoid duplicate scan update */
+               return;
+       case HCI_AUTO_CONN_DIRECT:
+       case HCI_AUTO_CONN_ALWAYS:
+               list_add(&params->action, &conn->hdev->pend_le_conns);
+               break;
+       case HCI_AUTO_CONN_REPORT:
+               list_add(&params->action, &conn->hdev->pend_le_reports);
+               break;
+       default:
+               break;
+       }
+
+       hci_update_background_scan(conn->hdev);
+}
+
+static void hci_conn_cleanup(struct hci_conn *conn)
+{
+       struct hci_dev *hdev = conn->hdev;
+
+       if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
+               hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
+
+       hci_chan_list_flush(conn);
+
+       hci_conn_hash_del(hdev, conn);
+
+       if (hdev->notify)
+               hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
+
+       hci_conn_del_sysfs(conn);
+
+       debugfs_remove_recursive(conn->debugfs);
+
+       hci_dev_put(hdev);
+
+       hci_conn_put(conn);
 }
 
 /* This function requires the caller holds hdev->lock */
@@ -102,8 +142,13 @@ static void hci_connect_le_scan_remove(struct hci_conn *conn)
 {
        hci_connect_le_scan_cleanup(conn);
 
-       hci_conn_hash_del(conn->hdev, conn);
-       hci_update_background_scan(conn->hdev);
+       /* We can't call hci_conn_del here since that would deadlock
+        * with trying to call cancel_delayed_work_sync(&conn->disc_work).
+        * Instead, just call hci_conn_cleanup(), which contains the bare
+        * minimum cleanup operations needed for a connection in this
+        * state.
+        */
+       hci_conn_cleanup(conn);
 }
 
 static void hci_acl_create_connection(struct hci_conn *conn)
@@ -581,27 +626,17 @@ int hci_conn_del(struct hci_conn *conn)
                }
        }
 
-       hci_chan_list_flush(conn);
-
        if (conn->amp_mgr)
                amp_mgr_put(conn->amp_mgr);
 
-       hci_conn_hash_del(hdev, conn);
-       if (hdev->notify)
-               hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
-
        skb_queue_purge(&conn->data_q);
 
-       hci_conn_del_sysfs(conn);
-
-       debugfs_remove_recursive(conn->debugfs);
-
-       if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
-               hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
-
-       hci_dev_put(hdev);
-
-       hci_conn_put(conn);
+       /* Remove the connection from the list and cleanup its remaining
+        * state. This is a separate function since for some cases like
+        * BT_CONNECT_SCAN we *only* want the cleanup part without the
+        * rest of hci_conn_del.
+        */
+       hci_conn_cleanup(conn);
 
        return 0;
 }
@@ -973,15 +1008,23 @@ static int hci_explicit_conn_params_set(struct hci_request *req,
        if (is_connected(hdev, addr, addr_type))
                return -EISCONN;
 
-       params = hci_conn_params_add(hdev, addr, addr_type);
-       if (!params)
-               return -EIO;
+       params = hci_conn_params_lookup(hdev, addr, addr_type);
+       if (!params) {
+               params = hci_conn_params_add(hdev, addr, addr_type);
+               if (!params)
+                       return -ENOMEM;
 
-       /* If we created new params, or existing params were marked as disabled,
-        * mark them to be used just once to connect.
-        */
-       if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+               /* If we created new params, mark them to be deleted in
+                * hci_connect_le_scan_cleanup. This is a different case from
+                * existing disabled params, which will stay after cleanup.
+                */
                params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+       }
+
+       /* We're trying to connect, so make sure params are at pend_le_conns */
+       if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
+           params->auto_connect == HCI_AUTO_CONN_REPORT ||
+           params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
                list_del_init(&params->action);
                list_add(&params->action, &hdev->pend_le_conns);
        }
index adcbc74c243268e8330bf760c39fa792a1c2a3a8..e837539452fb0e2880d8335da7769752a3b08110 100644 (file)
@@ -2861,13 +2861,6 @@ struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
                        return param;
        }
 
-       list_for_each_entry(param, &hdev->pend_le_reports, action) {
-               if (bacmp(&param->addr, addr) == 0 &&
-                   param->addr_type == addr_type &&
-                   param->explicit_connect)
-                       return param;
-       }
-
        return NULL;
 }
 
index 186041866315a4e107de086df52142f39d6c6ee3..bc31099d3b5bd113a8496b2ccad9e3e40364187f 100644 (file)
@@ -55,7 +55,12 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
        wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
        hci_dev_lock(hdev);
-       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+       /* Set discovery state to stopped if we're not doing LE active
+        * scanning.
+        */
+       if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
+           hdev->le_scan_type != LE_SCAN_ACTIVE)
+               hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);
 
        hci_conn_check_pending(hdev);
@@ -4648,8 +4653,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
        /* If we're not connectable only connect devices that we have in
         * our pend_le_conns list.
         */
-       params = hci_explicit_connect_lookup(hdev, addr, addr_type);
-
+       params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
+                                          addr_type);
        if (!params)
                return NULL;
 
index ccaf5a436d8f7a70799729a04ffc17583d11913f..c4fe2fee753fcfaa4233a4bb6675bbbdd29fc9d5 100644 (file)
@@ -3545,6 +3545,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                                       auth_type);
        } else {
                u8 addr_type;
+               struct hci_conn_params *p;
 
                /* Convert from L2CAP channel address type to HCI address type
                 */
@@ -3562,7 +3563,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                 * If connection parameters already exist, then they
                 * will be kept and this function does nothing.
                 */
-               hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
+               p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
+
+               if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+                       p->auto_connect = HCI_AUTO_CONN_DISABLED;
 
                conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
                                           addr_type, sec_level,
@@ -6117,14 +6121,21 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
                __hci_update_background_scan(req);
                break;
        case HCI_AUTO_CONN_REPORT:
-               list_add(&params->action, &hdev->pend_le_reports);
+               if (params->explicit_connect)
+                       list_add(&params->action, &hdev->pend_le_conns);
+               else
+                       list_add(&params->action, &hdev->pend_le_reports);
                __hci_update_background_scan(req);
                break;
        case HCI_AUTO_CONN_DIRECT:
        case HCI_AUTO_CONN_ALWAYS:
                if (!is_connected(hdev, addr, addr_type)) {
                        list_add(&params->action, &hdev->pend_le_conns);
-                       __hci_update_background_scan(req);
+                       /* If we are in the scan phase of connecting, we were
+                        * already added to pend_le_conns and are scanning.
+                        */
+                       if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT)
+                               __hci_update_background_scan(req);
                }
                break;
        }
@@ -6379,7 +6390,8 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
                        goto unlock;
                }
 
-               if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+               if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
+                   params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
                        err = cmd->cmd_complete(cmd,
                                                MGMT_STATUS_INVALID_PARAMS);
                        mgmt_pending_remove(cmd);
@@ -6415,6 +6427,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
                        if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
                                continue;
                        device_removed(sk, hdev, &p->addr, p->addr_type);
+                       if (p->explicit_connect) {
+                               p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+                               continue;
+                       }
                        list_del(&p->action);
                        list_del(&p->list);
                        kfree(p);
index 80b94e37c94aae115155454b9f4386a1b91021de..f79ccac6699fb7b171b680261db9000f7fe4c70c 100644 (file)
@@ -285,6 +285,7 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
+       case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
@@ -485,13 +486,14 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
        size_t payload_len = 0;
 
        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
-              opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE);
+              opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
+              opcode != CEPH_OSD_OP_TRUNCATE);
 
        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
-       if (opcode == CEPH_OSD_OP_WRITE)
+       if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;
 
        op->payload_len = payload_len;
@@ -670,9 +672,11 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
+       case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_TRUNCATE:
-               if (src->op == CEPH_OSD_OP_WRITE)
+               if (src->op == CEPH_OSD_OP_WRITE ||
+                   src->op == CEPH_OSD_OP_WRITEFULL)
                        request_data_len = src->extent.length;
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
@@ -681,7 +685,8 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                osd_data = &src->extent.osd_data;
-               if (src->op == CEPH_OSD_OP_WRITE)
+               if (src->op == CEPH_OSD_OP_WRITE ||
+                   src->op == CEPH_OSD_OP_WRITEFULL)
                        ceph_osdc_msg_data_add(req->r_request, osd_data);
                else
                        ceph_osdc_msg_data_add(req->r_reply, osd_data);
index 6bb6470f5b7bbbfaa60de8e33277b63aadbdb641..c14748d051e7f58343768e920e52317154ef06fb 100644 (file)
@@ -99,6 +99,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/stat.h>
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
 #include <net/xfrm.h>
@@ -681,6 +682,32 @@ int dev_get_iflink(const struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_get_iflink);
 
+/**
+ *     dev_fill_metadata_dst - Retrieve tunnel egress information.
+ *     @dev: targeted interface
+ *     @skb: The packet.
+ *
+ *     For better visibility of tunnel traffic, OVS needs to retrieve
+ *     egress tunnel information for a packet. The following API allows
+ *     the user to get this info.
+ */
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+       struct ip_tunnel_info *info;
+
+       if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
+               return -EINVAL;
+
+       info = skb_tunnel_info_unclone(skb);
+       if (!info)
+               return -ENOMEM;
+       if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
+               return -EINVAL;
+
+       return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
+}
+EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
+
 /**
  *     __dev_get_by_name       - find a device by its name
  *     @net: the applicable net namespace
index b495ab1797fae303d12a3251f09b141052c1ff55..29edf74846fc9cfef49f3fc35b4ba41de6c254af 100644 (file)
@@ -1284,7 +1284,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
 
        gstrings.len = ret;
 
-       data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+       data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
        if (!data)
                return -ENOMEM;
 
index 05a04ea871728d2c18f68571321e9435df1f3b18..bb18c368000129ebea752a0a78785524848eaab8 100644 (file)
@@ -1415,6 +1415,7 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
                return dev_forward_skb(dev, skb2);
 
        skb2->dev = dev;
+       skb_sender_cpu_clear(skb2);
        return dev_queue_xmit(skb2);
 }
 
@@ -1854,9 +1855,13 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
                goto out;
 
        /* We're copying the filter that has been originally attached,
-        * so no conversion/decode needed anymore.
+        * so no conversion/decode needed anymore. eBPF programs that
+        * have no original program cannot be dumped through this.
         */
+       ret = -EACCES;
        fprog = filter->prog->orig_prog;
+       if (!fprog)
+               goto out;
 
        ret = fprog->len;
        if (!len)
index 805a95a481076dcc229c881322c2e6ef308692bd..830f8a7c1cb173caf3df0c9044bb81f9363338d8 100644 (file)
@@ -31,7 +31,6 @@
 static const char fmt_hex[] = "%#x\n";
 static const char fmt_long_hex[] = "%#lx\n";
 static const char fmt_dec[] = "%d\n";
-static const char fmt_udec[] = "%u\n";
 static const char fmt_ulong[] = "%lu\n";
 static const char fmt_u64[] = "%llu\n";
 
@@ -202,7 +201,7 @@ static ssize_t speed_show(struct device *dev,
        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
                if (!__ethtool_get_settings(netdev, &cmd))
-                       ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
+                       ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
        }
        rtnl_unlock();
        return ret;
index dad4dd37e2aaad17b9493cb67796ed9650515a84..fab4599ba8b261dc43977af8349a336edc4d2799 100644 (file)
@@ -2958,11 +2958,12 @@ EXPORT_SYMBOL_GPL(skb_append_pagefrags);
  */
 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
 {
+       unsigned char *data = skb->data;
+
        BUG_ON(len > skb->len);
-       skb->len -= len;
-       BUG_ON(skb->len < skb->data_len);
-       skb_postpull_rcsum(skb, skb->data, len);
-       return skb->data += len;
+       __skb_pull(skb, len);
+       skb_postpull_rcsum(skb, data, len);
+       return skb->data;
 }
 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
 
index c59fa5d9c22c449b4bfa9f058575d44324cfd7f9..adb5325f49348412efb50f0eeee5a5f0fa1e50ed 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
 #include <linux/sysfs.h>
+#include <linux/phy_fixed.h>
 #include "dsa_priv.h"
 
 char dsa_driver_version[] = "0.1";
@@ -305,7 +306,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
        if (ret < 0)
                goto out;
 
-       ds->slave_mii_bus = mdiobus_alloc();
+       ds->slave_mii_bus = devm_mdiobus_alloc(parent);
        if (ds->slave_mii_bus == NULL) {
                ret = -ENOMEM;
                goto out;
@@ -314,7 +315,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
 
        ret = mdiobus_register(ds->slave_mii_bus);
        if (ret < 0)
-               goto out_free;
+               goto out;
 
 
        /*
@@ -367,10 +368,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
 
        return ret;
 
-out_free:
-       mdiobus_free(ds->slave_mii_bus);
 out:
-       kfree(ds);
        return ret;
 }
 
@@ -400,7 +398,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
        /*
         * Allocate and initialise switch state.
         */
-       ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
+       ds = devm_kzalloc(parent, sizeof(*ds) + drv->priv_size, GFP_KERNEL);
        if (ds == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -420,10 +418,47 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
 
 static void dsa_switch_destroy(struct dsa_switch *ds)
 {
+       struct device_node *port_dn;
+       struct phy_device *phydev;
+       struct dsa_chip_data *cd = ds->pd;
+       int port;
+
 #ifdef CONFIG_NET_DSA_HWMON
        if (ds->hwmon_dev)
                hwmon_device_unregister(ds->hwmon_dev);
 #endif
+
+       /* Disable configuration of the CPU and DSA ports */
+       for (port = 0; port < DSA_MAX_PORTS; port++) {
+               if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
+                       continue;
+
+               port_dn = cd->port_dn[port];
+               if (of_phy_is_fixed_link(port_dn)) {
+                       phydev = of_phy_find_device(port_dn);
+                       if (phydev) {
+                               int addr = phydev->addr;
+
+                               phy_device_free(phydev);
+                               of_node_put(port_dn);
+                               fixed_phy_del(addr);
+                       }
+               }
+       }
+
+       /* Destroy network devices for physical switch ports. */
+       for (port = 0; port < DSA_MAX_PORTS; port++) {
+               if (!(ds->phys_port_mask & (1 << port)))
+                       continue;
+
+               if (!ds->ports[port])
+                       continue;
+
+               unregister_netdev(ds->ports[port]);
+               free_netdev(ds->ports[port]);
+       }
+
+       mdiobus_unregister(ds->slave_mii_bus);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -802,10 +837,11 @@ static inline void dsa_of_remove(struct device *dev)
 }
 #endif
 
-static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
-                         struct device *parent, struct dsa_platform_data *pd)
+static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
+                        struct device *parent, struct dsa_platform_data *pd)
 {
        int i;
+       unsigned configured = 0;
 
        dst->pd = pd;
        dst->master_netdev = dev;
@@ -825,8 +861,16 @@ static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
                dst->ds[i] = ds;
                if (ds->drv->poll_link != NULL)
                        dst->link_poll_needed = 1;
+
+               ++configured;
        }
 
+       /*
+        * If no switch was found, exit cleanly
+        */
+       if (!configured)
+               return -EPROBE_DEFER;
+
        /*
         * If we use a tagging format that doesn't have an ethertype
         * field, make sure that all packets from this point on get
@@ -843,6 +887,8 @@ static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
                dst->link_poll_timer.expires = round_jiffies(jiffies + HZ);
                add_timer(&dst->link_poll_timer);
        }
+
+       return 0;
 }
 
 static int dsa_probe(struct platform_device *pdev)
@@ -883,7 +929,7 @@ static int dsa_probe(struct platform_device *pdev)
                goto out;
        }
 
-       dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+       dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
        if (dst == NULL) {
                dev_put(dev);
                ret = -ENOMEM;
@@ -892,7 +938,9 @@ static int dsa_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dst);
 
-       dsa_setup_dst(dst, dev, &pdev->dev, pd);
+       ret = dsa_setup_dst(dst, dev, &pdev->dev, pd);
+       if (ret)
+               goto out;
 
        return 0;
 
@@ -914,7 +962,7 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
        for (i = 0; i < dst->pd->nr_chips; i++) {
                struct dsa_switch *ds = dst->ds[i];
 
-               if (ds != NULL)
+               if (ds)
                        dsa_switch_destroy(ds);
        }
 }
index cce97385f7436445f22c17605b5ee4da48c80cac..7d91f4612ac07406cfffd90defb6aa1d7436cb36 100644 (file)
@@ -458,12 +458,17 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state)
 static int dsa_slave_port_attr_set(struct net_device *dev,
                                   struct switchdev_attr *attr)
 {
-       int ret = 0;
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       int ret;
 
        switch (attr->id) {
        case SWITCHDEV_ATTR_PORT_STP_STATE:
-               if (attr->trans == SWITCHDEV_TRANS_COMMIT)
-                       ret = dsa_slave_stp_update(dev, attr->u.stp_state);
+               if (attr->trans == SWITCHDEV_TRANS_PREPARE)
+                       ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP;
+               else
+                       ret = ds->drv->port_stp_update(ds, p->port,
+                                                      attr->u.stp_state);
                break;
        default:
                ret = -EOPNOTSUPP;
index f03db8b7abee68806468c104470da4054a8bf755..0c9c3482e41997a671c23be7dc0387edb1403916 100644 (file)
@@ -312,7 +312,7 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip,
        if (!skb)
                return;
 
-       skb_dst_set(skb, dst);
+       skb_dst_set(skb, dst_clone(dst));
        arp_xmit(skb);
 }
 
@@ -384,7 +384,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
        }
 
        if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
-               dst = dst_clone(skb_dst(skb));
+               dst = skb_dst(skb);
        arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
                     dst_hw, dev->dev_addr, NULL, dst);
 }
@@ -811,7 +811,7 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
                                } else {
                                        pneigh_enqueue(&arp_tbl,
                                                       in_dev->arp_parms, skb);
-                                       return 0;
+                                       goto out_free_dst;
                                }
                                goto out;
                        }
@@ -865,6 +865,8 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
 
 out:
        consume_skb(skb);
+out_free_dst:
+       dst_release(reply_dst);
        return 0;
 }
 
index 6fcbd215cdbc501fe6541054208d68d965b5286e..690bcbc59f26d1add82e9ebaa82e2c072b84ca47 100644 (file)
@@ -340,6 +340,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
        fl4.flowi4_tos = tos;
        fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
        fl4.flowi4_tun_key.tun_id = 0;
+       fl4.flowi4_flags = 0;
 
        no_addr = idev->ifa_list == NULL;
 
index 6c2af797f2f92b93cf4ea28d1d5deee4e725f757..744e5936c10d7ec555d1ca621f5bd4be57f1c72b 100644 (file)
@@ -1569,7 +1569,7 @@ static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key)
        do {
                /* record parent and next child index */
                pn = n;
-               cindex = key ? get_index(key, pn) : 0;
+               cindex = (key > pn->key) ? get_index(key, pn) : 0;
 
                if (cindex >> pn->bits)
                        break;
index 5aa46d4b44efb99702ccd89005528f20ae422a0e..5a8ee3282550880a7749b8d6a9086dc413661519 100644 (file)
@@ -36,7 +36,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                                  SKB_GSO_TCP_ECN |
                                  SKB_GSO_GRE |
                                  SKB_GSO_GRE_CSUM |
-                                 SKB_GSO_IPIP)))
+                                 SKB_GSO_IPIP |
+                                 SKB_GSO_SIT)))
                goto out;
 
        if (!skb->encapsulation)
index 7bb9c39e0a4d1a6a30b7aece9d3b0c17e447c20a..61b45a17fc738e17146a5e1f1cf97774f338179e 100644 (file)
@@ -577,21 +577,22 @@ EXPORT_SYMBOL(inet_rtx_syn_ack);
 static bool reqsk_queue_unlink(struct request_sock_queue *queue,
                               struct request_sock *req)
 {
-       struct listen_sock *lopt = queue->listen_opt;
        struct request_sock **prev;
+       struct listen_sock *lopt;
        bool found = false;
 
        spin_lock(&queue->syn_wait_lock);
-
-       for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
-            prev = &(*prev)->dl_next) {
-               if (*prev == req) {
-                       *prev = req->dl_next;
-                       found = true;
-                       break;
+       lopt = queue->listen_opt;
+       if (lopt) {
+               for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
+                    prev = &(*prev)->dl_next) {
+                       if (*prev == req) {
+                               *prev = req->dl_next;
+                               found = true;
+                               break;
+                       }
                }
        }
-
        spin_unlock(&queue->syn_wait_lock);
        if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
                reqsk_put(req);
index bd0679d90519b170dc98369e9b438e4c31b152b9..614521437e30159c5234e9b24fcc41bad7ea6c6b 100644 (file)
@@ -498,10 +498,26 @@ static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
                                        csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
+static struct rtable *gre_get_rt(struct sk_buff *skb,
+                                struct net_device *dev,
+                                struct flowi4 *fl,
+                                const struct ip_tunnel_key *key)
+{
+       struct net *net = dev_net(dev);
+
+       memset(fl, 0, sizeof(*fl));
+       fl->daddr = key->u.ipv4.dst;
+       fl->saddr = key->u.ipv4.src;
+       fl->flowi4_tos = RT_TOS(key->tos);
+       fl->flowi4_mark = skb->mark;
+       fl->flowi4_proto = IPPROTO_GRE;
+
+       return ip_route_output_key(net, fl);
+}
+
 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip_tunnel_info *tun_info;
-       struct net *net = dev_net(dev);
        const struct ip_tunnel_key *key;
        struct flowi4 fl;
        struct rtable *rt;
@@ -516,14 +532,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
                goto err_free_skb;
 
        key = &tun_info->key;
-       memset(&fl, 0, sizeof(fl));
-       fl.daddr = key->u.ipv4.dst;
-       fl.saddr = key->u.ipv4.src;
-       fl.flowi4_tos = RT_TOS(key->tos);
-       fl.flowi4_mark = skb->mark;
-       fl.flowi4_proto = IPPROTO_GRE;
-
-       rt = ip_route_output_key(net, &fl);
+       rt = gre_get_rt(skb, dev, &fl, key);
        if (IS_ERR(rt))
                goto err_free_skb;
 
@@ -566,6 +575,24 @@ err_free_skb:
        dev->stats.tx_dropped++;
 }
 
+static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+       struct ip_tunnel_info *info = skb_tunnel_info(skb);
+       struct rtable *rt;
+       struct flowi4 fl4;
+
+       if (ip_tunnel_info_af(info) != AF_INET)
+               return -EINVAL;
+
+       rt = gre_get_rt(skb, dev, &fl4, &info->key);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
+
+       ip_rt_put(rt);
+       info->key.u.ipv4.src = fl4.saddr;
+       return 0;
+}
+
 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
                              struct net_device *dev)
 {
@@ -1023,6 +1050,7 @@ static const struct net_device_ops gre_tap_netdev_ops = {
        .ndo_change_mtu         = ip_tunnel_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
        .ndo_get_iflink         = ip_tunnel_get_iflink,
+       .ndo_fill_metadata_dst  = gre_fill_metadata_dst,
 };
 
 static void ipgre_tap_setup(struct net_device *dev)
index 690d27d3f2f90d99612de8ed4a32dec0596a680a..a3558417653567ffe3a83d06515fb1b68ec36dcf 100644 (file)
@@ -75,6 +75,7 @@ endif # NF_TABLES
 
 config NF_DUP_IPV4
        tristate "Netfilter IPv4 packet duplication to alternate destination"
+       depends on !NF_CONNTRACK || NF_CONNTRACK
        help
          This option enables the nf_dup_ipv4 core, which duplicates an IPv4
          packet to be rerouted to another destination.
index 8618fd150c965015ca2ee256499add4186f4810a..c4ffc9de165420f5839006ec053859aff77147fe 100644 (file)
@@ -61,9 +61,7 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
        if (FIB_RES_DEV(res) == dev)
                dev_match = true;
 #endif
-       if (dev_match || flags & XT_RPFILTER_LOOSE)
-               return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
-       return dev_match;
+       return dev_match || flags & XT_RPFILTER_LOOSE;
 }
 
 static bool rpfilter_is_local(const struct sk_buff *skb)
index c6ad99ad0ffb713500646af1bfc40bbbb12880f4..c81deb85acb4da84480f2f54555f4d7cdcbbb209 100644 (file)
@@ -1737,6 +1737,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        fl4.flowi4_mark = skb->mark;
        fl4.flowi4_tos = tos;
        fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+       fl4.flowi4_flags = 0;
        fl4.daddr = daddr;
        fl4.saddr = saddr;
        err = fib_lookup(net, &fl4, &res, 0);
index 7092a61c4dc8465fcf17ff71b289cf25bbb8b559..7e538f71f5fbae087c3e3e4367d60e08cd609ac5 100644 (file)
@@ -209,7 +209,7 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
 
                /* alpha = (1 - g) * alpha + g * F */
 
-               alpha -= alpha >> dctcp_shift_g;
+               alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
                if (bytes_ecn) {
                        /* If dctcp_shift_g == 1, a 32bit value would overflow
                         * after 8 Mbytes.
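A small worked example of the min_not_zero() change above (hypothetical values, only to show why the old subtraction could leave alpha stuck above zero):

	u32 alpha = 3;   /* small residual alpha      */
	u32 g = 4;       /* example dctcp_shift_g     */

	/* old: alpha -= alpha >> g;  subtracts 0, so alpha never decays */
	alpha -= min_not_zero(alpha, alpha >> g);  /* new: alpha becomes 0 */
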
index 1100ffe4a722d7bbae1ec30ee165ecc565cc2e72..3dbee0d83b15b0cbd2a1008cab5eeb8f9365c878 100644 (file)
@@ -3405,7 +3405,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
         */
        tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
        skb_mstamp_get(&skb->skb_mstamp);
-       NET_INC_STATS_BH(sock_net(sk), mib);
+       NET_INC_STATS(sock_net(sk), mib);
        return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
 
index 2878dbfffeb7e769a32079f1a6b80061136a7efc..41a261355662eb42fae031fdd30132767469f98e 100644 (file)
@@ -30,6 +30,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 
        mtu = dst_mtu(skb_dst(skb));
        if (skb->len > mtu) {
+               skb->protocol = htons(ETH_P_IP);
+
                if (skb->sk)
                        xfrm_local_error(skb, mtu);
                else
index 900113376d4e0e528dc5a5d349687ae908ca08d6..36b85bd05ac8a320b1b910c7d3454da1139ecd3c 100644 (file)
@@ -3119,6 +3119,8 @@ static void addrconf_gre_config(struct net_device *dev)
        }
 
        addrconf_addr_gen(idev, true);
+       if (dev->flags & IFF_POINTOPOINT)
+               addrconf_add_mroute(dev);
 }
 #endif
 
index 9f777ec59a59d24566d87643889a8c591dd52637..ed33abf57abd7d7ec71685a7180cf88ec132626c 100644 (file)
@@ -32,6 +32,7 @@ struct fib6_rule {
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags, pol_lookup_t lookup)
 {
+       struct rt6_info *rt;
        struct fib_lookup_arg arg = {
                .lookup_ptr = lookup,
                .flags = FIB_LOOKUP_NOREF,
@@ -40,11 +41,21 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
        fib_rules_lookup(net->ipv6.fib6_rules_ops,
                         flowi6_to_flowi(fl6), flags, &arg);
 
-       if (arg.result)
-               return arg.result;
+       rt = arg.result;
 
-       dst_hold(&net->ipv6.ip6_null_entry->dst);
-       return &net->ipv6.ip6_null_entry->dst;
+       if (!rt) {
+               dst_hold(&net->ipv6.ip6_null_entry->dst);
+               return &net->ipv6.ip6_null_entry->dst;
+       }
+
+       if (rt->rt6i_flags & RTF_REJECT &&
+           rt->dst.error == -EAGAIN) {
+               ip6_rt_put(rt);
+               rt = net->ipv6.ip6_null_entry;
+               dst_hold(&rt->dst);
+       }
+
+       return &rt->dst;
 }
 
 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
index 7d2e0023c72dbe2e466b35ffb1c6f0c0446af6da..6cedc62b2abb1c3520647b4046c1f027ffe1295b 100644 (file)
@@ -285,7 +285,17 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags, pol_lookup_t lookup)
 {
-       return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
+       struct rt6_info *rt;
+
+       rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
+       if (rt->rt6i_flags & RTF_REJECT &&
+           rt->dst.error == -EAGAIN) {
+               ip6_rt_put(rt);
+               rt = net->ipv6.ip6_null_entry;
+               dst_hold(&rt->dst);
+       }
+
+       return &rt->dst;
 }
 
 static void __net_init fib6_tables_init(struct net *net)
index 92b1aa38f121507b662e2c964423952bf995b81e..f84ec4e9b2de7653d1ad8b2348977418827676c2 100644 (file)
@@ -376,6 +376,9 @@ int ip6_forward(struct sk_buff *skb)
        if (skb->pkt_type != PACKET_HOST)
                goto drop;
 
+       if (unlikely(skb->sk))
+               goto drop;
+
        if (skb_warn_if_lro(skb))
                goto drop;
 
@@ -581,6 +584,8 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
                if (np->frag_size)
                        mtu = np->frag_size;
        }
+       if (mtu < hlen + sizeof(struct frag_hdr) + 8)
+               goto fail_toobig;
        mtu -= hlen + sizeof(struct frag_hdr);
 
        frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
@@ -874,7 +879,8 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
 #ifdef CONFIG_IPV6_SUBTREES
            ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
 #endif
-           (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
+          (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
+             (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
                dst_release(dst);
                dst = NULL;
        }
index 96833e4b31939a191eaf7de297ac438d4aa41fa4..f6a024e141e595541009cb24c172e0c52a8a879f 100644 (file)
@@ -58,6 +58,7 @@ endif # NF_TABLES
 
 config NF_DUP_IPV6
        tristate "Netfilter IPv6 packet duplication to alternate destination"
+       depends on !NF_CONNTRACK || NF_CONNTRACK
        help
          This option enables the nf_dup_ipv6 core, which duplicates an IPv6
          packet to be rerouted to another destination.
index 701cd2bae0a9224d56005f67d8b9f5e71f45825f..c7196ad1d69f8467a6971cf40f1c5ffdd14b4a92 100644 (file)
@@ -646,6 +646,7 @@ void nf_ct_frag6_consume_orig(struct sk_buff *skb)
                s = s2;
        }
 }
+EXPORT_SYMBOL_GPL(nf_ct_frag6_consume_orig);
 
 static int nf_ct_net_init(struct net *net)
 {
index f204089e854cfbf80229cd82480aaaf57249adef..946880ad48acda725eb66f9e2d0a8fd0f2b4ec40 100644 (file)
@@ -142,6 +142,9 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
        struct net_device *loopback_dev = net->loopback_dev;
        int cpu;
 
+       if (dev == loopback_dev)
+               return;
+
        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
                struct rt6_info *rt;
@@ -151,14 +154,12 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
                        struct inet6_dev *rt_idev = rt->rt6i_idev;
                        struct net_device *rt_dev = rt->dst.dev;
 
-                       if (rt_idev && (rt_idev->dev == dev || !dev) &&
-                           rt_idev->dev != loopback_dev) {
+                       if (rt_idev->dev == dev) {
                                rt->rt6i_idev = in6_dev_get(loopback_dev);
                                in6_dev_put(rt_idev);
                        }
 
-                       if (rt_dev && (rt_dev == dev || !dev) &&
-                           rt_dev != loopback_dev) {
+                       if (rt_dev == dev) {
                                rt->dst.dev = loopback_dev;
                                dev_hold(rt->dst.dev);
                                dev_put(rt_dev);
@@ -247,12 +248,6 @@ static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
 {
 }
 
-static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
-                                        unsigned long old)
-{
-       return NULL;
-}
-
 static struct dst_ops ip6_dst_blackhole_ops = {
        .family                 =       AF_INET6,
        .destroy                =       ip6_dst_destroy,
@@ -261,7 +256,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
        .default_advmss         =       ip6_default_advmss,
        .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
        .redirect               =       ip6_rt_blackhole_redirect,
-       .cow_metrics            =       ip6_rt_blackhole_cow_metrics,
+       .cow_metrics            =       dst_cow_metrics_generic,
        .neigh_lookup           =       ip6_neigh_lookup,
 };
 
@@ -318,6 +313,15 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
 
 #endif
 
+static void rt6_info_init(struct rt6_info *rt)
+{
+       struct dst_entry *dst = &rt->dst;
+
+       memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
+       INIT_LIST_HEAD(&rt->rt6i_siblings);
+       INIT_LIST_HEAD(&rt->rt6i_uncached);
+}
+
 /* allocate dst with ip6_dst_ops */
 static struct rt6_info *__ip6_dst_alloc(struct net *net,
                                        struct net_device *dev,
@@ -326,13 +330,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        0, DST_OBSOLETE_FORCE_CHK, flags);
 
-       if (rt) {
-               struct dst_entry *dst = &rt->dst;
+       if (rt)
+               rt6_info_init(rt);
 
-               memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
-               INIT_LIST_HEAD(&rt->rt6i_siblings);
-               INIT_LIST_HEAD(&rt->rt6i_uncached);
-       }
        return rt;
 }
 
@@ -1068,6 +1068,9 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
        fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
        saved_fn = fn;
 
+       if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
+               oif = 0;
+
 redo_rt6_select:
        rt = rt6_select(fn, oif, strict);
        if (rt->rt6i_nsiblings)
@@ -1190,13 +1193,16 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
                                    struct flowi6 *fl6)
 {
        int flags = 0;
+       bool any_src;
 
        fl6->flowi6_iif = LOOPBACK_IFINDEX;
 
-       if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
+       any_src = ipv6_addr_any(&fl6->saddr);
+       if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
+           (fl6->flowi6_oif && any_src))
                flags |= RT6_LOOKUP_F_IFACE;
 
-       if (!ipv6_addr_any(&fl6->saddr))
+       if (!any_src)
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        else if (sk)
                flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
@@ -1212,24 +1218,20 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
 
        rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
        if (rt) {
-               new = &rt->dst;
-
-               memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
+               rt6_info_init(rt);
 
+               new = &rt->dst;
                new->__use = 1;
                new->input = dst_discard;
                new->output = dst_discard_sk;
 
-               if (dst_metrics_read_only(&ort->dst))
-                       new->_metrics = ort->dst._metrics;
-               else
-                       dst_copy_metrics(new, &ort->dst);
+               dst_copy_metrics(new, &ort->dst);
                rt->rt6i_idev = ort->rt6i_idev;
                if (rt->rt6i_idev)
                        in6_dev_hold(rt->rt6i_idev);
 
                rt->rt6i_gateway = ort->rt6i_gateway;
-               rt->rt6i_flags = ort->rt6i_flags;
+               rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
                rt->rt6i_metric = 0;
 
                memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -2621,7 +2623,8 @@ void rt6_ifdown(struct net *net, struct net_device *dev)
 
        fib6_clean_all(net, fib6_ifdown, &adn);
        icmp6_clean_all(fib6_ifdown, &adn);
-       rt6_uncached_list_flush_dev(net, dev);
+       if (dev)
+               rt6_uncached_list_flush_dev(net, dev);
 }
 
 struct rt6_mtu_change_arg {
index 09c76a7b474dbcb12cae8aeba6fcba375d0d329a..e15feb7b413dd1a93a376f2ab6aabcbc3c3bb944 100644 (file)
@@ -79,6 +79,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
 
        if (!skb->ignore_df && skb->len > mtu) {
                skb->dev = dst->dev;
+               skb->protocol = htons(ETH_P_IPV6);
 
                if (xfrm6_local_dontfrag(skb))
                        xfrm6_local_rxpmtu(skb, mtu);
@@ -136,6 +137,7 @@ static int __xfrm6_output(struct sock *sk, struct sk_buff *skb)
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_state *x = dst->xfrm;
        int mtu;
+       bool toobig;
 
 #ifdef CONFIG_NETFILTER
        if (!x) {
@@ -144,25 +146,29 @@ static int __xfrm6_output(struct sock *sk, struct sk_buff *skb)
        }
 #endif
 
+       if (x->props.mode != XFRM_MODE_TUNNEL)
+               goto skip_frag;
+
        if (skb->protocol == htons(ETH_P_IPV6))
                mtu = ip6_skb_dst_mtu(skb);
        else
                mtu = dst_mtu(skb_dst(skb));
 
-       if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
+       toobig = skb->len > mtu && !skb_is_gso(skb);
+
+       if (toobig && xfrm6_local_dontfrag(skb)) {
                xfrm6_local_rxpmtu(skb, mtu);
                return -EMSGSIZE;
-       } else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
+       } else if (!skb->ignore_df && toobig && skb->sk) {
                xfrm_local_error(skb, mtu);
                return -EMSGSIZE;
        }
 
-       if (x->props.mode == XFRM_MODE_TUNNEL &&
-           ((skb->len > mtu && !skb_is_gso(skb)) ||
-               dst_allfrag(skb_dst(skb)))) {
+       if (toobig || dst_allfrag(skb_dst(skb)))
                return ip6_fragment(sk, skb,
                                    x->outer_mode->afinfo->output_finish);
-       }
+
+skip_frag:
        return x->outer_mode->afinfo->output_finish(sk, skb);
 }
 
index 30caa289c5dbf589270768ce90d15f2990341231..da55e0c85bb8edca213eae4686c46073e2af1650 100644 (file)
@@ -37,6 +37,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
+       fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
        memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
        if (saddr)
                memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
@@ -178,7 +179,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
                        return;
 
                case IPPROTO_ICMPV6:
-                       if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
+                       if (!onlyproto && (nh + offset + 2 < skb->data ||
+                           pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
                                u8 *icmp;
 
                                nh = skb_network_header(skb);
@@ -192,7 +194,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 #if IS_ENABLED(CONFIG_IPV6_MIP6)
                case IPPROTO_MH:
                        offset += ipv6_optlen(exthdr);
-                       if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
+                       if (!onlyproto && (nh + offset + 3 < skb->data ||
+                           pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
                                struct ip6_mh *mh;
 
                                nh = skb_network_header(skb);
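
The two _decode_session6() hunks above make the pskb_may_pull() calls safe when skb->data has already been advanced past the header being parsed: pskb_may_pull() takes an unsigned length, so "nh + offset + N - skb->data" would wrap to a huge value whenever those bytes already sit before the current data pointer, and the pull would spuriously fail. Checking the pointer order first treats already-linearized bytes as available. A minimal sketch of that check follows; may_pull_to() is a hypothetical helper name, not a kernel API.

#include <linux/skbuff.h>

/* True when the packet bytes up to 'end' are accessible: either they
 * already lie before skb->data (pulled earlier) or they can be pulled
 * into the linear area now.  Illustrative helper only.
 */
static bool may_pull_to(struct sk_buff *skb, unsigned char *end)
{
	/* If end < skb->data, the unsigned subtraction below would wrap,
	 * so skip the pull entirely.
	 */
	return end < skb->data || pskb_may_pull(skb, end - skb->data);
}
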
index a26c401ef4a4431b2957d5b46c05c0c2a35c90bd..43964594aa12d9864c00a3b778d77060084e6a14 100644 (file)
@@ -1839,7 +1839,7 @@ static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off)
        for (element = hashbin_get_first(iter->hashbin);
             element != NULL;
             element = hashbin_get_next(iter->hashbin)) {
-               if (!off || *off-- == 0) {
+               if (!off || (*off)-- == 0) {
                        /* NB: hashbin left locked */
                        return element;
                }
index 83a70688784b8449603e13f32ec26ff6fce06639..f9c9ecb0cdd3b3eea618538fda2e884583f9bc09 100644 (file)
@@ -261,7 +261,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
 
                err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
 
-               /* Error is cleare after succecful sending to at least one
+               /* Error is cleared after successful sending to at least one
                 * registered KM */
                if ((broadcast_flags & BROADCAST_REGISTERED) && err)
                        err = err2;
index f6b090df3930d32dc1fda0c8069ad1f7b3246d41..afca2eb4dfa777c75288dfb6fce9636b309a2ebc 100644 (file)
@@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
        tunnel = container_of(work, struct l2tp_tunnel, del_work);
        sk = l2tp_tunnel_sock_lookup(tunnel);
        if (!sk)
-               return;
+               goto out;
 
        sock = sk->sk_socket;
 
@@ -1341,6 +1341,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
        }
 
        l2tp_tunnel_sock_put(sk);
+out:
+       l2tp_tunnel_dec_refcount(tunnel);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
@@ -1636,8 +1638,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
+       l2tp_tunnel_inc_refcount(tunnel);
        l2tp_tunnel_closeall(tunnel);
-       return (false == queue_work(l2tp_wq, &tunnel->del_work));
+       if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
+               l2tp_tunnel_dec_refcount(tunnel);
+               return 1;
+       }
+       return 0;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
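
The l2tp_core.c hunks above close a use-after-free window: l2tp_tunnel_delete() now pins the tunnel with an extra reference before queueing the deferred delete work, and that reference is dropped either at the end of l2tp_tunnel_del_work() (including the early-return path via the new "out" label) or straight away when queue_work() reports the work was already pending. A compact sketch of the same lifetime pattern; struct obj, obj_wq and the helper names are placeholders, not the l2tp API.

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
	atomic_t ref_count;
	struct work_struct del_work;	/* set up with INIT_WORK(..., obj_del_work_fn) at creation */
};

static struct workqueue_struct *obj_wq;

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->ref_count))
		kfree(o);
}

static void obj_del_work_fn(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, del_work);

	/* ... tear the object down; early exits are fine ... */
	obj_put(o);			/* drop the reference taken in obj_delete() */
}

static int obj_delete(struct obj *o)
{
	atomic_inc(&o->ref_count);	/* keep 'o' alive until obj_del_work_fn() has run */
	if (!queue_work(obj_wq, &o->del_work)) {
		obj_put(o);		/* work already pending; undo our pin */
		return 1;
	}
	return 0;
}
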
 
index ced6bf3be8d6cf5d3d9fc80b6c46f48c4e567aef..1560c8482bcb9fd587d1e278a499f99fd9958803 100644 (file)
@@ -149,7 +149,7 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
 
        for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) {
                if (test_bit(i, local->hw.flags))
-                       pos += scnprintf(pos, end - pos, "%s",
+                       pos += scnprintf(pos, end - pos, "%s\n",
                                         hw_flag_names[i]);
        }
 
index 8ba5832435095f10e94f782e07d92a4f742cad3a..3ed7ddfbf8e840d4c910b0e9452c06843031c91d 100644 (file)
@@ -101,6 +101,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
         * when it wakes up for the next time.
         */
        set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
+       ieee80211_clear_fast_xmit(sta);
 
        /*
         * This code races in the following way:
index 84e0e8c7fb236952dfc1cfcb23204623e80d7867..7892eb8ed4c8b1fa416ebcb297fab9f1cce91743 100644 (file)
@@ -1218,8 +1218,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 
        if (!tx->sta)
                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
-       else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT))
+       else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+               ieee80211_check_fast_xmit(tx->sta);
+       }
 
        info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
 
@@ -2451,7 +2453,8 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
 
        if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
            test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
-           test_sta_flag(sta, WLAN_STA_PS_DELIVER))
+           test_sta_flag(sta, WLAN_STA_PS_DELIVER) ||
+           test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT))
                goto out;
 
        if (sdata->noack_map)
index 8e47f8113495739082572d269797e00160251751..21a085686dc1b543439f33e448531bc64a273684 100644 (file)
@@ -152,6 +152,8 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 #endif
        synchronize_net();
        nf_queue_nf_hook_drop(net, &entry->ops);
+       /* other cpu might still process nfqueue verdict that used reg */
+       synchronize_net();
        kfree(entry);
 }
 EXPORT_SYMBOL(nf_unregister_net_hook);
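
The second synchronize_net() above is needed because nf_queue_nf_hook_drop() issues verdicts for queued packets, and another CPU may still be executing one of those verdicts with a pointer to the entry after the first grace period has elapsed; only after a second grace period is it safe to kfree() it. The general shape is "unpublish, wait, flush deferred users, wait again, free". A rough sketch under that assumption, with struct entry and flush_deferred_users() as made-up names:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct entry {
	struct list_head list;
};

void flush_deferred_users(struct entry *e);	/* hypothetical: drops queued work holding 'e' */

static void unpublish_and_free(struct entry *e)
{
	list_del_rcu(&e->list);		/* no new readers can find the entry */
	synchronize_rcu();		/* in-flight readers have finished */
	flush_deferred_users(e);	/* cancel deferred users that still reference 'e' */
	synchronize_rcu();		/* CPUs still acting on that deferred work are done */
	kfree(e);
}
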
index a1fe5377a2b3376d29f29b18d77ebfe08fc988e1..5a30ce6e8c90d278ac37cb0115f45a4390f9a6d7 100644 (file)
@@ -297,7 +297,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
              ip_set_timeout_expired(ext_timeout(n, set))))
                n =  NULL;
 
-       e = kzalloc(set->dsize, GFP_KERNEL);
+       e = kzalloc(set->dsize, GFP_ATOMIC);
        if (!e)
                return -ENOMEM;
        e->id = d->id;
index 8f060d7f9a0e107a410d3ffe71722f49059f7bc8..fafe33bdb61989e680dc4b26dbe99dcc1d4064b5 100644 (file)
@@ -2371,7 +2371,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
                int pos, idx, shift;
 
                err = 0;
-               netlink_table_grab();
+               netlink_lock_table();
                for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
                        if (len - pos < sizeof(u32))
                                break;
@@ -2386,7 +2386,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
                }
                if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
                        err = -EFAULT;
-               netlink_table_ungrab();
+               netlink_unlock_table();
                break;
        }
        case NETLINK_CAP_ACK:
@@ -2785,6 +2785,7 @@ static int netlink_dump(struct sock *sk)
        struct sk_buff *skb = NULL;
        struct nlmsghdr *nlh;
        int len, err = -ENOBUFS;
+       int alloc_min_size;
        int alloc_size;
 
        mutex_lock(nlk->cb_mutex);
@@ -2793,9 +2794,6 @@ static int netlink_dump(struct sock *sk)
                goto errout_skb;
        }
 
-       cb = &nlk->cb;
-       alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
-
        if (!netlink_rx_is_mmaped(sk) &&
            atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                goto errout_skb;
@@ -2805,23 +2803,35 @@ static int netlink_dump(struct sock *sk)
         * to reduce number of system calls on dump operations, if user
         * ever provided a big enough buffer.
         */
-       if (alloc_size < nlk->max_recvmsg_len) {
-               skb = netlink_alloc_skb(sk,
-                                       nlk->max_recvmsg_len,
-                                       nlk->portid,
+       cb = &nlk->cb;
+       alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
+
+       if (alloc_min_size < nlk->max_recvmsg_len) {
+               alloc_size = nlk->max_recvmsg_len;
+               skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
                                        GFP_KERNEL |
                                        __GFP_NOWARN |
                                        __GFP_NORETRY);
-               /* available room should be exact amount to avoid MSG_TRUNC */
-               if (skb)
-                       skb_reserve(skb, skb_tailroom(skb) -
-                                        nlk->max_recvmsg_len);
        }
-       if (!skb)
+       if (!skb) {
+               alloc_size = alloc_min_size;
                skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
                                        GFP_KERNEL);
+       }
        if (!skb)
                goto errout_skb;
+
+       /* Trim skb to allocated size. User is expected to provide buffer as
+        * large as max(min_dump_alloc, 16KiB (max_recvmsg_len capped at
+        * netlink_recvmsg())). dump will pack as many smaller messages as
+        * could fit within the allocated skb. skb is typically allocated
+        * with larger space than required (could be as much as near 2x the
+        * requested size with align to next power of 2 approach). Allowing
+        * dump to use the excess space makes it difficult for a user to have a
+        * reasonable static buffer based on the expected largest dump of a
+        * single netdev. The outcome is MSG_TRUNC error.
+        */
+       skb_reserve(skb, skb_tailroom(skb) - alloc_size);
        netlink_skb_set_owner_r(skb, sk);
 
        len = cb->dump(skb, cb);
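
The netlink_dump() hunk above allocates in two steps: first try the largest size the caller has ever supplied (max_recvmsg_len), then fall back to the minimum dump size, and finally hand back any allocator rounding slack with skb_reserve() so the dump never packs more than the chosen size into one skb, which is what used to surface as MSG_TRUNC. A rough sketch of that allocate-then-trim pattern, using plain alloc_skb() in place of netlink_alloc_skb() and made-up parameter names:

#include <linux/skbuff.h>

/* Allocate a dump buffer of 'preferred' bytes if memory allows, else
 * 'minimum', and trim the tailroom to exactly the size picked.
 */
static struct sk_buff *dump_alloc(unsigned int minimum, unsigned int preferred)
{
	struct sk_buff *skb = NULL;
	unsigned int size = 0;

	if (minimum < preferred) {
		size = preferred;
		skb = alloc_skb(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	}
	if (!skb) {
		size = minimum;
		skb = alloc_skb(size, GFP_KERNEL);
	}
	if (!skb)
		return NULL;

	/* alloc_skb() may round the buffer up; drop the excess tailroom so
	 * message packing stops exactly at 'size' bytes.
	 */
	skb_reserve(skb, skb_tailroom(skb) - size);
	return skb;
}
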
index 315f5330b6e5400eaf28ce7d0290038b7113a6fd..dba635d086b2f165940ce1dea67c92c48392e4cb 100644 (file)
@@ -684,7 +684,7 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
 {
        if (skb_network_offset(skb) > MAX_L2_LEN) {
                OVS_NLERR(1, "L2 header too long to fragment");
-               return;
+               goto err;
        }
 
        if (ethertype == htons(ETH_P_IP)) {
@@ -708,8 +708,7 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
                struct rt6_info ovs_rt;
 
                if (!v6ops) {
-                       kfree_skb(skb);
-                       return;
+                       goto err;
                }
 
                prepare_frag(vport, skb);
@@ -728,8 +727,12 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
                WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
                          ovs_vport_name(vport), ntohs(ethertype), mru,
                          vport->dev->mtu);
-               kfree_skb(skb);
+               goto err;
        }
+
+       return;
+err:
+       kfree_skb(skb);
 }
 
 static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
@@ -765,7 +768,6 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            struct sw_flow_key *key, const struct nlattr *attr,
                            const struct nlattr *actions, int actions_len)
 {
-       struct ip_tunnel_info info;
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;
@@ -793,11 +795,9 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                        if (vport) {
                                int err;
 
-                               upcall.egress_tun_info = &info;
-                               err = ovs_vport_get_egress_tun_info(vport, skb,
-                                                                   &upcall);
-                               if (err)
-                                       upcall.egress_tun_info = NULL;
+                               err = dev_fill_metadata_dst(vport->dev, skb);
+                               if (!err)
+                                       upcall.egress_tun_info = skb_tunnel_info(skb);
                        }
 
                        break;
@@ -968,7 +968,7 @@ static int execute_masked_set_action(struct sk_buff *skb,
        case OVS_KEY_ATTR_CT_STATE:
        case OVS_KEY_ATTR_CT_ZONE:
        case OVS_KEY_ATTR_CT_MARK:
-       case OVS_KEY_ATTR_CT_LABEL:
+       case OVS_KEY_ATTR_CT_LABELS:
                err = -EINVAL;
                break;
        }
@@ -1099,12 +1099,18 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        break;
 
                case OVS_ACTION_ATTR_CT:
+                       if (!is_flow_key_valid(key)) {
+                               err = ovs_flow_key_update(skb, key);
+                               if (err)
+                                       return err;
+                       }
+
                        err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
                                             nla_data(a));
 
                        /* Hide stolen IP fragments from user space. */
-                       if (err == -EINPROGRESS)
-                               return 0;
+                       if (err)
+                               return err == -EINPROGRESS ? 0 : err;
                        break;
                }
 
index 002a755fa07ea8c6c71c762716a5be8700c0a36c..50095820edb7b2f8adc1ba1699543c2b28e378b0 100644 (file)
@@ -37,9 +37,9 @@ struct md_mark {
 };
 
 /* Metadata label for masked write to conntrack label. */
-struct md_label {
-       struct ovs_key_ct_label value;
-       struct ovs_key_ct_label mask;
+struct md_labels {
+       struct ovs_key_ct_labels value;
+       struct ovs_key_ct_labels mask;
 };
 
 /* Conntrack action context for execution. */
@@ -47,10 +47,10 @@ struct ovs_conntrack_info {
        struct nf_conntrack_helper *helper;
        struct nf_conntrack_zone zone;
        struct nf_conn *ct;
-       u32 flags;
+       u8 commit : 1;
        u16 family;
        struct md_mark mark;
-       struct md_label label;
+       struct md_labels labels;
 };
 
 static u16 key_to_nfproto(const struct sw_flow_key *key)
@@ -109,21 +109,21 @@ static u32 ovs_ct_get_mark(const struct nf_conn *ct)
 #endif
 }
 
-static void ovs_ct_get_label(const struct nf_conn *ct,
-                            struct ovs_key_ct_label *label)
+static void ovs_ct_get_labels(const struct nf_conn *ct,
+                             struct ovs_key_ct_labels *labels)
 {
        struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
 
        if (cl) {
                size_t len = cl->words * sizeof(long);
 
-               if (len > OVS_CT_LABEL_LEN)
-                       len = OVS_CT_LABEL_LEN;
-               else if (len < OVS_CT_LABEL_LEN)
-                       memset(label, 0, OVS_CT_LABEL_LEN);
-               memcpy(label, cl->bits, len);
+               if (len > OVS_CT_LABELS_LEN)
+                       len = OVS_CT_LABELS_LEN;
+               else if (len < OVS_CT_LABELS_LEN)
+                       memset(labels, 0, OVS_CT_LABELS_LEN);
+               memcpy(labels, cl->bits, len);
        } else {
-               memset(label, 0, OVS_CT_LABEL_LEN);
+               memset(labels, 0, OVS_CT_LABELS_LEN);
        }
 }
 
@@ -134,7 +134,7 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
        key->ct.state = state;
        key->ct.zone = zone->id;
        key->ct.mark = ovs_ct_get_mark(ct);
-       ovs_ct_get_label(ct, &key->ct.label);
+       ovs_ct_get_labels(ct, &key->ct.labels);
 }
 
 /* Update 'key' based on skb->nfct. If 'post_ct' is true, then OVS has
@@ -151,6 +151,8 @@ static void ovs_ct_update_key(const struct sk_buff *skb,
        ct = nf_ct_get(skb, &ctinfo);
        if (ct) {
                state = ovs_ct_get_state(ctinfo);
+               if (!nf_ct_is_confirmed(ct))
+                       state |= OVS_CS_F_NEW;
                if (ct->master)
                        state |= OVS_CS_F_RELATED;
                zone = nf_ct_zone(ct);
@@ -167,7 +169,7 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
 
 int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
 {
-       if (nla_put_u8(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
+       if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
                return -EMSGSIZE;
 
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
@@ -179,8 +181,8 @@ int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
                return -EMSGSIZE;
 
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-           nla_put(skb, OVS_KEY_ATTR_CT_LABEL, sizeof(key->ct.label),
-                   &key->ct.label))
+           nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(key->ct.labels),
+                   &key->ct.labels))
                return -EMSGSIZE;
 
        return 0;
@@ -213,18 +215,15 @@ static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key,
 #endif
 }
 
-static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key,
-                           const struct ovs_key_ct_label *label,
-                           const struct ovs_key_ct_label *mask)
+static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key,
+                            const struct ovs_key_ct_labels *labels,
+                            const struct ovs_key_ct_labels *mask)
 {
        enum ip_conntrack_info ctinfo;
        struct nf_conn_labels *cl;
        struct nf_conn *ct;
        int err;
 
-       if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS))
-               return -ENOTSUPP;
-
        /* The connection could be invalid, in which case set_label is no-op.*/
        ct = nf_ct_get(skb, &ctinfo);
        if (!ct)
@@ -235,15 +234,15 @@ static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key,
                nf_ct_labels_ext_add(ct);
                cl = nf_ct_labels_find(ct);
        }
-       if (!cl || cl->words * sizeof(long) < OVS_CT_LABEL_LEN)
+       if (!cl || cl->words * sizeof(long) < OVS_CT_LABELS_LEN)
                return -ENOSPC;
 
-       err = nf_connlabels_replace(ct, (u32 *)label, (u32 *)mask,
-                                   OVS_CT_LABEL_LEN / sizeof(u32));
+       err = nf_connlabels_replace(ct, (u32 *)labels, (u32 *)mask,
+                                   OVS_CT_LABELS_LEN / sizeof(u32));
        if (err)
                return err;
 
-       ovs_ct_get_label(ct, &key->ct.label);
+       ovs_ct_get_labels(ct, &key->ct.labels);
        return 0;
 }
 
@@ -294,6 +293,9 @@ static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
        return helper->help(skb, protoff, ct, ctinfo);
 }
 
+/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
+ * value if 'skb' is freed.
+ */
 static int handle_fragments(struct net *net, struct sw_flow_key *key,
                            u16 zone, struct sk_buff *skb)
 {
@@ -309,8 +311,8 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
                        return err;
 
                ovs_cb.mru = IPCB(skb)->frag_max_size;
-       } else if (key->eth.type == htons(ETH_P_IPV6)) {
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+       } else if (key->eth.type == htons(ETH_P_IPV6)) {
                enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
                struct sk_buff *reasm;
 
@@ -319,17 +321,25 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
                if (!reasm)
                        return -EINPROGRESS;
 
-               if (skb == reasm)
+               if (skb == reasm) {
+                       kfree_skb(skb);
                        return -EINVAL;
+               }
+
+               /* Don't free 'skb' even though it is one of the original
+                * fragments, as we're going to morph it into the head.
+                */
+               skb_get(skb);
+               nf_ct_frag6_consume_orig(reasm);
 
                key->ip.proto = ipv6_hdr(reasm)->nexthdr;
                skb_morph(skb, reasm);
+               skb->next = reasm->next;
                consume_skb(reasm);
                ovs_cb.mru = IP6CB(skb)->frag_max_size;
-#else
-               return -EPFNOSUPPORT;
 #endif
        } else {
+               kfree_skb(skb);
                return -EPFNOSUPPORT;
        }
 
@@ -377,7 +387,7 @@ static bool skb_nfct_cached(const struct net *net, const struct sk_buff *skb,
        return true;
 }
 
-static int __ovs_ct_lookup(struct net *net, const struct sw_flow_key *key,
+static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                           const struct ovs_conntrack_info *info,
                           struct sk_buff *skb)
 {
@@ -408,6 +418,8 @@ static int __ovs_ct_lookup(struct net *net, const struct sw_flow_key *key,
                }
        }
 
+       ovs_ct_update_key(skb, key, true);
+
        return 0;
 }
 
@@ -430,8 +442,6 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                err = __ovs_ct_lookup(net, key, info, skb);
                if (err)
                        return err;
-
-               ovs_ct_update_key(skb, key, true);
        }
 
        return 0;
@@ -460,22 +470,23 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
        if (nf_conntrack_confirm(skb) != NF_ACCEPT)
                return -EINVAL;
 
-       ovs_ct_update_key(skb, key, true);
-
        return 0;
 }
 
-static bool label_nonzero(const struct ovs_key_ct_label *label)
+static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
 {
        size_t i;
 
-       for (i = 0; i < sizeof(*label); i++)
-               if (label->ct_label[i])
+       for (i = 0; i < sizeof(*labels); i++)
+               if (labels->ct_labels[i])
                        return true;
 
        return false;
 }
 
+/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
+ * value if 'skb' is freed.
+ */
 int ovs_ct_execute(struct net *net, struct sk_buff *skb,
                   struct sw_flow_key *key,
                   const struct ovs_conntrack_info *info)
@@ -493,7 +504,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
                        return err;
        }
 
-       if (info->flags & OVS_CT_F_COMMIT)
+       if (info->commit)
                err = ovs_ct_commit(net, key, info, skb);
        else
                err = ovs_ct_lookup(net, key, info, skb);
@@ -506,11 +517,13 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
                if (err)
                        goto err;
        }
-       if (label_nonzero(&info->label.mask))
-               err = ovs_ct_set_label(skb, key, &info->label.value,
-                                      &info->label.mask);
+       if (labels_nonzero(&info->labels.mask))
+               err = ovs_ct_set_labels(skb, key, &info->labels.value,
+                                       &info->labels.mask);
 err:
        skb_push(skb, nh_ofs);
+       if (err)
+               kfree_skb(skb);
        return err;
 }
 
@@ -539,14 +552,13 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
 }
 
 static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
-       [OVS_CT_ATTR_FLAGS]     = { .minlen = sizeof(u32),
-                                   .maxlen = sizeof(u32) },
+       [OVS_CT_ATTR_COMMIT]    = { .minlen = 0, .maxlen = 0 },
        [OVS_CT_ATTR_ZONE]      = { .minlen = sizeof(u16),
                                    .maxlen = sizeof(u16) },
        [OVS_CT_ATTR_MARK]      = { .minlen = sizeof(struct md_mark),
                                    .maxlen = sizeof(struct md_mark) },
-       [OVS_CT_ATTR_LABEL]     = { .minlen = sizeof(struct md_label),
-                                   .maxlen = sizeof(struct md_label) },
+       [OVS_CT_ATTR_LABELS]    = { .minlen = sizeof(struct md_labels),
+                                   .maxlen = sizeof(struct md_labels) },
        [OVS_CT_ATTR_HELPER]    = { .minlen = 1,
                                    .maxlen = NF_CT_HELPER_NAME_LEN }
 };
@@ -576,8 +588,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
                }
 
                switch (type) {
-               case OVS_CT_ATTR_FLAGS:
-                       info->flags = nla_get_u32(a);
+               case OVS_CT_ATTR_COMMIT:
+                       info->commit = true;
                        break;
 #ifdef CONFIG_NF_CONNTRACK_ZONES
                case OVS_CT_ATTR_ZONE:
@@ -588,15 +600,23 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
                case OVS_CT_ATTR_MARK: {
                        struct md_mark *mark = nla_data(a);
 
+                       if (!mark->mask) {
+                               OVS_NLERR(log, "ct_mark mask cannot be 0");
+                               return -EINVAL;
+                       }
                        info->mark = *mark;
                        break;
                }
 #endif
 #ifdef CONFIG_NF_CONNTRACK_LABELS
-               case OVS_CT_ATTR_LABEL: {
-                       struct md_label *label = nla_data(a);
+               case OVS_CT_ATTR_LABELS: {
+                       struct md_labels *labels = nla_data(a);
 
-                       info->label = *label;
+                       if (!labels_nonzero(&labels->mask)) {
+                               OVS_NLERR(log, "ct_labels mask cannot be 0");
+                               return -EINVAL;
+                       }
+                       info->labels = *labels;
                        break;
                }
 #endif
@@ -633,7 +653,7 @@ bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
            attr == OVS_KEY_ATTR_CT_MARK)
                return true;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-           attr == OVS_KEY_ATTR_CT_LABEL) {
+           attr == OVS_KEY_ATTR_CT_LABELS) {
                struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
 
                return ovs_net->xt_label;
@@ -701,18 +721,19 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
        if (!start)
                return -EMSGSIZE;
 
-       if (nla_put_u32(skb, OVS_CT_ATTR_FLAGS, ct_info->flags))
+       if (ct_info->commit && nla_put_flag(skb, OVS_CT_ATTR_COMMIT))
                return -EMSGSIZE;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
            nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
                return -EMSGSIZE;
-       if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+       if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
            nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
                    &ct_info->mark))
                return -EMSGSIZE;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-           nla_put(skb, OVS_CT_ATTR_LABEL, sizeof(ct_info->label),
-                   &ct_info->label))
+           labels_nonzero(&ct_info->labels.mask) &&
+           nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
+                   &ct_info->labels))
                return -EMSGSIZE;
        if (ct_info->helper) {
                if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
@@ -737,7 +758,7 @@ void ovs_ct_free_action(const struct nlattr *a)
 
 void ovs_ct_init(struct net *net)
 {
-       unsigned int n_bits = sizeof(struct ovs_key_ct_label) * BITS_PER_BYTE;
+       unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
 
        if (nf_connlabels_get(net, n_bits)) {
index 43f5dd7a55774414aeb7aad8c0560db3e0596035..a7544f405c1626f6564075f4453832b311b1ac29 100644 (file)
@@ -34,6 +34,10 @@ int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *,
 void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
 int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb);
 void ovs_ct_free_action(const struct nlattr *a);
+
+#define CT_SUPPORTED_MASK (OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | \
+                          OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR | \
+                          OVS_CS_F_INVALID | OVS_CS_F_TRACKED)
 #else
 #include <linux/errno.h>
 
@@ -63,6 +67,7 @@ static inline int ovs_ct_execute(struct net *net, struct sk_buff *skb,
                                 struct sw_flow_key *key,
                                 const struct ovs_conntrack_info *info)
 {
+       kfree_skb(skb);
        return -ENOTSUPP;
 }
 
@@ -72,7 +77,7 @@ static inline void ovs_ct_fill_key(const struct sk_buff *skb,
        key->ct.state = 0;
        key->ct.zone = 0;
        key->ct.mark = 0;
-       memset(&key->ct.label, 0, sizeof(key->ct.label));
+       memset(&key->ct.labels, 0, sizeof(key->ct.labels));
 }
 
 static inline int ovs_ct_put_key(const struct sw_flow_key *key,
@@ -82,5 +87,7 @@ static inline int ovs_ct_put_key(const struct sw_flow_key *key,
 }
 
 static inline void ovs_ct_free_action(const struct nlattr *a) { }
+
+#define CT_SUPPORTED_MASK 0
 #endif /* CONFIG_NF_CONNTRACK */
 #endif /* ovs_conntrack.h */
index b816ff87152834065841ff41e96d2f0d9ee4e307..c5d08ee377304313e7e320133e46343adff5da95 100644 (file)
@@ -490,9 +490,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 
        if (upcall_info->egress_tun_info) {
                nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
-               err = ovs_nla_put_egress_tunnel_key(user_skb,
-                                                   upcall_info->egress_tun_info,
-                                                   upcall_info->egress_tun_opts);
+               err = ovs_nla_put_tunnel_info(user_skb,
+                                             upcall_info->egress_tun_info);
                BUG_ON(err);
                nla_nest_end(user_skb, nla);
        }
index f88038a99f4442bb753b4fc0cb3cc6c05c2bc2fc..67bdecd9fdc1f2b0544c2081aa1a557b16e0063a 100644 (file)
@@ -117,7 +117,6 @@ struct ovs_skb_cb {
  */
 struct dp_upcall_info {
        struct ip_tunnel_info *egress_tun_info;
-       const void *egress_tun_opts;
        const struct nlattr *userdata;
        const struct nlattr *actions;
        int actions_len;
index fe527d2dd4b7ae9d6b31a7d9d92586d1cd276c26..8cfa15a08668804a2a74c0047662694f23acbb80 100644 (file)
@@ -116,7 +116,7 @@ struct sw_flow_key {
                u16 zone;
                u32 mark;
                u8 state;
-               struct ovs_key_ct_label label;
+               struct ovs_key_ct_labels labels;
        } ct;
 
 } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
index 5c030a4d73382f6123345f4a6b608ea1b1684ae0..38536c137c54d0d4ebcebcce613fefdb89799b5a 100644 (file)
@@ -291,10 +291,10 @@ size_t ovs_key_attr_size(void)
                + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
                + nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
                + nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
-               + nla_total_size(1)   /* OVS_KEY_ATTR_CT_STATE */
+               + nla_total_size(4)   /* OVS_KEY_ATTR_CT_STATE */
                + nla_total_size(2)   /* OVS_KEY_ATTR_CT_ZONE */
                + nla_total_size(4)   /* OVS_KEY_ATTR_CT_MARK */
-               + nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABEL */
+               + nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABELS */
                + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
                + nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
@@ -349,10 +349,10 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
        [OVS_KEY_ATTR_TUNNEL]    = { .len = OVS_ATTR_NESTED,
                                     .next = ovs_tunnel_key_lens, },
        [OVS_KEY_ATTR_MPLS]      = { .len = sizeof(struct ovs_key_mpls) },
-       [OVS_KEY_ATTR_CT_STATE]  = { .len = sizeof(u8) },
+       [OVS_KEY_ATTR_CT_STATE]  = { .len = sizeof(u32) },
        [OVS_KEY_ATTR_CT_ZONE]   = { .len = sizeof(u16) },
        [OVS_KEY_ATTR_CT_MARK]   = { .len = sizeof(u32) },
-       [OVS_KEY_ATTR_CT_LABEL]  = { .len = sizeof(struct ovs_key_ct_label) },
+       [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
 };
 
 static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
@@ -717,7 +717,7 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
        if ((output->tun_flags & TUNNEL_OAM) &&
            nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
                return -EMSGSIZE;
-       if (tun_opts) {
+       if (swkey_tun_opts_len) {
                if (output->tun_flags & TUNNEL_GENEVE_OPT &&
                    nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
                            swkey_tun_opts_len, tun_opts))
@@ -749,13 +749,12 @@ static int ipv4_tun_to_nlattr(struct sk_buff *skb,
        return 0;
 }
 
-int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
-                                 const struct ip_tunnel_info *egress_tun_info,
-                                 const void *egress_tun_opts)
+int ovs_nla_put_tunnel_info(struct sk_buff *skb,
+                           struct ip_tunnel_info *tun_info)
 {
-       return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key,
-                                   egress_tun_opts,
-                                   egress_tun_info->options_len);
+       return __ipv4_tun_to_nlattr(skb, &tun_info->key,
+                                   ip_tunnel_info_opts(tun_info),
+                                   tun_info->options_len);
 }
 
 static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
@@ -814,7 +813,13 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
 
        if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
            ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
-               u8 ct_state = nla_get_u8(a[OVS_KEY_ATTR_CT_STATE]);
+               u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
+
+               if (ct_state & ~CT_SUPPORTED_MASK) {
+                       OVS_NLERR(log, "ct_state flags %08x unsupported",
+                                 ct_state);
+                       return -EINVAL;
+               }
 
                SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask);
                *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
@@ -833,14 +838,14 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
                SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
                *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
        }
-       if (*attrs & (1 << OVS_KEY_ATTR_CT_LABEL) &&
-           ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABEL)) {
-               const struct ovs_key_ct_label *cl;
+       if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
+           ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
+               const struct ovs_key_ct_labels *cl;
 
-               cl = nla_data(a[OVS_KEY_ATTR_CT_LABEL]);
-               SW_FLOW_KEY_MEMCPY(match, ct.label, cl->ct_label,
+               cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
+               SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
                                   sizeof(*cl), is_mask);
-               *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABEL);
+               *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
        }
        return 0;
 }
@@ -1093,6 +1098,9 @@ static void nlattr_set(struct nlattr *attr, u8 val,
                } else {
                        memset(nla_data(nla), val, nla_len(nla));
                }
+
+               if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
+                       *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
        }
 }
 
@@ -1973,7 +1981,7 @@ static int validate_set(const struct nlattr *a,
        case OVS_KEY_ATTR_PRIORITY:
        case OVS_KEY_ATTR_SKB_MARK:
        case OVS_KEY_ATTR_CT_MARK:
-       case OVS_KEY_ATTR_CT_LABEL:
+       case OVS_KEY_ATTR_CT_LABELS:
        case OVS_KEY_ATTR_ETHERNET:
                break;
 
@@ -2374,10 +2382,7 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
                if (!start)
                        return -EMSGSIZE;
 
-               err = ipv4_tun_to_nlattr(skb, &tun_info->key,
-                                        tun_info->options_len ?
-                                            ip_tunnel_info_opts(tun_info) : NULL,
-                                        tun_info->options_len);
+               err = ovs_nla_put_tunnel_info(skb, tun_info);
                if (err)
                        return err;
                nla_nest_end(skb, start);
index 6ca3f0baf449f05f82dd92f9796cd8cfa7abf141..47dd142eca1c0856c74406e1d75682fc822861f6 100644 (file)
@@ -55,9 +55,9 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
 int ovs_nla_get_match(struct net *, struct sw_flow_match *,
                      const struct nlattr *key, const struct nlattr *mask,
                      bool log);
-int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
-                                 const struct ip_tunnel_info *,
-                                 const void *egress_tun_opts);
+
+int ovs_nla_put_tunnel_info(struct sk_buff *skb,
+                           struct ip_tunnel_info *tun_info);
 
 bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
 int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
index f2ea83ba47631d4f650aa3dcf2f9c66cf4fd1c6f..c7f74aab34b9ef7d2befcd571d44290b76fbfbc1 100644 (file)
@@ -93,7 +93,8 @@ struct sw_flow *ovs_flow_alloc(void)
 
        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
-                                     GFP_KERNEL | __GFP_ZERO, 0);
+                                     GFP_KERNEL | __GFP_ZERO,
+                                     node_online(0) ? 0 : NUMA_NO_NODE);
        if (!stats)
                goto err;
 
index 2735e9c4a3b88586165ef5644e429cf28079974d..5f8aaaaa0785385b89096925718d96b3335c1d32 100644 (file)
@@ -52,18 +52,6 @@ static int geneve_get_options(const struct vport *vport,
        return 0;
 }
 
-static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                     struct dp_upcall_info *upcall)
-{
-       struct geneve_port *geneve_port = geneve_vport(vport);
-       struct net *net = ovs_dp_get_net(vport->dp);
-       __be16 dport = htons(geneve_port->port_no);
-       __be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
-
-       return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
-                                         skb, IPPROTO_UDP, sport, dport);
-}
-
 static struct vport *geneve_tnl_create(const struct vport_parms *parms)
 {
        struct net *net = ovs_dp_get_net(parms->dp);
@@ -130,7 +118,6 @@ static struct vport_ops ovs_geneve_vport_ops = {
        .get_options    = geneve_get_options,
        .send           = ovs_netdev_send,
        .owner          = THIS_MODULE,
-       .get_egress_tun_info    = geneve_get_egress_tun_info,
 };
 
 static int __init ovs_geneve_tnl_init(void)
index 4d24481669c95197b06bb75d207b3e713b433508..64225bf5eb405f4082547bbc8f09d920de72cdb8 100644 (file)
@@ -84,18 +84,10 @@ static struct vport *gre_create(const struct vport_parms *parms)
        return ovs_netdev_link(vport, parms->name);
 }
 
-static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                  struct dp_upcall_info *upcall)
-{
-       return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
-                                         skb, IPPROTO_GRE, 0, 0);
-}
-
 static struct vport_ops ovs_gre_vport_ops = {
        .type           = OVS_VPORT_TYPE_GRE,
        .create         = gre_create,
        .send           = ovs_netdev_send,
-       .get_egress_tun_info    = gre_get_egress_tun_info,
        .destroy        = ovs_netdev_tunnel_destroy,
        .owner          = THIS_MODULE,
 };
index 388b8a6bf112979f7f7291c5bb17fd6c7027e594..b3934126daa894d7bdaf7511ece6ff5319cf2c8a 100644 (file)
@@ -106,12 +106,45 @@ static void internal_dev_destructor(struct net_device *dev)
        free_netdev(dev);
 }
 
+static struct rtnl_link_stats64 *
+internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+       int i;
+
+       memset(stats, 0, sizeof(*stats));
+       stats->rx_errors  = dev->stats.rx_errors;
+       stats->tx_errors  = dev->stats.tx_errors;
+       stats->tx_dropped = dev->stats.tx_dropped;
+       stats->rx_dropped = dev->stats.rx_dropped;
+
+       for_each_possible_cpu(i) {
+               const struct pcpu_sw_netstats *percpu_stats;
+               struct pcpu_sw_netstats local_stats;
+               unsigned int start;
+
+               percpu_stats = per_cpu_ptr(dev->tstats, i);
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
+                       local_stats = *percpu_stats;
+               } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
+
+               stats->rx_bytes         += local_stats.rx_bytes;
+               stats->rx_packets       += local_stats.rx_packets;
+               stats->tx_bytes         += local_stats.tx_bytes;
+               stats->tx_packets       += local_stats.tx_packets;
+       }
+
+       return stats;
+}
+
 static const struct net_device_ops internal_dev_netdev_ops = {
        .ndo_open = internal_dev_open,
        .ndo_stop = internal_dev_stop,
        .ndo_start_xmit = internal_dev_xmit,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_change_mtu = internal_dev_change_mtu,
+       .ndo_get_stats64 = internal_get_stats,
 };
 
 static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -161,6 +194,11 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
                err = -ENOMEM;
                goto error_free_vport;
        }
+       vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+       if (!vport->dev->tstats) {
+               err = -ENOMEM;
+               goto error_free_netdev;
+       }
 
        dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
        internal_dev = internal_dev_priv(vport->dev);
@@ -173,7 +211,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
        rtnl_lock();
        err = register_netdevice(vport->dev);
        if (err)
-               goto error_free_netdev;
+               goto error_unlock;
 
        dev_set_promiscuity(vport->dev, 1);
        rtnl_unlock();
@@ -181,8 +219,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
        return vport;
 
-error_free_netdev:
+error_unlock:
        rtnl_unlock();
+       free_percpu(vport->dev->tstats);
+error_free_netdev:
        free_netdev(vport->dev);
 error_free_vport:
        ovs_vport_free(vport);
@@ -198,7 +238,7 @@ static void internal_dev_destroy(struct vport *vport)
 
        /* unregister_netdevice() waits for an RCU grace period. */
        unregister_netdevice(vport->dev);
-
+       free_percpu(vport->dev->tstats);
        rtnl_unlock();
 }
 
index c11413d5075f882c2b360515285ba520a7303d0c..e1c9c08880373276e8430cadd93f03fe4a1e11a0 100644 (file)
@@ -146,31 +146,12 @@ static struct vport *vxlan_create(const struct vport_parms *parms)
        return ovs_netdev_link(vport, parms->name);
 }
 
-static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                    struct dp_upcall_info *upcall)
-{
-       struct vxlan_dev *vxlan = netdev_priv(vport->dev);
-       struct net *net = ovs_dp_get_net(vport->dp);
-       __be16 dst_port = vxlan_dev_dst_port(vxlan);
-       __be16 src_port;
-       int port_min;
-       int port_max;
-
-       inet_get_local_port_range(net, &port_min, &port_max);
-       src_port = udp_flow_src_port(net, skb, 0, 0, true);
-
-       return ovs_tunnel_get_egress_info(upcall, net,
-                                         skb, IPPROTO_UDP,
-                                         src_port, dst_port);
-}
-
 static struct vport_ops ovs_vxlan_netdev_vport_ops = {
        .type                   = OVS_VPORT_TYPE_VXLAN,
        .create                 = vxlan_create,
        .destroy                = ovs_netdev_tunnel_destroy,
        .get_options            = vxlan_get_options,
        .send                   = ovs_netdev_send,
-       .get_egress_tun_info    = vxlan_get_egress_tun_info,
 };
 
 static int __init ovs_vxlan_tnl_init(void)
index dc81dc619aa2344a5c7912def9d6852fcd37ebda..320c765ce44a07e71daedfd457d37c81ea2e4c49 100644 (file)
@@ -280,35 +280,19 @@ void ovs_vport_del(struct vport *vport)
  */
 void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
 {
-       struct net_device *dev = vport->dev;
-       int i;
-
-       memset(stats, 0, sizeof(*stats));
-       stats->rx_errors  = dev->stats.rx_errors;
-       stats->tx_errors  = dev->stats.tx_errors;
-       stats->tx_dropped = dev->stats.tx_dropped;
-       stats->rx_dropped = dev->stats.rx_dropped;
-
-       stats->rx_dropped += atomic_long_read(&dev->rx_dropped);
-       stats->tx_dropped += atomic_long_read(&dev->tx_dropped);
-
-       for_each_possible_cpu(i) {
-               const struct pcpu_sw_netstats *percpu_stats;
-               struct pcpu_sw_netstats local_stats;
-               unsigned int start;
-
-               percpu_stats = per_cpu_ptr(dev->tstats, i);
-
-               do {
-                       start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
-                       local_stats = *percpu_stats;
-               } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
-
-               stats->rx_bytes         += local_stats.rx_bytes;
-               stats->rx_packets       += local_stats.rx_packets;
-               stats->tx_bytes         += local_stats.tx_bytes;
-               stats->tx_packets       += local_stats.tx_packets;
-       }
+       const struct rtnl_link_stats64 *dev_stats;
+       struct rtnl_link_stats64 temp;
+
+       dev_stats = dev_get_stats(vport->dev, &temp);
+       stats->rx_errors  = dev_stats->rx_errors;
+       stats->tx_errors  = dev_stats->tx_errors;
+       stats->tx_dropped = dev_stats->tx_dropped;
+       stats->rx_dropped = dev_stats->rx_dropped;
+
+       stats->rx_bytes   = dev_stats->rx_bytes;
+       stats->rx_packets = dev_stats->rx_packets;
+       stats->tx_bytes   = dev_stats->tx_bytes;
+       stats->tx_packets = dev_stats->tx_packets;
 }
 
 /**
@@ -460,6 +444,15 @@ int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
 
        OVS_CB(skb)->input_vport = vport;
        OVS_CB(skb)->mru = 0;
+       if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
+               u32 mark;
+
+               mark = skb->mark;
+               skb_scrub_packet(skb, true);
+               skb->mark = mark;
+               tun_info = NULL;
+       }
+
        /* Extract flow from 'skb' into 'key'. */
        error = ovs_flow_key_extract(tun_info, skb, &key);
        if (unlikely(error)) {
@@ -486,61 +479,3 @@ void ovs_vport_deferred_free(struct vport *vport)
        call_rcu(&vport->rcu, free_vport_rcu);
 }
 EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
-
-int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
-                              struct net *net,
-                              struct sk_buff *skb,
-                              u8 ipproto,
-                              __be16 tp_src,
-                              __be16 tp_dst)
-{
-       struct ip_tunnel_info *egress_tun_info = upcall->egress_tun_info;
-       const struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);
-       const struct ip_tunnel_key *tun_key;
-       u32 skb_mark = skb->mark;
-       struct rtable *rt;
-       struct flowi4 fl;
-
-       if (unlikely(!tun_info))
-               return -EINVAL;
-       if (ip_tunnel_info_af(tun_info) != AF_INET)
-               return -EINVAL;
-
-       tun_key = &tun_info->key;
-
-       /* Route lookup to get srouce IP address.
-        * The process may need to be changed if the corresponding process
-        * in vports ops changed.
-        */
-       rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
-       if (IS_ERR(rt))
-               return PTR_ERR(rt);
-
-       ip_rt_put(rt);
-
-       /* Generate egress_tun_info based on tun_info,
-        * saddr, tp_src and tp_dst
-        */
-       ip_tunnel_key_init(&egress_tun_info->key,
-                          fl.saddr, tun_key->u.ipv4.dst,
-                          tun_key->tos,
-                          tun_key->ttl,
-                          tp_src, tp_dst,
-                          tun_key->tun_id,
-                          tun_key->tun_flags);
-       egress_tun_info->options_len = tun_info->options_len;
-       egress_tun_info->mode = tun_info->mode;
-       upcall->egress_tun_opts = ip_tunnel_info_opts(egress_tun_info);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
-
-int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                 struct dp_upcall_info *upcall)
-{
-       /* get_egress_tun_info() is only implemented on tunnel ports. */
-       if (unlikely(!vport->ops->get_egress_tun_info))
-               return -EINVAL;
-
-       return vport->ops->get_egress_tun_info(vport, skb, upcall);
-}
index a413f3ae6a7b540ed7b34fd4b31f69424caeb39f..d341ad6f3afe5734f587c1df347fd72dc2ba2c38 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/u64_stats_sync.h>
-#include <net/route.h>
 
 #include "datapath.h"
 
@@ -53,16 +52,6 @@ int ovs_vport_set_upcall_portids(struct vport *, const struct nlattr *pids);
 int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
 u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
 
-int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
-                              struct net *net,
-                              struct sk_buff *,
-                              u8 ipproto,
-                              __be16 tp_src,
-                              __be16 tp_dst);
-
-int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                 struct dp_upcall_info *upcall);
-
 /**
  * struct vport_portids - array of netlink portids of a vport.
  *                        must be protected by rcu.
@@ -140,8 +129,6 @@ struct vport_parms {
  * have any configuration.
  * @send: Send a packet on the device.
  * zero for dropped packets or negative for error.
- * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for
- * a packet.
  */
 struct vport_ops {
        enum ovs_vport_type type;
@@ -154,9 +141,6 @@ struct vport_ops {
        int (*get_options)(const struct vport *, struct sk_buff *);
 
        void (*send)(struct vport *, struct sk_buff *);
-       int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
-                                  struct dp_upcall_info *upcall);
-
        struct module *owner;
        struct list_head list;
 };
@@ -215,25 +199,6 @@ static inline const char *ovs_vport_name(struct vport *vport)
 int ovs_vport_ops_register(struct vport_ops *ops);
 void ovs_vport_ops_unregister(struct vport_ops *ops);
 
-static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
-                                                    const struct ip_tunnel_key *key,
-                                                    u32 mark,
-                                                    struct flowi4 *fl,
-                                                    u8 protocol)
-{
-       struct rtable *rt;
-
-       memset(fl, 0, sizeof(*fl));
-       fl->daddr = key->u.ipv4.dst;
-       fl->saddr = key->u.ipv4.src;
-       fl->flowi4_tos = RT_TOS(key->tos);
-       fl->flowi4_mark = mark;
-       fl->flowi4_proto = protocol;
-
-       rt = ip_route_output_key(net, fl);
-       return rt;
-}
-
 static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
 {
        vport->ops->send(vport, skb);
index fbc5ef88bc0e692ea4cf9cb59f6163905510928e..27a992154804c685d983206b599ef7a5bf96e8af 100644 (file)
@@ -214,8 +214,15 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
                        }
 
                        to_copy = min(tc->t_tinc_data_rem, left);
-                       pskb_pull(clone, offset);
-                       pskb_trim(clone, to_copy);
+                       if (!pskb_pull(clone, offset) ||
+                           pskb_trim(clone, to_copy)) {
+                               pr_warn("rds_tcp_data_recv: pull/trim failed "
+                                       "left %zu data_rem %zu skb_len %d\n",
+                                       left, tc->t_tinc_data_rem, skb->len);
+                               kfree_skb(clone);
+                               desc->error = -ENOMEM;
+                               goto out;
+                       }
                        skb_queue_tail(&tinc->ti_skb_list, clone);
 
                        rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
index 2d1be4a760fdc4361f23d0aa93a861298eaafe45..32fcdecdb9e2074bad6f3e3002738e9c289317c3 100644 (file)
 
 #define MIRRED_TAB_MASK     7
 static LIST_HEAD(mirred_list);
+static DEFINE_SPINLOCK(mirred_list_lock);
 
 static void tcf_mirred_release(struct tc_action *a, int bind)
 {
        struct tcf_mirred *m = to_mirred(a);
        struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
 
+       /* We could be called either in an RCU callback or with RTNL lock held. */
+       spin_lock_bh(&mirred_list_lock);
        list_del(&m->tcfm_list);
+       spin_unlock_bh(&mirred_list_lock);
        if (dev)
                dev_put(dev);
 }
@@ -103,10 +107,10 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        } else {
                if (bind)
                        return 0;
-               if (!ovr) {
-                       tcf_hash_release(a, bind);
+
+               tcf_hash_release(a, bind);
+               if (!ovr)
                        return -EEXIST;
-               }
        }
        m = to_mirred(a);
 
@@ -123,7 +127,9 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        }
 
        if (ret == ACT_P_CREATED) {
+               spin_lock_bh(&mirred_list_lock);
                list_add(&m->tcfm_list, &mirred_list);
+               spin_unlock_bh(&mirred_list_lock);
                tcf_hash_insert(a);
        }
 
@@ -173,6 +179,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 
        skb2->skb_iif = skb->dev->ifindex;
        skb2->dev = dev;
+       skb_sender_cpu_clear(skb2);
        err = dev_queue_xmit(skb2);
 
        if (err) {
@@ -221,7 +228,8 @@ static int mirred_device_event(struct notifier_block *unused,
        struct tcf_mirred *m;
 
        ASSERT_RTNL();
-       if (event == NETDEV_UNREGISTER)
+       if (event == NETDEV_UNREGISTER) {
+               spin_lock_bh(&mirred_list_lock);
                list_for_each_entry(m, &mirred_list, tcfm_list) {
                        if (rcu_access_pointer(m->tcfm_dev) == dev) {
                                dev_put(dev);
@@ -231,6 +239,8 @@ static int mirred_device_event(struct notifier_block *unused,
                                RCU_INIT_POINTER(m->tcfm_dev, NULL);
                        }
                }
+               spin_unlock_bh(&mirred_list_lock);
+       }
 
        return NOTIFY_DONE;
 }
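The tcf_mirred hunks above share one idea: mirred_list can now be modified from an RCU callback as well as under RTNL, so every walk and update of the list takes the new mirred_list_lock. A minimal userspace sketch of that discipline follows, with a pthread mutex standing in for the kernel spinlock; all names (tracker_list, tracker_lock, struct entry) are invented for the example and are not the kernel's.

/* Sketch only: a shared list whose writers may run in different
 * contexts, so every traversal and mutation takes the same lock.
 */
#include <pthread.h>
#include <stdio.h>

struct entry {
        int ifindex;
        struct entry *next;
};

static pthread_mutex_t tracker_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *tracker_list;

static void tracker_add(struct entry *e)
{
        pthread_mutex_lock(&tracker_lock);
        e->next = tracker_list;
        tracker_list = e;
        pthread_mutex_unlock(&tracker_lock);
}

static void tracker_del(struct entry *e)
{
        struct entry **p;

        pthread_mutex_lock(&tracker_lock);
        for (p = &tracker_list; *p; p = &(*p)->next) {
                if (*p == e) {
                        *p = e->next;
                        break;
                }
        }
        pthread_mutex_unlock(&tracker_lock);
}

int main(void)
{
        struct entry a = { .ifindex = 1 }, b = { .ifindex = 2 };

        tracker_add(&a);
        tracker_add(&b);
        tracker_del(&a);

        pthread_mutex_lock(&tracker_lock);
        for (struct entry *e = tracker_list; e; e = e->next)
                printf("still tracked: %d\n", e->ifindex);
        pthread_mutex_unlock(&tracker_lock);
        tracker_del(&b);
        return 0;
}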
index 9d15cb6b8cb1f5e8424e96f6245e9dd206d92405..86b04e31e60b76027214b85ee0c4c0e0de1b04c4 100644 (file)
@@ -368,6 +368,15 @@ static unsigned int hhf_drop(struct Qdisc *sch)
        return bucket - q->buckets;
 }
 
+static unsigned int hhf_qdisc_drop(struct Qdisc *sch)
+{
+       unsigned int prev_backlog;
+
+       prev_backlog = sch->qstats.backlog;
+       hhf_drop(sch);
+       return prev_backlog - sch->qstats.backlog;
+}
+
 static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct hhf_sched_data *q = qdisc_priv(sch);
@@ -696,7 +705,7 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
        .enqueue        =       hhf_enqueue,
        .dequeue        =       hhf_dequeue,
        .peek           =       qdisc_peek_dequeued,
-       .drop           =       hhf_drop,
+       .drop           =       hhf_qdisc_drop,
        .init           =       hhf_init,
        .reset          =       hhf_reset,
        .destroy        =       hhf_destroy,
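The hhf change above is a thin wrapper pattern: rather than letting the .drop callback return hhf_drop()'s bucket index, it samples qstats.backlog before the drop and reports the byte delta afterwards. Reduced to a standalone sketch (toy_queue, toy_drop_one and the 300-byte packet are invented for illustration, not the scheduler's types):

/* Sketch: report bytes freed by a drop as backlog-before minus
 * backlog-after.
 */
#include <stdio.h>

struct toy_queue {
        unsigned int backlog;   /* bytes currently queued */
};

/* Pretend to drop one packet of 300 bytes. */
static void toy_drop_one(struct toy_queue *q)
{
        q->backlog = q->backlog >= 300 ? q->backlog - 300 : 0;
}

static unsigned int toy_drop_report_bytes(struct toy_queue *q)
{
        unsigned int prev_backlog = q->backlog;

        toy_drop_one(q);
        return prev_backlog - q->backlog;
}

int main(void)
{
        struct toy_queue q = { .backlog = 1000 };

        printf("freed %u bytes, %u left\n",
               toy_drop_report_bytes(&q), q.backlog);
        return 0;
}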
index 197c3f59ecbf1d7975a987e57c13023ac9e2b357..b00f1f9611d64a7f46fdd37460d9c5ec9711f37f 100644 (file)
@@ -1208,20 +1208,22 @@ void sctp_assoc_update(struct sctp_association *asoc,
  *   within this document.
  *
  * Our basic strategy is to round-robin transports in priorities
- * according to sctp_state_prio_map[] e.g., if no such
+ * according to sctp_trans_score() e.g., if no such
  * transport with state SCTP_ACTIVE exists, round-robin through
  * SCTP_UNKNOWN, etc. You get the picture.
  */
-static const u8 sctp_trans_state_to_prio_map[] = {
-       [SCTP_ACTIVE]   = 3,    /* best case */
-       [SCTP_UNKNOWN]  = 2,
-       [SCTP_PF]       = 1,
-       [SCTP_INACTIVE] = 0,    /* worst case */
-};
-
 static u8 sctp_trans_score(const struct sctp_transport *trans)
 {
-       return sctp_trans_state_to_prio_map[trans->state];
+       switch (trans->state) {
+       case SCTP_ACTIVE:
+               return 3;       /* best case */
+       case SCTP_UNKNOWN:
+               return 2;
+       case SCTP_PF:
+               return 1;
+       default: /* case SCTP_INACTIVE */
+               return 0;       /* worst case */
+       }
 }
 
 static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
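The sctp_trans_score() rewrite above replaces an array indexed by transport state with a switch whose default arm covers SCTP_INACTIVE and anything unexpected, so a stray state value can no longer index past the table. The shape in isolation, using an invented enum rather than SCTP's states:

/* Sketch: score states with a switch whose default absorbs anything
 * unexpected.  The enum is made up for the example.
 */
#include <stdio.h>

enum toy_state { TOY_ACTIVE, TOY_UNKNOWN, TOY_PF, TOY_INACTIVE };

static unsigned int toy_score(enum toy_state s)
{
        switch (s) {
        case TOY_ACTIVE:
                return 3;       /* best case */
        case TOY_UNKNOWN:
                return 2;
        case TOY_PF:
                return 1;
        default:                /* TOY_INACTIVE or anything unexpected */
                return 0;       /* worst case */
        }
}

int main(void)
{
        printf("active=%u pf=%u bogus=%u\n",
               toy_score(TOY_ACTIVE), toy_score(TOY_PF),
               toy_score((enum toy_state)42));
        return 0;
}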
index 35df1266bf073aa9a7a4145da787ff42e95a7ee1..6098d4c42fa91287d3cde36ac05d860f76d4fe32 100644 (file)
@@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
        int error;
        struct sctp_transport *transport = (struct sctp_transport *) peer;
        struct sctp_association *asoc = transport->asoc;
-       struct net *net = sock_net(asoc->base.sk);
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
 
        /* Check whether a task is in the sock.  */
 
-       bh_lock_sock(asoc->base.sk);
-       if (sock_owned_by_user(asoc->base.sk)) {
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
                pr_debug("%s: sock is busy\n", __func__);
 
                /* Try again later.  */
@@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
                           transport, GFP_ATOMIC);
 
        if (error)
-               asoc->base.sk->sk_err = -error;
+               sk->sk_err = -error;
 
 out_unlock:
-       bh_unlock_sock(asoc->base.sk);
+       bh_unlock_sock(sk);
        sctp_transport_put(transport);
 }
 
@@ -285,11 +286,12 @@ out_unlock:
 static void sctp_generate_timeout_event(struct sctp_association *asoc,
                                        sctp_event_timeout_t timeout_type)
 {
-       struct net *net = sock_net(asoc->base.sk);
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
        int error = 0;
 
-       bh_lock_sock(asoc->base.sk);
-       if (sock_owned_by_user(asoc->base.sk)) {
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
                pr_debug("%s: sock is busy: timer %d\n", __func__,
                         timeout_type);
 
@@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
                           (void *)timeout_type, GFP_ATOMIC);
 
        if (error)
-               asoc->base.sk->sk_err = -error;
+               sk->sk_err = -error;
 
 out_unlock:
-       bh_unlock_sock(asoc->base.sk);
+       bh_unlock_sock(sk);
        sctp_association_put(asoc);
 }
 
@@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
        int error = 0;
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
-       struct net *net = sock_net(asoc->base.sk);
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
 
-       bh_lock_sock(asoc->base.sk);
-       if (sock_owned_by_user(asoc->base.sk)) {
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
                pr_debug("%s: sock is busy\n", __func__);
 
                /* Try again later.  */
@@ -388,11 +391,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
                           asoc->state, asoc->ep, asoc,
                           transport, GFP_ATOMIC);
 
-        if (error)
-                asoc->base.sk->sk_err = -error;
+       if (error)
+               sk->sk_err = -error;
 
 out_unlock:
-       bh_unlock_sock(asoc->base.sk);
+       bh_unlock_sock(sk);
        sctp_transport_put(transport);
 }
 
@@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 {
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
-       struct net *net = sock_net(asoc->base.sk);
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
 
-       bh_lock_sock(asoc->base.sk);
-       if (sock_owned_by_user(asoc->base.sk)) {
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
                pr_debug("%s: sock is busy\n", __func__);
 
                /* Try again later.  */
@@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
                   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
 out_unlock:
-       bh_unlock_sock(asoc->base.sk);
+       bh_unlock_sock(sk);
        sctp_association_put(asoc);
 }
 
index cb25c89da6239154475d6c31736e328d13f19134..f1e8dafbd5079b3406a769ba4854ecba229edca6 100644 (file)
@@ -39,25 +39,6 @@ static int
 fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
            struct rpcrdma_create_data_internal *cdata)
 {
-       struct ib_device_attr *devattr = &ia->ri_devattr;
-       struct ib_mr *mr;
-
-       /* Obtain an lkey to use for the regbufs, which are
-        * protected from remote access.
-        */
-       if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
-               ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-       } else {
-               mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE);
-               if (IS_ERR(mr)) {
-                       pr_err("%s: ib_get_dma_mr for failed with %lX\n",
-                              __func__, PTR_ERR(mr));
-                       return -ENOMEM;
-               }
-               ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
-               ia->ri_dma_mr = mr;
-       }
-
        return 0;
 }
 
index d6653f5d0830378cd08531afb61c0b766ae8b6b9..5318951b3b531ca322f1a0c3639a9079d3599555 100644 (file)
@@ -189,11 +189,6 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
        struct ib_device_attr *devattr = &ia->ri_devattr;
        int depth, delta;
 
-       /* Obtain an lkey to use for the regbufs, which are
-        * protected from remote access.
-        */
-       ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-
        ia->ri_max_frmr_depth =
                        min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                              devattr->max_fast_reg_page_list_len);
index 72cf8b15bbb4e331d49f937c58abd85f7dd70862..617b76f22154c41b41cdccd329cbbe9f00a3fabe 100644 (file)
@@ -23,7 +23,6 @@ static int
 physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
                 struct rpcrdma_create_data_internal *cdata)
 {
-       struct ib_device_attr *devattr = &ia->ri_devattr;
        struct ib_mr *mr;
 
        /* Obtain an rkey to use for RPC data payloads.
@@ -37,15 +36,8 @@ physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
                       __func__, PTR_ERR(mr));
                return -ENOMEM;
        }
-       ia->ri_dma_mr = mr;
-
-       /* Obtain an lkey to use for regbufs.
-        */
-       if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
-               ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-       else
-               ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
 
+       ia->ri_dma_mr = mr;
        return 0;
 }
 
index cb51742840740f790d24797e585e7fb520646a09..f0c3ff67ca987427136baebf67034ad3bf58a27f 100644 (file)
@@ -136,7 +136,8 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->read_hdr = head;
        pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
-       read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
+       read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
+                    rs_length);
 
        for (pno = 0; pno < pages_needed; pno++) {
                int len = min_t(int, rs_length, PAGE_SIZE - pg_off);
@@ -235,7 +236,8 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->frmr = frmr;
        pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len);
-       read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
+       read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
+                    rs_length);
 
        frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]);
        frmr->direction = DMA_FROM_DEVICE;
@@ -531,7 +533,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
        rqstp->rq_arg.page_base = head->arg.page_base;
 
        /* rq_respages starts after the last arg page */
-       rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
+       rqstp->rq_respages = &rqstp->rq_pages[page_no];
        rqstp->rq_next_page = rqstp->rq_respages + 1;
 
        /* Rebuild rq_arg head and tail. */
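Both rdma_read_chunk_* hunks above apply the same clamp: the bytes that actually fit in pages_needed pages shrink by the offset into the first page, and the read length is the smaller of that room and rs_length. As plain arithmetic, with an invented page size and names:

/* Sketch: bytes that fit in n pages when the data starts offset bytes
 * into the first page.  TOY_PAGE_SHIFT and the names are made up.
 */
#include <stdio.h>

#define TOY_PAGE_SHIFT  12      /* 4 KiB pages for the example */

static unsigned int toy_read_len(unsigned int pages, unsigned int offset,
                                 unsigned int wanted)
{
        unsigned int room = (pages << TOY_PAGE_SHIFT) - offset;

        return wanted < room ? wanted : room;
}

int main(void)
{
        /* 2 pages starting 100 bytes in: only 8092 bytes of room. */
        printf("%u\n", toy_read_len(2, 100, 16384));
        return 0;
}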
index 64443eb754ad0fe7fd0b16633c3aa10cebdc3e26..41e452bc580c0fea0f39fe71924b72dcdff6782f 100644 (file)
@@ -270,8 +270,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
 
        xprt_clear_connected(xprt);
 
-       rpcrdma_buffer_destroy(&r_xprt->rx_buf);
        rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
+       rpcrdma_buffer_destroy(&r_xprt->rx_buf);
        rpcrdma_ia_close(&r_xprt->rx_ia);
 
        xprt_rdma_free_addresses(xprt);
index 682996779970c6ccae749c9de566f06a9b205c80..5502d4dade74aa8646f89305b011d215294352e0 100644 (file)
@@ -543,11 +543,8 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
        }
 
        if (memreg == RPCRDMA_FRMR) {
-               /* Requires both frmr reg and local dma lkey */
-               if (((devattr->device_cap_flags &
-                    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
-                   (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) ||
-                     (devattr->max_fast_reg_page_list_len == 0)) {
+               if (!(devattr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
+                   (devattr->max_fast_reg_page_list_len == 0)) {
                        dprintk("RPC:       %s: FRMR registration "
                                "not supported by HCA\n", __func__);
                        memreg = RPCRDMA_MTHCAFMR;
@@ -557,6 +554,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
                if (!ia->ri_device->alloc_fmr) {
                        dprintk("RPC:       %s: MTHCAFMR registration "
                                "not supported by HCA\n", __func__);
+                       rc = -EINVAL;
                        goto out3;
                }
        }
@@ -755,19 +753,22 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 
        cancel_delayed_work_sync(&ep->rep_connect_worker);
 
-       if (ia->ri_id->qp) {
+       if (ia->ri_id->qp)
                rpcrdma_ep_disconnect(ep, ia);
+
+       rpcrdma_clean_cq(ep->rep_attr.recv_cq);
+       rpcrdma_clean_cq(ep->rep_attr.send_cq);
+
+       if (ia->ri_id->qp) {
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }
 
-       rpcrdma_clean_cq(ep->rep_attr.recv_cq);
        rc = ib_destroy_cq(ep->rep_attr.recv_cq);
        if (rc)
                dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
                        __func__, rc);
 
-       rpcrdma_clean_cq(ep->rep_attr.send_cq);
        rc = ib_destroy_cq(ep->rep_attr.send_cq);
        if (rc)
                dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
@@ -1252,7 +1253,7 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
                goto out_free;
 
        iov->length = size;
-       iov->lkey = ia->ri_dma_lkey;
+       iov->lkey = ia->ri_pd->local_dma_lkey;
        rb->rg_size = size;
        rb->rg_owner = NULL;
        return rb;
index 02512221b8bc885dde93b987c561b344f6e96722..c09414e6f91b0428bd7cc5fd6f2b1c67f8830b23 100644 (file)
@@ -65,7 +65,6 @@ struct rpcrdma_ia {
        struct rdma_cm_id       *ri_id;
        struct ib_pd            *ri_pd;
        struct ib_mr            *ri_dma_mr;
-       u32                     ri_dma_lkey;
        struct completion       ri_done;
        int                     ri_async_rc;
        unsigned int            ri_max_frmr_depth;
index fda38f830a10869713177220f8d2066c6076da0c..77f5d17e261230a6db4f261445311eafb37c7b06 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/notifier.h>
 #include <linux/netdevice.h>
 #include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
 #include <net/ip_fib.h>
 #include <net/switchdev.h>
 
@@ -634,6 +635,8 @@ static int switchdev_port_br_afspec(struct net_device *dev,
                if (nla_len(attr) != sizeof(struct bridge_vlan_info))
                        return -EINVAL;
                vinfo = nla_data(attr);
+               if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
+                       return -EINVAL;
                vlan->flags = vinfo->flags;
                if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
                        if (vlan->vid_begin)
index e7000be321b0148469264524ed6fce75c3952955..ed98c1fc3de1428560ea370413084102af9dff7f 100644 (file)
@@ -94,10 +94,14 @@ __init int net_sysctl_init(void)
                goto out;
        ret = register_pernet_subsys(&sysctl_pernet_ops);
        if (ret)
-               goto out;
+               goto out1;
        register_sysctl_root(&net_sysctl_root);
 out:
        return ret;
+out1:
+       unregister_sysctl_table(net_header);
+       net_header = NULL;
+       goto out;
 }
 
 struct ctl_table_header *register_net_sysctl(struct net *net,
index 41042de3ae9bcfad4504e0bcbb29d3bea4512bb5..eadba62afa85dcd9463f5eddd08704539fa63488 100644 (file)
@@ -42,7 +42,8 @@
 #include "core.h"
 
 #define        MAX_PKT_DEFAULT_MCAST   1500    /* bcast link max packet size (fixed) */
-#define        BCLINK_WIN_DEFAULT      20      /* bcast link window size (default) */
+#define        BCLINK_WIN_DEFAULT      50      /* bcast link window size (default) */
+#define        BCLINK_WIN_MIN          32      /* bcast minimum link window size */
 
 const char tipc_bclink_name[] = "broadcast-link";
 
@@ -908,9 +909,10 @@ int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
 
        if (!bcl)
                return -ENOPROTOOPT;
-       if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
+       if (limit < BCLINK_WIN_MIN)
+               limit = BCLINK_WIN_MIN;
+       if (limit > TIPC_MAX_LINK_WIN)
                return -EINVAL;
-
        tipc_bclink_lock(net);
        tipc_link_set_queue_limits(bcl, limit);
        tipc_bclink_unlock(net);
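tipc_bclink_set_queue_limits() now treats its two bounds asymmetrically: a request below the new BCLINK_WIN_MIN is quietly raised to the minimum, while anything above TIPC_MAX_LINK_WIN is still rejected. The same logic as a standalone sketch (TOY_WIN_MIN and TOY_WIN_MAX are made-up stand-ins, not TIPC's values):

/* Sketch: raise a too-small request to the floor, reject a too-large
 * one, and leave the window untouched on error.
 */
#include <stdio.h>

#define TOY_WIN_MIN     32
#define TOY_WIN_MAX     8191

static int toy_set_window(unsigned int limit, unsigned int *win)
{
        if (limit < TOY_WIN_MIN)
                limit = TOY_WIN_MIN;
        if (limit > TOY_WIN_MAX)
                return -1;      /* caller sees an error, window unchanged */
        *win = limit;
        return 0;
}

int main(void)
{
        unsigned int win = 0;

        printf("set 20   -> rc %d, win %u\n", toy_set_window(20, &win), win);
        printf("set 9000 -> rc %d, win %u\n", toy_set_window(9000, &win), win);
        return 0;
}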
index c5ac436235e0823c016123394fef6a0cf321092c..5f73450159df3b7d99349e2b49610b7d9a1d6c47 100644 (file)
@@ -121,7 +121,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 {
        struct sk_buff *head = *headbuf;
        struct sk_buff *frag = *buf;
-       struct sk_buff *tail;
+       struct sk_buff *tail = NULL;
        struct tipc_msg *msg;
        u32 fragid;
        int delta;
@@ -141,9 +141,15 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
                if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
                        goto err;
                head = *headbuf = frag;
-               skb_frag_list_init(head);
-               TIPC_SKB_CB(head)->tail = NULL;
                *buf = NULL;
+               TIPC_SKB_CB(head)->tail = NULL;
+               if (skb_is_nonlinear(head)) {
+                       skb_walk_frags(head, tail) {
+                               TIPC_SKB_CB(head)->tail = tail;
+                       }
+               } else {
+                       skb_frag_list_init(head);
+               }
                return 0;
        }
 
index a82c5848d4bc22129bd1e6ba7f677795febdc9e9..5351a3f97e8ecf17e545459344a8b315f522045a 100644 (file)
@@ -357,7 +357,7 @@ static inline u32 msg_importance(struct tipc_msg *m)
        if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m)))
                return usr;
        if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))
-               return msg_bits(m, 5, 13, 0x7);
+               return msg_bits(m, 9, 0, 0x7);
        return TIPC_SYSTEM_IMPORTANCE;
 }
 
@@ -366,7 +366,7 @@ static inline void msg_set_importance(struct tipc_msg *m, u32 i)
        int usr = msg_user(m);
 
        if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)))
-               msg_set_bits(m, 5, 13, 0x7, i);
+               msg_set_bits(m, 9, 0, 0x7, i);
        else if (i < TIPC_SYSTEM_IMPORTANCE)
                msg_set_user(m, i);
        else
index 703875fd6cde204ddeaf630b9a6bd11daec6dbfa..2c32a83037a3614ef09fb4b29c8f3337dcdf0f45 100644 (file)
@@ -1116,7 +1116,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
        }
 
        /* Ignore duplicate packets */
-       if (less(oseqno, rcv_nxt))
+       if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
                return true;
 
        /* Initiate or update failover mode if applicable */
@@ -1146,8 +1146,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
        if (!pl || !tipc_link_is_up(pl))
                return true;
 
-       /* Initiate or update synch mode if applicable */
-       if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
+       /* Initiate synch mode if applicable */
+       if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
                syncpt = iseqno + exp_pkts - 1;
                if (!tipc_link_is_up(l)) {
                        tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
index c170d3138953a2361df5439aeffadd29afa52ad9..6e648d90297a9ecc69a23a68183fde658eed63cd 100644 (file)
@@ -52,6 +52,8 @@
 /* IANA assigned UDP port */
 #define UDP_PORT_DEFAULT       6118
 
+#define UDP_MIN_HEADROOM        28
+
 static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
        [TIPC_NLA_UDP_UNSPEC]   = {.type = NLA_UNSPEC},
        [TIPC_NLA_UDP_LOCAL]    = {.type = NLA_BINARY,
@@ -156,6 +158,9 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
        struct sk_buff *clone;
        struct rtable *rt;
 
+       if (skb_headroom(skb) < UDP_MIN_HEADROOM)
+               pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+
        clone = skb_clone(skb, GFP_ATOMIC);
        skb_set_inner_protocol(clone, htons(ETH_P_TIPC));
        ub = rcu_dereference_rtnl(b->media_ptr);
index 03ee4d359f6a4922397a1a8a36c015a06aae1cac..94f658235fb49a0c644a84867302cc00a75243d5 100644 (file)
@@ -2064,6 +2064,11 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
                goto out;
        }
 
+       if (flags & MSG_PEEK)
+               skip = sk_peek_offset(sk, flags);
+       else
+               skip = 0;
+
        do {
                int chunk;
                struct sk_buff *skb, *last;
@@ -2112,7 +2117,6 @@ unlock:
                        break;
                }
 
-               skip = sk_peek_offset(sk, flags);
                while (skip >= unix_skb_len(skb)) {
                        skip -= unix_skb_len(skb);
                        last = skb;
@@ -2181,6 +2185,17 @@ unlock:
 
                        sk_peek_offset_fwd(sk, chunk);
 
+                       if (UNIXCB(skb).fp)
+                               break;
+
+                       skip = 0;
+                       last = skb;
+                       last_len = skb->len;
+                       unix_state_lock(sk);
+                       skb = skb_peek_next(skb, &sk->sk_receive_queue);
+                       if (skb)
+                               goto again;
+                       unix_state_unlock(sk);
                        break;
                }
        } while (size);
index df5fc6b340f1bbde621fbe84fa44e0af6e2e00af..00e8a349aabccc61cec1f9ebb889bc7dd9d3b5e1 100644 (file)
@@ -1948,13 +1948,13 @@ int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
        err = misc_register(&vsock_device);
        if (err) {
                pr_err("Failed to register misc device\n");
-               return -ENOENT;
+               goto err_reset_transport;
        }
 
        err = proto_register(&vsock_proto, 1);  /* we want our slab */
        if (err) {
                pr_err("Cannot register vsock protocol\n");
-               goto err_misc_deregister;
+               goto err_deregister_misc;
        }
 
        err = sock_register(&vsock_family_ops);
@@ -1969,8 +1969,9 @@ int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
 
 err_unregister_proto:
        proto_unregister(&vsock_proto);
-err_misc_deregister:
+err_deregister_misc:
        misc_deregister(&vsock_device);
+err_reset_transport:
        transport = NULL;
 err_busy:
        mutex_unlock(&vsock_register_mutex);
index 1f63daff39659e08561862cfd71220ffc6949291..7555cad83a752a930a54e4a8ca609846386e0ec1 100644 (file)
 
 static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
 static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
-static void vmci_transport_peer_attach_cb(u32 sub_id,
-                                         const struct vmci_event_data *ed,
-                                         void *client_data);
 static void vmci_transport_peer_detach_cb(u32 sub_id,
                                          const struct vmci_event_data *ed,
                                          void *client_data);
 static void vmci_transport_recv_pkt_work(struct work_struct *work);
+static void vmci_transport_cleanup(struct work_struct *work);
 static int vmci_transport_recv_listen(struct sock *sk,
                                      struct vmci_transport_packet *pkt);
 static int vmci_transport_recv_connecting_server(
@@ -75,6 +73,10 @@ struct vmci_transport_recv_pkt_info {
        struct vmci_transport_packet pkt;
 };
 
+static LIST_HEAD(vmci_transport_cleanup_list);
+static DEFINE_SPINLOCK(vmci_transport_cleanup_lock);
+static DECLARE_WORK(vmci_transport_cleanup_work, vmci_transport_cleanup);
+
 static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
                                                           VMCI_INVALID_ID };
 static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
@@ -791,44 +793,6 @@ out:
        return err;
 }
 
-static void vmci_transport_peer_attach_cb(u32 sub_id,
-                                         const struct vmci_event_data *e_data,
-                                         void *client_data)
-{
-       struct sock *sk = client_data;
-       const struct vmci_event_payload_qp *e_payload;
-       struct vsock_sock *vsk;
-
-       e_payload = vmci_event_data_const_payload(e_data);
-
-       vsk = vsock_sk(sk);
-
-       /* We don't ask for delayed CBs when we subscribe to this event (we
-        * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
-        * guarantees in that case about what context we might be running in,
-        * so it could be BH or process, blockable or non-blockable.  So we
-        * need to account for all possible contexts here.
-        */
-       local_bh_disable();
-       bh_lock_sock(sk);
-
-       /* XXX This is lame, we should provide a way to lookup sockets by
-        * qp_handle.
-        */
-       if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
-                                e_payload->handle)) {
-               /* XXX This doesn't do anything, but in the future we may want
-                * to set a flag here to verify the attach really did occur and
-                * we weren't just sent a datagram claiming it was.
-                */
-               goto out;
-       }
-
-out:
-       bh_unlock_sock(sk);
-       local_bh_enable();
-}
-
 static void vmci_transport_handle_detach(struct sock *sk)
 {
        struct vsock_sock *vsk;
@@ -871,28 +835,38 @@ static void vmci_transport_peer_detach_cb(u32 sub_id,
                                          const struct vmci_event_data *e_data,
                                          void *client_data)
 {
-       struct sock *sk = client_data;
+       struct vmci_transport *trans = client_data;
        const struct vmci_event_payload_qp *e_payload;
-       struct vsock_sock *vsk;
 
        e_payload = vmci_event_data_const_payload(e_data);
-       vsk = vsock_sk(sk);
-       if (vmci_handle_is_invalid(e_payload->handle))
-               return;
-
-       /* Same rules for locking as for peer_attach_cb(). */
-       local_bh_disable();
-       bh_lock_sock(sk);
 
        /* XXX This is lame, we should provide a way to lookup sockets by
         * qp_handle.
         */
-       if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
-                                e_payload->handle))
-               vmci_transport_handle_detach(sk);
+       if (vmci_handle_is_invalid(e_payload->handle) ||
+           vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+               return;
 
-       bh_unlock_sock(sk);
-       local_bh_enable();
+       /* We don't ask for delayed CBs when we subscribe to this event (we
+        * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
+        * guarantees in that case about what context we might be running in,
+        * so it could be BH or process, blockable or non-blockable.  So we
+        * need to account for all possible contexts here.
+        */
+       spin_lock_bh(&trans->lock);
+       if (!trans->sk)
+               goto out;
+
+       /* Apart from here, trans->lock is only grabbed as part of sk destruct,
+        * where trans->sk isn't locked.
+        */
+       bh_lock_sock(trans->sk);
+
+       vmci_transport_handle_detach(trans->sk);
+
+       bh_unlock_sock(trans->sk);
+ out:
+       spin_unlock_bh(&trans->lock);
 }
 
 static void vmci_transport_qp_resumed_cb(u32 sub_id,
@@ -1181,7 +1155,7 @@ vmci_transport_recv_connecting_server(struct sock *listener,
         */
        err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
                                   vmci_transport_peer_detach_cb,
-                                  pending, &detach_sub_id);
+                                  vmci_trans(vpending), &detach_sub_id);
        if (err < VMCI_SUCCESS) {
                vmci_transport_send_reset(pending, pkt);
                err = vmci_transport_error_to_vsock_error(err);
@@ -1321,7 +1295,6 @@ vmci_transport_recv_connecting_client(struct sock *sk,
                    || vmci_trans(vsk)->qpair
                    || vmci_trans(vsk)->produce_size != 0
                    || vmci_trans(vsk)->consume_size != 0
-                   || vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID
                    || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
                        skerr = EPROTO;
                        err = -EINVAL;
@@ -1389,7 +1362,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
        struct vsock_sock *vsk;
        struct vmci_handle handle;
        struct vmci_qp *qpair;
-       u32 attach_sub_id;
        u32 detach_sub_id;
        bool is_local;
        u32 flags;
@@ -1399,7 +1371,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
 
        vsk = vsock_sk(sk);
        handle = VMCI_INVALID_HANDLE;
-       attach_sub_id = VMCI_INVALID_ID;
        detach_sub_id = VMCI_INVALID_ID;
 
        /* If we have gotten here then we should be past the point where old
@@ -1444,23 +1415,15 @@ static int vmci_transport_recv_connecting_client_negotiate(
                goto destroy;
        }
 
-       /* Subscribe to attach and detach events first.
+       /* Subscribe to detach events first.
         *
         * XXX We attach once for each queue pair created for now so it is easy
         * to find the socket (it's provided), but later we should only
         * subscribe once and add a way to lookup sockets by queue pair handle.
         */
-       err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
-                                  vmci_transport_peer_attach_cb,
-                                  sk, &attach_sub_id);
-       if (err < VMCI_SUCCESS) {
-               err = vmci_transport_error_to_vsock_error(err);
-               goto destroy;
-       }
-
        err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
                                   vmci_transport_peer_detach_cb,
-                                  sk, &detach_sub_id);
+                                  vmci_trans(vsk), &detach_sub_id);
        if (err < VMCI_SUCCESS) {
                err = vmci_transport_error_to_vsock_error(err);
                goto destroy;
@@ -1496,7 +1459,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
        vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
                pkt->u.size;
 
-       vmci_trans(vsk)->attach_sub_id = attach_sub_id;
        vmci_trans(vsk)->detach_sub_id = detach_sub_id;
 
        vmci_trans(vsk)->notify_ops->process_negotiate(sk);
@@ -1504,9 +1466,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
        return 0;
 
 destroy:
-       if (attach_sub_id != VMCI_INVALID_ID)
-               vmci_event_unsubscribe(attach_sub_id);
-
        if (detach_sub_id != VMCI_INVALID_ID)
                vmci_event_unsubscribe(detach_sub_id);
 
@@ -1607,9 +1566,11 @@ static int vmci_transport_socket_init(struct vsock_sock *vsk,
        vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
        vmci_trans(vsk)->qpair = NULL;
        vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
-       vmci_trans(vsk)->attach_sub_id = vmci_trans(vsk)->detach_sub_id =
-               VMCI_INVALID_ID;
+       vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
        vmci_trans(vsk)->notify_ops = NULL;
+       INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
+       vmci_trans(vsk)->sk = &vsk->sk;
+       spin_lock_init(&vmci_trans(vsk)->lock);
        if (psk) {
                vmci_trans(vsk)->queue_pair_size =
                        vmci_trans(psk)->queue_pair_size;
@@ -1629,29 +1590,57 @@ static int vmci_transport_socket_init(struct vsock_sock *vsk,
        return 0;
 }
 
-static void vmci_transport_destruct(struct vsock_sock *vsk)
+static void vmci_transport_free_resources(struct list_head *transport_list)
 {
-       if (vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID) {
-               vmci_event_unsubscribe(vmci_trans(vsk)->attach_sub_id);
-               vmci_trans(vsk)->attach_sub_id = VMCI_INVALID_ID;
-       }
+       while (!list_empty(transport_list)) {
+               struct vmci_transport *transport =
+                   list_first_entry(transport_list, struct vmci_transport,
+                                    elem);
+               list_del(&transport->elem);
 
-       if (vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
-               vmci_event_unsubscribe(vmci_trans(vsk)->detach_sub_id);
-               vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
-       }
+               if (transport->detach_sub_id != VMCI_INVALID_ID) {
+                       vmci_event_unsubscribe(transport->detach_sub_id);
+                       transport->detach_sub_id = VMCI_INVALID_ID;
+               }
 
-       if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
-               vmci_qpair_detach(&vmci_trans(vsk)->qpair);
-               vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
-               vmci_trans(vsk)->produce_size = 0;
-               vmci_trans(vsk)->consume_size = 0;
+               if (!vmci_handle_is_invalid(transport->qp_handle)) {
+                       vmci_qpair_detach(&transport->qpair);
+                       transport->qp_handle = VMCI_INVALID_HANDLE;
+                       transport->produce_size = 0;
+                       transport->consume_size = 0;
+               }
+
+               kfree(transport);
        }
+}
+
+static void vmci_transport_cleanup(struct work_struct *work)
+{
+       LIST_HEAD(pending);
+
+       spin_lock_bh(&vmci_transport_cleanup_lock);
+       list_replace_init(&vmci_transport_cleanup_list, &pending);
+       spin_unlock_bh(&vmci_transport_cleanup_lock);
+       vmci_transport_free_resources(&pending);
+}
+
+static void vmci_transport_destruct(struct vsock_sock *vsk)
+{
+       /* Ensure that the detach callback doesn't use the sk/vsk
+        * we are about to destruct.
+        */
+       spin_lock_bh(&vmci_trans(vsk)->lock);
+       vmci_trans(vsk)->sk = NULL;
+       spin_unlock_bh(&vmci_trans(vsk)->lock);
 
        if (vmci_trans(vsk)->notify_ops)
                vmci_trans(vsk)->notify_ops->socket_destruct(vsk);
 
-       kfree(vsk->trans);
+       spin_lock_bh(&vmci_transport_cleanup_lock);
+       list_add(&vmci_trans(vsk)->elem, &vmci_transport_cleanup_list);
+       spin_unlock_bh(&vmci_transport_cleanup_lock);
+       schedule_work(&vmci_transport_cleanup_work);
+
        vsk->trans = NULL;
 }
 
@@ -2146,6 +2135,9 @@ module_init(vmci_transport_init);
 
 static void __exit vmci_transport_exit(void)
 {
+       cancel_work_sync(&vmci_transport_cleanup_work);
+       vmci_transport_free_resources(&vmci_transport_cleanup_list);
+
        if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
                if (vmci_datagram_destroy_handle(
                        vmci_transport_stream_handle) != VMCI_SUCCESS)
@@ -2164,6 +2156,7 @@ module_exit(vmci_transport_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
+MODULE_VERSION("1.0.2.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
index ce6c9623d5f069029ce58294bcc7de9bc3728fcd..2ad46f39649f8130d9f19ce48ccf8a8b4309797d 100644 (file)
@@ -119,10 +119,12 @@ struct vmci_transport {
        u64 queue_pair_size;
        u64 queue_pair_min_size;
        u64 queue_pair_max_size;
-       u32 attach_sub_id;
        u32 detach_sub_id;
        union vmci_transport_notify notify;
        struct vmci_transport_notify_ops *notify_ops;
+       struct list_head elem;
+       struct sock *sk;
+       spinlock_t lock; /* protects sk. */
 };
 
 int vmci_transport_register(void);
index a8de9e3002000d7eaa76f6764797e5b231d187ff..24e06a2377f6b3601003157d22dd05bb0278f145 100644 (file)
@@ -1928,8 +1928,10 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
        struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
        struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
+       struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
+       struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
 
-       if (!lt && !rp && !re)
+       if (!lt && !rp && !re && !et && !rt)
                return err;
 
        /* pedantic mode - thou shalt sayeth replaceth */
index 3a44d3a272af40119b379fe1930244909499fd8d..af44e564d6ddc1278e6edce3c6b18a7879c76ecd 100644 (file)
@@ -86,5 +86,17 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag
 #define PT_REGS_RC(x) ((x)->gprs[2])
 #define PT_REGS_SP(x) ((x)->gprs[15])
 
+#elif defined(__aarch64__)
+
+#define PT_REGS_PARM1(x) ((x)->regs[0])
+#define PT_REGS_PARM2(x) ((x)->regs[1])
+#define PT_REGS_PARM3(x) ((x)->regs[2])
+#define PT_REGS_PARM4(x) ((x)->regs[3])
+#define PT_REGS_PARM5(x) ((x)->regs[4])
+#define PT_REGS_RET(x) ((x)->regs[30])
+#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_RC(x) ((x)->regs[0])
+#define PT_REGS_SP(x) ((x)->sp)
+
 #endif
 #endif
index 9119ac6a82702972d5973c0da0a1e9399d665b8a..c285a3b8a9f1a5d88e4fc229368ac6ad25473f29 100644 (file)
@@ -1,13 +1,13 @@
 /*
  * Here's a sample kernel module showing the use of jprobes to dump
- * the arguments of do_fork().
+ * the arguments of _do_fork().
  *
  * For more information on theory of operation of jprobes, see
  * Documentation/kprobes.txt
  *
  * Build and insert the kernel module as done in the kprobe example.
  * You will see the trace data in /var/log/messages and on the
- * console whenever do_fork() is invoked to create a new process.
+ * console whenever _do_fork() is invoked to create a new process.
  * (Some messages may be suppressed if syslogd is configured to
  * eliminate duplicate messages.)
  */
 #include <linux/kprobes.h>
 
 /*
- * Jumper probe for do_fork.
+ * Jumper probe for _do_fork.
  * Mirror principle enables access to arguments of the probed routine
  * from the probe handler.
  */
 
-/* Proxy routine having the same arguments as actual do_fork() routine */
-static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
+/* Proxy routine having the same arguments as actual _do_fork() routine */
+static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
              unsigned long stack_size, int __user *parent_tidptr,
              int __user *child_tidptr)
 {
@@ -36,9 +36,9 @@ static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 }
 
 static struct jprobe my_jprobe = {
-       .entry                  = jdo_fork,
+       .entry                  = j_do_fork,
        .kp = {
-               .symbol_name    = "do_fork",
+               .symbol_name    = "_do_fork",
        },
 };
 
index 366db1a9fb65b5662ffdb14564750c60b6f012c7..727eb21c9c5624f2998f321d59db1017339c69a3 100644 (file)
@@ -1,13 +1,13 @@
 /*
 * NOTE: This example works on x86 and powerpc.
  * Here's a sample kernel module showing the use of kprobes to dump a
- * stack trace and selected registers when do_fork() is called.
+ * stack trace and selected registers when _do_fork() is called.
  *
  * For more information on theory of operation of kprobes, see
  * Documentation/kprobes.txt
  *
  * You will see the trace data in /var/log/messages and on the console
- * whenever do_fork() is invoked to create a new process.
+ * whenever _do_fork() is invoked to create a new process.
  */
 
 #include <linux/kernel.h>
@@ -16,7 +16,7 @@
 
 /* For each probe you need to allocate a kprobe structure */
 static struct kprobe kp = {
-       .symbol_name    = "do_fork",
+       .symbol_name    = "_do_fork",
 };
 
 /* kprobe pre_handler: called just before the probed instruction is executed */
index 1041b6731598137d22752334054cd9054fcdabb6..ebb1d1aed54782f2e4a0126e1de886c63767d056 100644 (file)
@@ -7,7 +7,7 @@
  *
  * usage: insmod kretprobe_example.ko func=<func_name>
  *
- * If no func_name is specified, do_fork is instrumented
+ * If no func_name is specified, _do_fork is instrumented
  *
  * For more information on theory of operation of kretprobes, see
  * Documentation/kprobes.txt
@@ -25,7 +25,7 @@
 #include <linux/limits.h>
 #include <linux/sched.h>
 
-static char func_name[NAME_MAX] = "do_fork";
+static char func_name[NAME_MAX] = "_do_fork";
 module_param_string(func, func_name, NAME_MAX, S_IRUGO);
 MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the"
                        " function's execution time");
index 6ce5945a0b892e79f2e98e645615cbc37b79f4d1..b071bf476fea7ede6aab95730f5844ac1bf5caa5 100644 (file)
 #include <stdint.h>
 #include <stdbool.h>
 #include <string.h>
-#include <getopt.h>
 #include <err.h>
-#include <arpa/inet.h>
 #include <openssl/bio.h>
-#include <openssl/evp.h>
 #include <openssl/pem.h>
-#include <openssl/pkcs7.h>
 #include <openssl/err.h>
 #include <openssl/engine.h>
 
index 0cd46e129920e8ad114eb335ae92ce85230d068d..b967e4f9fed2e6cc78b9538c79517a473fc375b8 100755 (executable)
@@ -115,7 +115,7 @@ esac
 BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)"
 
 # Setup the directory structure
-rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir"
+rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files
 mkdir -m 755 -p "$tmpdir/DEBIAN"
 mkdir -p "$tmpdir/lib" "$tmpdir/boot"
 mkdir -p "$fwdir/lib/firmware/$version/"
@@ -408,7 +408,7 @@ binary-arch:
        \$(MAKE) KDEB_SOURCENAME=${sourcename} KDEB_PKGVERSION=${packageversion} bindeb-pkg
 
 clean:
-       rm -rf debian/*tmp
+       rm -rf debian/*tmp debian/files
        mv debian/ debian.backup # debian/ might be cleaned away
        \$(MAKE) clean
        mv debian.backup debian
index c3899ca4811cc2e0add94f8f31e31b93be432fff..250a7a6450331ae0805aaf3100da46af66dd4827 100755 (executable)
 #include <getopt.h>
 #include <err.h>
 #include <arpa/inet.h>
+#include <openssl/opensslv.h>
 #include <openssl/bio.h>
 #include <openssl/evp.h>
 #include <openssl/pem.h>
-#include <openssl/cms.h>
 #include <openssl/err.h>
 #include <openssl/engine.h>
 
+/*
+ * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to
+ * assume that it's not available and its header file is missing and that we
+ * should use PKCS#7 instead.  Switching to the older PKCS#7 format restricts
+ * the options we have on specifying the X.509 certificate we want.
+ *
+ * Further, older versions of OpenSSL don't support manually adding signers to
+ * the PKCS#7 message, so we have to accept that we get a certificate included in
+ * the signature message.  Nor do such older versions of OpenSSL support
+ * signing with anything other than SHA1 - so we're stuck with that if such is
+ * the case.
+ */
+#if OPENSSL_VERSION_NUMBER < 0x10000000L
+#define USE_PKCS7
+#endif
+#ifndef USE_PKCS7
+#include <openssl/cms.h>
+#else
+#include <openssl/pkcs7.h>
+#endif
+
 struct module_signature {
        uint8_t         algo;           /* Public-key crypto algorithm [0] */
        uint8_t         hash;           /* Digest algorithm [0] */
@@ -110,30 +131,42 @@ int main(int argc, char **argv)
        struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 };
        char *hash_algo = NULL;
        char *private_key_name, *x509_name, *module_name, *dest_name;
-       bool save_cms = false, replace_orig;
+       bool save_sig = false, replace_orig;
        bool sign_only = false;
        unsigned char buf[4096];
-       unsigned long module_size, cms_size;
-       unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR;
+       unsigned long module_size, sig_size;
+       unsigned int use_signed_attrs;
        const EVP_MD *digest_algo;
        EVP_PKEY *private_key;
+#ifndef USE_PKCS7
        CMS_ContentInfo *cms;
+       unsigned int use_keyid = 0;
+#else
+       PKCS7 *pkcs7;
+#endif
        X509 *x509;
        BIO *b, *bd = NULL, *bm;
        int opt, n;
-
        OpenSSL_add_all_algorithms();
        ERR_load_crypto_strings();
        ERR_clear_error();
 
        key_pass = getenv("KBUILD_SIGN_PIN");
 
+#ifndef USE_PKCS7
+       use_signed_attrs = CMS_NOATTR;
+#else
+       use_signed_attrs = PKCS7_NOATTR;
+#endif
+
        do {
                opt = getopt(argc, argv, "dpk");
                switch (opt) {
-               case 'p': save_cms = true; break;
-               case 'd': sign_only = true; save_cms = true; break;
+               case 'p': save_sig = true; break;
+               case 'd': sign_only = true; save_sig = true; break;
+#ifndef USE_PKCS7
                case 'k': use_keyid = CMS_USE_KEYID; break;
+#endif
                case -1: break;
                default: format();
                }
@@ -157,6 +190,14 @@ int main(int argc, char **argv)
                replace_orig = true;
        }
 
+#ifdef USE_PKCS7
+       if (strcmp(hash_algo, "sha1") != 0) {
+               fprintf(stderr, "sign-file: %s only supports SHA1 signing\n",
+                       OPENSSL_VERSION_TEXT);
+               exit(3);
+       }
+#endif
+
        /* Read the private key and the X.509 cert the PKCS#7 message
         * will point to.
         */
@@ -213,7 +254,8 @@ int main(int argc, char **argv)
        bm = BIO_new_file(module_name, "rb");
        ERR(!bm, "%s", module_name);
 
-       /* Load the CMS message from the digest buffer. */
+#ifndef USE_PKCS7
+       /* Load the signature message from the digest buffer. */
        cms = CMS_sign(NULL, NULL, NULL, NULL,
                       CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM);
        ERR(!cms, "CMS_sign");
@@ -221,17 +263,31 @@ int main(int argc, char **argv)
        ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo,
                             CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP |
                             use_keyid | use_signed_attrs),
-           "CMS_sign_add_signer");
+           "CMS_add1_signer");
        ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0,
            "CMS_final");
 
-       if (save_cms) {
-               char *cms_name;
+#else
+       pkcs7 = PKCS7_sign(x509, private_key, NULL, bm,
+                          PKCS7_NOCERTS | PKCS7_BINARY |
+                          PKCS7_DETACHED | use_signed_attrs);
+       ERR(!pkcs7, "PKCS7_sign");
+#endif
 
-               ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf");
-               b = BIO_new_file(cms_name, "wb");
-               ERR(!b, "%s", cms_name);
-               ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name);
+       if (save_sig) {
+               char *sig_file_name;
+
+               ERR(asprintf(&sig_file_name, "%s.p7s", module_name) < 0,
+                   "asprintf");
+               b = BIO_new_file(sig_file_name, "wb");
+               ERR(!b, "%s", sig_file_name);
+#ifndef USE_PKCS7
+               ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0,
+                   "%s", sig_file_name);
+#else
+               ERR(i2d_PKCS7_bio(b, pkcs7) < 0,
+                       "%s", sig_file_name);
+#endif
                BIO_free(b);
        }
 
@@ -247,9 +303,13 @@ int main(int argc, char **argv)
        ERR(n < 0, "%s", module_name);
        module_size = BIO_number_written(bd);
 
+#ifndef USE_PKCS7
        ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name);
-       cms_size = BIO_number_written(bd) - module_size;
-       sig_info.sig_len = htonl(cms_size);
+#else
+       ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name);
+#endif
+       sig_size = BIO_number_written(bd) - module_size;
+       sig_info.sig_len = htonl(sig_size);
        ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name);
        ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name);
 
index c7952375ac5325cfb4c403fa1020671b5f31a150..addf060399e09547307d9c023f36d8dbf869a931 100644 (file)
@@ -134,6 +134,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                kdebug("- %u", key->serial);
                key_check(key);
 
+               /* Throw away the key data if the key is instantiated */
+               if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
+                   !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
+                   key->type->destroy)
+                       key->type->destroy(key);
+
                security_key_free(key);
 
                /* deal with the user's key tracking and quota */
@@ -148,10 +154,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                        atomic_dec(&key->user->nikeys);
 
-               /* now throw away the key memory */
-               if (key->type->destroy)
-                       key->type->destroy(key);
-
                key_user_put(key->user);
 
                kfree(key->description);
index 486ef6fa393b2cc9d8ceb8cb11f187f97730bd90..0d625312427831b63ed18784719cd0b424f03a41 100644 (file)
@@ -440,6 +440,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
 
        kenter("");
 
+       if (ctx->index_key.type == &key_type_keyring)
+               return ERR_PTR(-EPERM);
+       
        user = key_user_lookup(current_fsuid());
        if (!user)
                return ERR_PTR(-ENOMEM);
index 4449d1a990893db4078a102b9da0f92835d5add3..2433f7c81472848be51b9af420ec198b523871b1 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 #include <sound/hdaudio_ext.h>
 
 MODULE_DESCRIPTION("HDA extended core");
index 37f43a1b34ef1f5f48682230055c0f01d9a07449..a249d5486889dca683af566e0818d95ee49b12ae 100644 (file)
@@ -3367,10 +3367,8 @@ int snd_hda_codec_build_pcms(struct hda_codec *codec)
        int dev, err;
 
        err = snd_hda_codec_parse_pcms(codec);
-       if (err < 0) {
-               snd_hda_codec_reset(codec);
+       if (err < 0)
                return err;
-       }
 
        /* attach a new PCM streams */
        list_for_each_entry(cpcm, &codec->pcm_list_head, list) {
index 584a0343ab0cc132b7c2038679923b6617926cf7..85813de26da87715df7d1d259339e30450c7815a 100644 (file)
@@ -633,6 +633,7 @@ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
        SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
        SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
+       SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
        {} /* terminator */
 };
 
index ca03c40609fcf09d8838ed05f4920de3f03a6cf7..2f0ec7c45fc70d6232339761e5773349d24213a9 100644 (file)
@@ -819,6 +819,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
+       SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
index afec6dc9f91fddcf8c0023b344771307aa684d3a..16b8dcba5c12d2d13ed7c80c4e6f93df69d9944f 100644 (file)
@@ -5306,6 +5306,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
index 9d947aef2c8b60b99f63fd9464ad289bef0c7061..def5cc8dff0293c2f70c4c3fcf67da1aea1bf55e 100644 (file)
@@ -4520,7 +4520,11 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
                return err;
 
        spec = codec->spec;
-       codec->power_save_node = 1;
+       /* enable power_save_node only for new 92HD89xx chips, as it causes
+        * click noises on old 92HD73xx chips.
+        */
+       if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670)
+               codec->power_save_node = 1;
        spec->linear_tone_beep = 0;
        spec->gen.mixer_nid = 0x1d;
        spec->have_spdif_mux = 1;
index 58c3164802b8ceda545e1469f557a814702694be..8c907ebea18960ec8e48942f5746fd2121705736 100644 (file)
@@ -129,6 +129,8 @@ static struct snd_soc_dai_link db1300_i2s_dai = {
        .cpu_dai_name   = "au1xpsc_i2s.2",
        .platform_name  = "au1xpsc-pcm.2",
        .codec_name     = "wm8731.0-001b",
+       .dai_fmt        = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
+                         SND_SOC_DAIFMT_CBM_CFM,
        .ops            = &db1200_i2s_wm8731_ops,
 };
 
@@ -146,6 +148,8 @@ static struct snd_soc_dai_link db1550_i2s_dai = {
        .cpu_dai_name   = "au1xpsc_i2s.3",
        .platform_name  = "au1xpsc-pcm.3",
        .codec_name     = "wm8731.0-001b",
+       .dai_fmt        = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
+                         SND_SOC_DAIFMT_CBM_CFM,
        .ops            = &db1200_i2s_wm8731_ops,
 };
 
index 3c2f0f8d6266e358a667aa376fea3d094829845a..f823eb502367dccad4b93e71ac7e06d30c0bccdf 100644 (file)
@@ -50,24 +50,24 @@ struct rt298_priv {
 };
 
 static struct reg_default rt298_index_def[] = {
-       { 0x01, 0xaaaa },
-       { 0x02, 0x8aaa },
+       { 0x01, 0xa5a8 },
+       { 0x02, 0x8e95 },
        { 0x03, 0x0002 },
-       { 0x04, 0xaf01 },
-       { 0x08, 0x000d },
-       { 0x09, 0xd810 },
-       { 0x0a, 0x0120 },
+       { 0x04, 0xaf67 },
+       { 0x08, 0x200f },
+       { 0x09, 0xd010 },
+       { 0x0a, 0x0100 },
        { 0x0b, 0x0000 },
        { 0x0d, 0x2800 },
-       { 0x0f, 0x0000 },
-       { 0x19, 0x0a17 },
+       { 0x0f, 0x0022 },
+       { 0x19, 0x0217 },
        { 0x20, 0x0020 },
        { 0x33, 0x0208 },
        { 0x46, 0x0300 },
-       { 0x49, 0x0004 },
-       { 0x4f, 0x50e9 },
-       { 0x50, 0x2000 },
-       { 0x63, 0x2902 },
+       { 0x49, 0x4004 },
+       { 0x4f, 0x50c9 },
+       { 0x50, 0x3000 },
+       { 0x63, 0x1b02 },
        { 0x67, 0x1111 },
        { 0x68, 0x1016 },
        { 0x69, 0x273f },
@@ -1214,7 +1214,7 @@ static int rt298_i2c_probe(struct i2c_client *i2c,
        mdelay(10);
 
        if (!rt298->pdata.gpio2_en)
-               regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x4000);
+               regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x40);
        else
                regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0);
 
index 268a28bd1df409dd103d08bbe809cfe294b1e858..5c101af0ac630dddf1cf12085846d5cfd9c08911 100644 (file)
@@ -519,11 +519,11 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = {
                RT5645_L_VOL_SFT + 1, RT5645_R_VOL_SFT + 1, 63, 0, adc_vol_tlv),
 
        /* ADC Boost Volume Control */
-       SOC_DOUBLE_TLV("STO1 ADC Boost Gain", RT5645_ADC_BST_VOL1,
+       SOC_DOUBLE_TLV("ADC Boost Capture Volume", RT5645_ADC_BST_VOL1,
                RT5645_STO1_ADC_L_BST_SFT, RT5645_STO1_ADC_R_BST_SFT, 3, 0,
                adc_bst_tlv),
-       SOC_DOUBLE_TLV("STO2 ADC Boost Gain", RT5645_ADC_BST_VOL1,
-               RT5645_STO2_ADC_L_BST_SFT, RT5645_STO2_ADC_R_BST_SFT, 3, 0,
+       SOC_DOUBLE_TLV("Mono ADC Boost Capture Volume", RT5645_ADC_BST_VOL2,
+               RT5645_MONO_ADC_L_BST_SFT, RT5645_MONO_ADC_R_BST_SFT, 3, 0,
                adc_bst_tlv),
 
        /* I2S2 function select */
index 0e4cfc6ac64984acb1bd395a7477b7cf21cfc020..8c964cfb120ddc4130e5d39e9ab96ae34623eb34 100644 (file)
@@ -39,8 +39,8 @@
 #define RT5645_STO1_ADC_DIG_VOL                        0x1c
 #define RT5645_MONO_ADC_DIG_VOL                        0x1d
 #define RT5645_ADC_BST_VOL1                    0x1e
-/* Mixer - D-D */
 #define RT5645_ADC_BST_VOL2                    0x20
+/* Mixer - D-D */
 #define RT5645_STO1_ADC_MIXER                  0x27
 #define RT5645_MONO_ADC_MIXER                  0x28
 #define RT5645_AD_DA_MIXER                     0x29
 #define RT5645_STO1_ADC_R_BST_SFT              12
 #define RT5645_STO1_ADC_COMP_MASK              (0x3 << 10)
 #define RT5645_STO1_ADC_COMP_SFT               10
-#define RT5645_STO2_ADC_L_BST_MASK             (0x3 << 8)
-#define RT5645_STO2_ADC_L_BST_SFT              8
-#define RT5645_STO2_ADC_R_BST_MASK             (0x3 << 6)
-#define RT5645_STO2_ADC_R_BST_SFT              6
-#define RT5645_STO2_ADC_COMP_MASK              (0x3 << 4)
-#define RT5645_STO2_ADC_COMP_SFT               4
+
+/* ADC Boost Volume Control (0x20) */
+#define RT5645_MONO_ADC_L_BST_MASK             (0x3 << 14)
+#define RT5645_MONO_ADC_L_BST_SFT              14
+#define RT5645_MONO_ADC_R_BST_MASK             (0x3 << 12)
+#define RT5645_MONO_ADC_R_BST_SFT              12
+#define RT5645_MONO_ADC_COMP_MASK              (0x3 << 10)
+#define RT5645_MONO_ADC_COMP_SFT               10
 
 /* Stereo2 ADC Mixer Control (0x26) */
 #define RT5645_STO2_ADC_SRC_MASK               (0x1 << 15)
index bfda25ef0dd43313f066a7afca06c11b8a8ecbe0..f540f82b1f271ec4833d9ea43aa7f4567c0df712 100644 (file)
@@ -1376,8 +1376,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
                        sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT);
 
        snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
-                       SGTL5000_BIAS_R_MASK,
-                       sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT);
+                       SGTL5000_BIAS_VOLT_MASK,
+                       sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT);
        /*
         * disable DAP
         * TODO:
@@ -1549,7 +1549,7 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
                        else {
                                sgtl5000->micbias_voltage = 0;
                                dev_err(&client->dev,
-                                       "Unsuitable MicBias resistor\n");
+                                       "Unsuitable MicBias voltage\n");
                        }
                } else {
                        sgtl5000->micbias_voltage = 0;
index e3a0bca28bcf5fb7c454de1abf235c1c5ace347d..cc1d3981fa4b6b92c018d596a0aaac1192f141f1 100644 (file)
@@ -549,7 +549,7 @@ static struct snd_soc_dai_driver tas2552_dai[] = {
 /*
  * DAC digital volumes. From -7 to 24 dB in 1 dB steps
  */
-static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 0);
+static DECLARE_TLV_DB_SCALE(dac_tlv, -700, 100, 0);
 
 static const char * const tas2552_din_source_select[] = {
        "Muted",
index 1a82b19b26442e31eb38e8fa38287d9710aa3546..8739126a1f6f60d4c9e3eac08aaf337c2f7b3a2b 100644 (file)
@@ -1509,14 +1509,17 @@ static int aic3x_init(struct snd_soc_codec *codec)
        snd_soc_write(codec, PGAL_2_LLOPM_VOL, DEFAULT_VOL);
        snd_soc_write(codec, PGAR_2_RLOPM_VOL, DEFAULT_VOL);
 
-       /* Line2 to HP Bypass default volume, disconnect from Output Mixer */
-       snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL);
-       snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL);
-       snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL);
-       snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL);
-       /* Line2 Line Out default volume, disconnect from Output Mixer */
-       snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL);
-       snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL);
+       /* On tlv320aic3104, these registers are reserved and must not be written */
+       if (aic3x->model != AIC3X_MODEL_3104) {
+               /* Line2 to HP Bypass default volume, disconnect from Output Mixer */
+               snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL);
+               snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL);
+               snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL);
+               snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL);
+               /* Line2 Line Out default volume, disconnect from Output Mixer */
+               snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL);
+               snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL);
+       }
 
        switch (aic3x->model) {
        case AIC3X_MODEL_3X:
index 293e47a6ff59073af3aaf0bb6c6d76521d1b1d94..39ebd7bf4f5306382c86fb49f3417dc3c908dcad 100644 (file)
@@ -3760,7 +3760,7 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
        ret = snd_soc_register_codec(&i2c->dev,
                                     &soc_codec_dev_wm8962, &wm8962_dai, 1);
        if (ret < 0)
-               goto err_enable;
+               goto err_pm_runtime;
 
        regcache_cache_only(wm8962->regmap, true);
 
@@ -3769,6 +3769,8 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
 
        return 0;
 
+err_pm_runtime:
+       pm_runtime_disable(&i2c->dev);
 err_enable:
        regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
 err:
@@ -3778,6 +3780,7 @@ err:
 static int wm8962_i2c_remove(struct i2c_client *client)
 {
        snd_soc_unregister_codec(&client->dev);
+       pm_runtime_disable(&client->dev);
        return 0;
 }
 
@@ -3805,6 +3808,8 @@ static int wm8962_runtime_resume(struct device *dev)
 
        wm8962_reset(wm8962);
 
+       regcache_mark_dirty(wm8962->regmap);
+
        /* SYSCLK defaults to on; make sure it is off so we can safely
         * write to registers if the device is declocked.
         */
index a3e97b46b64e3871ec9362b231d19f9334628229..ba34252b7bba4fdd8d1b7c58a313b490c14aa329 100644 (file)
@@ -131,23 +131,32 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream)
 
        if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
                for (i = 0; i < 4; i++)
-                       i2s_write_reg(dev->i2s_base, TOR(i), 0);
+                       i2s_read_reg(dev->i2s_base, TOR(i));
        } else {
                for (i = 0; i < 4; i++)
-                       i2s_write_reg(dev->i2s_base, ROR(i), 0);
+                       i2s_read_reg(dev->i2s_base, ROR(i));
        }
 }
 
 static void i2s_start(struct dw_i2s_dev *dev,
                      struct snd_pcm_substream *substream)
 {
-
+       u32 i, irq;
        i2s_write_reg(dev->i2s_base, IER, 1);
 
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               for (i = 0; i < 4; i++) {
+                       irq = i2s_read_reg(dev->i2s_base, IMR(i));
+                       i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x30);
+               }
                i2s_write_reg(dev->i2s_base, ITER, 1);
-       else
+       } else {
+               for (i = 0; i < 4; i++) {
+                       irq = i2s_read_reg(dev->i2s_base, IMR(i));
+                       i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x03);
+               }
                i2s_write_reg(dev->i2s_base, IRER, 1);
+       }
 
        i2s_write_reg(dev->i2s_base, CER, 1);
 }
index 48b2d24dd1f0a9a639c6bd7e7549035ed38f8e15..b95132e2f9dc299d82c783810982c2d5b768fb35 100644 (file)
@@ -95,7 +95,8 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
                /* data on rising edge of bclk, frame low 1clk before data */
-               strcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0;
+               strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSI |
+                       SSI_STCR_TEFS;
                scr |= SSI_SCR_NET;
                if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) {
                        scr &= ~SSI_I2S_MODE_MASK;
@@ -104,33 +105,31 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
                break;
        case SND_SOC_DAIFMT_LEFT_J:
                /* data on rising edge of bclk, frame high with data */
-               strcr |= SSI_STCR_TXBIT0;
+               strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP;
                break;
        case SND_SOC_DAIFMT_DSP_B:
                /* data on rising edge of bclk, frame high with data */
-               strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0;
+               strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL;
                break;
        case SND_SOC_DAIFMT_DSP_A:
                /* data on rising edge of bclk, frame high 1clk before data */
-               strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
+               strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL |
+                       SSI_STCR_TEFS;
                break;
        }
 
        /* DAI clock inversion */
        switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
        case SND_SOC_DAIFMT_IB_IF:
-               strcr |= SSI_STCR_TFSI;
-               strcr &= ~SSI_STCR_TSCKP;
+               strcr ^= SSI_STCR_TSCKP | SSI_STCR_TFSI;
                break;
        case SND_SOC_DAIFMT_IB_NF:
-               strcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI);
+               strcr ^= SSI_STCR_TSCKP;
                break;
        case SND_SOC_DAIFMT_NB_IF:
-               strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP;
+               strcr ^= SSI_STCR_TFSI;
                break;
        case SND_SOC_DAIFMT_NB_NF:
-               strcr &= ~SSI_STCR_TFSI;
-               strcr |= SSI_STCR_TSCKP;
                break;
        }
 
index 100d92b5b77ef92da6a7e64efd58ffde1437f769..05977ae1ff2a3250c3fe2043b8124f9370dd1542 100644 (file)
@@ -206,6 +206,34 @@ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
 }
 EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
 
+/**
+ * snd_soc_info_volsw_sx - Mixer info callback for SX TLV controls
+ * @kcontrol: mixer control
+ * @uinfo: control element information
+ *
+ * Callback to provide information about a single mixer control, or a double
+ * mixer control that spans 2 registers of the SX TLV type. SX TLV controls
+ * have a range that represents both positive and negative values either side
+ * of zero but without a sign bit.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol,
+                         struct snd_ctl_elem_info *uinfo)
+{
+       struct soc_mixer_control *mc =
+               (struct soc_mixer_control *)kcontrol->private_value;
+
+       snd_soc_info_volsw(kcontrol, uinfo);
+       /* Max represents the number of levels in an SX control not the
+        * maximum value, so add the minimum value back on
+        */
+       uinfo->value.integer.max += mc->min;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_volsw_sx);
+
 /**
  * snd_soc_get_volsw - single mixer get callback
  * @kcontrol: mixer control
index 82e350e9501ccc0d5ebc82962f60c034466448eb..ac75816ada7c31b133586693e5c73a41dc79348f 100644 (file)
@@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu)
        struct snd_seq_oss_reg *arg;
        struct snd_seq_device *dev;
 
-       if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
+       /* using device#1 here for avoiding conflicts with OPL3 */
+       if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
                               sizeof(struct snd_seq_oss_reg), &dev) < 0)
                return;
 
diff --git a/tools/build/.gitignore b/tools/build/.gitignore
new file mode 100644 (file)
index 0000000..a776371
--- /dev/null
@@ -0,0 +1 @@
+fixdep
diff --git a/tools/build/Build b/tools/build/Build
new file mode 100644 (file)
index 0000000..63a6c34
--- /dev/null
@@ -0,0 +1 @@
+fixdep-y := fixdep.o
index 4c8daaccb82a90565a5aac52afb9c497e8d8580d..4d000bc959b495b695686281912709bc53d10ca6 100644 (file)
@@ -54,15 +54,26 @@ make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
 # PHONY targets skipped in both cases.
 any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^)
 
+###
+# Copy dependency data into .cmd file
+#  - gcc -M dependency info
+#  - command line to create object 'cmd_object :='
+dep-cmd = $(if $(wildcard $(fixdep)),                                           \
+           $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp;           \
+           rm -f $(depfile);                                                    \
+           mv -f $(dot-target).tmp $(dot-target).cmd,                           \
+           printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
+           printf '\# using basic dep data\n\n' >> $(dot-target).cmd;           \
+           cat $(depfile) >> $(dot-target).cmd;                                 \
+           printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
+
 ###
 # if_changed_dep  - execute command if any prerequisite is newer than
 #                   target, or command line has changed and update
 #                   dependencies in the cmd file
 if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)),         \
        @set -e;                                                   \
-       $(echo-cmd) $(cmd_$(1));                                   \
-       cat $(depfile) > $(dot-target).cmd;                        \
-       printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
+       $(echo-cmd) $(cmd_$(1)) && $(dep-cmd))
 
 # if_changed      - execute command if any prerequisite is newer than
 #                   target, or command line has changed
index aa5e092c4352bad50660f3c262ce9819b70120fc..a47bffbae1595bdb8d9fc111ddf102f868e217e5 100644 (file)
@@ -11,8 +11,9 @@ Unlike the kernel we don't have a single build object 'obj-y' list that where
 we setup source objects, but we support more. This allows one 'Build' file to
 carry a sources list for multiple build objects.
 
-a) Build framework makefiles
-----------------------------
+
+Build framework makefiles
+-------------------------
 
 The build framework consists of 2 Makefiles:
 
@@ -23,7 +24,7 @@ While the 'Build.include' file contains just some generic definitions, the
 'Makefile.build' file is the makefile used from the outside. Its
 interface/usage is as follows:
 
-  $ make -f tools/build/Makefile srctree=$(KSRC) dir=$(DIR) obj=$(OBJECT)
+  $ make -f tools/build/Makefile.build srctree=$(KSRC) dir=$(DIR) obj=$(OBJECT)
 
 where:
 
@@ -38,8 +39,9 @@ called $(OBJECT)-in.o:
 
 which includes all compiled sources described in 'Build' makefiles.
 
-a) Build makefiles
-------------------
+
+Build makefiles
+---------------
 
 The user supplies 'Build' makefiles that contain an object list and connect
 the build to nested directories.
@@ -95,8 +97,31 @@ It's only a matter of 2 single commands to create the final binaries:
 
 You can check the 'ex' example in 'tools/build/tests/ex' for more details.
 
-b) Rules
---------
+
+Makefile.include
+----------------
+
+The tools/build/Makefile.include makefile can be included
+from user makefiles to get useful definitions.
+
+It defines the following interface:
+
+  - build macro definition:
+      build := -f $(srctree)/tools/build/Makefile.build dir=. obj
+
+    to make it easier to invoke build like:
+      make $(build)=ex
+
+
+Fixdep
+------
+The fixdep helper must be built before the build itself is invoked.
+The Makefile.include file adds the fixdep target, which can be
+invoked by the user.
+
+
+Rules
+-----
 
 The build framework provides standard compilation rules to handle .S and .c
 compilation.
@@ -104,8 +129,9 @@ compilation.
 It's possible to include special rule if needed (like we do for flex or bison
 code generation).
 
-c) CFLAGS
----------
+
+CFLAGS
+------
 
 It's possible to alter the standard object C flags in the following way:
 
@@ -115,8 +141,8 @@ It's possible to alter the standard object C flags in the following way:
 These C flag changes have the scope of the Build makefile they are defined in.
 
 
-d) Dependencies
----------------
+Dependencies
+------------
 
 For each built object file 'a.o' the '.a.cmd' is created and holds:
 
@@ -130,8 +156,8 @@ All existing '.cmd' files are included in the Build process to follow properly
 the dependencies and trigger a rebuild when necessary.
 
 
-e) Single rules
----------------
+Single rules
+------------
 
 It's possible to build single object file by choice, like:
 
diff --git a/tools/build/Makefile b/tools/build/Makefile
new file mode 100644 (file)
index 0000000..a930362
--- /dev/null
@@ -0,0 +1,43 @@
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+include $(srctree)/tools//scripts/Makefile.include
+
+define allow-override
+  $(if $(or $(findstring environment,$(origin $(1))),\
+            $(findstring command line,$(origin $(1)))),,\
+    $(eval $(1) = $(2)))
+endef
+
+$(call allow-override,CC,$(CROSS_COMPILE)gcc)
+$(call allow-override,LD,$(CROSS_COMPILE)ld)
+
+ifeq ($(V),1)
+  Q =
+else
+  Q = @
+endif
+
+export Q srctree CC LD
+
+MAKEFLAGS := --no-print-directory
+build     := -f $(srctree)/tools/build/Makefile.build dir=. obj
+
+all: fixdep
+
+clean:
+       $(call QUIET_CLEAN, fixdep)
+       $(Q)find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+       $(Q)rm -f fixdep
+
+$(OUTPUT)fixdep-in.o: FORCE
+       $(Q)$(MAKE) $(build)=fixdep
+
+$(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o
+       $(QUIET_LINK)$(CC) $(LDFLAGS) -o $@ $<
+
+FORCE:
+
+.PHONY: FORCE
index 0c5f485521d6707786188652cb42a3fb7d23c8e6..4a96473b180f938b038d5e698f3578876eb7c70c 100644 (file)
@@ -21,6 +21,13 @@ endif
 
 build-dir := $(srctree)/tools/build
 
+# Define $(fixdep) for dep-cmd function
+ifeq ($(OUTPUT),)
+  fixdep := $(build-dir)/fixdep
+else
+  fixdep := $(OUTPUT)/fixdep
+endif
+
 # Generic definitions
 include $(build-dir)/Build.include
 
index 2975632d51e2341e7e1a60286e0fa822cbec0279..37ff4c9f92f1b5d8c17f8df034f0dad567d146fa 100644 (file)
@@ -41,6 +41,7 @@ FEATURE_TESTS ?=                      \
        libelf-getphdrnum               \
        libelf-mmap                     \
        libnuma                         \
+       numa_num_possible_cpus          \
        libperl                         \
        libpython                       \
        libpython-version               \
@@ -51,7 +52,9 @@ FEATURE_TESTS ?=                      \
        timerfd                         \
        libdw-dwarf-unwind              \
        zlib                            \
-       lzma
+       lzma                            \
+       get_cpuid                       \
+       bpf
 
 FEATURE_DISPLAY ?=                     \
        dwarf                           \
@@ -61,13 +64,16 @@ FEATURE_DISPLAY ?=                  \
        libbfd                          \
        libelf                          \
        libnuma                         \
+       numa_num_possible_cpus          \
        libperl                         \
        libpython                       \
        libslang                        \
        libunwind                       \
        libdw-dwarf-unwind              \
        zlib                            \
-       lzma
+       lzma                            \
+       get_cpuid                       \
+       bpf
 
 # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
 # If in the future we need per-feature checks/flags for features not
@@ -117,8 +123,9 @@ define feature_print_text_code
     MSG = $(shell printf '...%30s: %s' $(1) $(2))
 endef
 
+FEATURE_DUMP_FILENAME = $(OUTPUT)FEATURE-DUMP$(FEATURE_USER)
 FEATURE_DUMP := $(foreach feat,$(FEATURE_DISPLAY),feature-$(feat)($(feature-$(feat))))
-FEATURE_DUMP_FILE := $(shell touch $(OUTPUT)FEATURE-DUMP; cat $(OUTPUT)FEATURE-DUMP)
+FEATURE_DUMP_FILE := $(shell touch $(FEATURE_DUMP_FILENAME); cat $(FEATURE_DUMP_FILENAME))
 
 ifeq ($(dwarf-post-unwind),1)
   FEATURE_DUMP += dwarf-post-unwind($(dwarf-post-unwind-text))
@@ -127,16 +134,16 @@ endif
 # The $(feature_display) controls the default detection message
 # output. It's set if:
 # - detected features differ from stored features from
-#   last build (in FEATURE-DUMP file)
+#   last build (in $(FEATURE_DUMP_FILENAME) file)
 # - one of the $(FEATURE_DISPLAY) is not detected
 # - VF is enabled
 
 ifneq ("$(FEATURE_DUMP)","$(FEATURE_DUMP_FILE)")
-  $(shell echo "$(FEATURE_DUMP)" > $(OUTPUT)FEATURE-DUMP)
+  $(shell echo "$(FEATURE_DUMP)" > $(FEATURE_DUMP_FILENAME))
   feature_display := 1
 endif
 
-feature_display_check = $(eval $(feature_check_code))
+feature_display_check = $(eval $(feature_check_display_code))
 define feature_display_check_code
   ifneq ($(feature-$(1)), 1)
     feature_display := 1
diff --git a/tools/build/Makefile.include b/tools/build/Makefile.include
new file mode 100644 (file)
index 0000000..4e09ad6
--- /dev/null
@@ -0,0 +1,10 @@
+build := -f $(srctree)/tools/build/Makefile.build dir=. obj
+
+ifdef CROSS_COMPILE
+fixdep:
+else
+fixdep:
+       $(Q)$(MAKE) -C $(srctree)/tools/build fixdep
+endif
+
+.PHONY: fixdep
index 74ca42093d70d72fac5a4b1776237be9873f480a..cea04ce9f35cdfb7e5b217077d60b214894b6c48 100644 (file)
@@ -19,6 +19,7 @@ FILES=                                        \
        test-libelf-getphdrnum.bin      \
        test-libelf-mmap.bin            \
        test-libnuma.bin                \
+       test-numa_num_possible_cpus.bin \
        test-libperl.bin                \
        test-libpython.bin              \
        test-libpython-version.bin      \
@@ -34,7 +35,8 @@ FILES=                                        \
        test-compile-x32.bin            \
        test-zlib.bin                   \
        test-lzma.bin                   \
-       test-bpf.bin
+       test-bpf.bin                    \
+       test-get_cpuid.bin
 
 CC := $(CROSS_COMPILE)gcc -MD
 PKG_CONFIG := $(CROSS_COMPILE)pkg-config
@@ -87,6 +89,9 @@ test-libelf-getphdrnum.bin:
 test-libnuma.bin:
        $(BUILD) -lnuma
 
+test-numa_num_possible_cpus.bin:
+       $(BUILD) -lnuma
+
 test-libunwind.bin:
        $(BUILD) -lelf
 
@@ -127,10 +132,10 @@ test-libbfd.bin:
        $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
 
 test-liberty.bin:
-       $(CC) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty
+       $(CC) $(CFLAGS) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
 
 test-liberty-z.bin:
-       $(CC) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz
+       $(CC) $(CFLAGS) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty -lz
 
 test-cplus-demangle.bin:
        $(BUILD) -liberty
@@ -162,6 +167,9 @@ test-zlib.bin:
 test-lzma.bin:
        $(BUILD) -llzma
 
+test-get_cpuid.bin:
+       $(BUILD)
+
 test-bpf.bin:
        $(BUILD)
 
index 84689a67814a9a622bd3f12b69fe5a8f5733a499..33cf6f20bd4ec6812ac6b53cfe1b301053d8c940 100644 (file)
 # include "test-libnuma.c"
 #undef main
 
+#define main main_test_numa_num_possible_cpus
+# include "test-numa_num_possible_cpus.c"
+#undef main
+
 #define main main_test_timerfd
 # include "test-timerfd.c"
 #undef main
 # include "test-lzma.c"
 #undef main
 
+#define main main_test_get_cpuid
+# include "test-get_cpuid.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
        main_test_libpython();
@@ -136,6 +144,7 @@ int main(int argc, char *argv[])
        main_test_libbfd();
        main_test_backtrace();
        main_test_libnuma();
+       main_test_numa_num_possible_cpus();
        main_test_timerfd();
        main_test_stackprotector_all();
        main_test_libdw_dwarf_unwind();
@@ -143,6 +152,7 @@ int main(int argc, char *argv[])
        main_test_zlib();
        main_test_pthread_attr_setaffinity_np();
        main_test_lzma();
+       main_test_get_cpuid();
 
        return 0;
 }
diff --git a/tools/build/feature/test-get_cpuid.c b/tools/build/feature/test-get_cpuid.c
new file mode 100644 (file)
index 0000000..d7a2c40
--- /dev/null
@@ -0,0 +1,7 @@
+#include <cpuid.h>
+
+int main(void)
+{
+       unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
+       return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
+}
diff --git a/tools/build/feature/test-numa_num_possible_cpus.c b/tools/build/feature/test-numa_num_possible_cpus.c
new file mode 100644 (file)
index 0000000..2606e94
--- /dev/null
@@ -0,0 +1,6 @@
+#include <numa.h>
+
+int main(void)
+{
+       return numa_num_possible_cpus();
+}
diff --git a/tools/build/fixdep.c b/tools/build/fixdep.c
new file mode 100644 (file)
index 0000000..1521d36
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * "Optimize" a list of dependencies as spit out by gcc -MD
+ * for the build framework.
+ *
+ * Original author:
+ *   Copyright    2002 by Kai Germaschewski  <kai.germaschewski@gmx.de>
+ *
+ * This code has been borrowed from kbuild's fixdep (scripts/basic/fixdep.c),
+ * Please check it for a detailed explanation. This fixdep borrows only the
+ * base transformation of dependencies, without the CONFIG mangling.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <limits.h>
+
+char *target;
+char *depfile;
+char *cmdline;
+
+static void usage(void)
+{
+       fprintf(stderr, "Usage: fixdep <depfile> <target> <cmdline>\n");
+       exit(1);
+}
+
+/*
+ * Print out the commandline prefixed with cmd_<target filename> :=
+ */
+static void print_cmdline(void)
+{
+       printf("cmd_%s := %s\n\n", target, cmdline);
+}
+
+/*
+ * Important: The below generated source_foo.o and deps_foo.o variable
+ * assignments are parsed not only by make, but also by the rather simple
+ * parser in scripts/mod/sumversion.c.
+ */
+static void parse_dep_file(void *map, size_t len)
+{
+       char *m = map;
+       char *end = m + len;
+       char *p;
+       char s[PATH_MAX];
+       int is_target;
+       int saw_any_target = 0;
+       int is_first_dep = 0;
+
+       while (m < end) {
+               /* Skip any "white space" */
+               while (m < end && (*m == ' ' || *m == '\\' || *m == '\n'))
+                       m++;
+               /* Find next "white space" */
+               p = m;
+               while (p < end && *p != ' ' && *p != '\\' && *p != '\n')
+                       p++;
+               /* Is the token we found a target name? */
+               is_target = (*(p-1) == ':');
+               /* Don't write any target names into the dependency file */
+               if (is_target) {
+                       /* The /next/ file is the first dependency */
+                       is_first_dep = 1;
+               } else {
+                       /* Save this token/filename */
+                       memcpy(s, m, p-m);
+                       s[p - m] = 0;
+
+                       /*
+                        * Do not list the source file as dependency,
+                        * so that kbuild is not confused if a .c file
+                        * is rewritten into .S or vice versa. Storing
+                        * it in source_* is needed for modpost to
+                        * compute srcversions.
+                        */
+                       if (is_first_dep) {
+                               /*
+                                * If processing the concatenation of
+                                * multiple dependency files, only
+                                * process the first target name, which
+                                * will be the original source name,
+                                * and ignore any other target names,
+                                * which will be intermediate temporary
+                                * files.
+                                */
+                               if (!saw_any_target) {
+                                       saw_any_target = 1;
+                                       printf("source_%s := %s\n\n",
+                                               target, s);
+                                       printf("deps_%s := \\\n",
+                                               target);
+                               }
+                               is_first_dep = 0;
+                       } else
+                               printf("  %s \\\n", s);
+               }
+               /*
+                * Start searching for next token immediately after the first
+                * "whitespace" character that follows this token.
+                */
+               m = p + 1;
+       }
+
+       if (!saw_any_target) {
+               fprintf(stderr, "fixdep: parse error; no targets found\n");
+               exit(1);
+       }
+
+       printf("\n%s: $(deps_%s)\n\n", target, target);
+       printf("$(deps_%s):\n", target);
+}
+
+static void print_deps(void)
+{
+       struct stat st;
+       int fd;
+       void *map;
+
+       fd = open(depfile, O_RDONLY);
+       if (fd < 0) {
+               fprintf(stderr, "fixdep: error opening depfile: ");
+               perror(depfile);
+               exit(2);
+       }
+       if (fstat(fd, &st) < 0) {
+               fprintf(stderr, "fixdep: error fstat'ing depfile: ");
+               perror(depfile);
+               exit(2);
+       }
+       if (st.st_size == 0) {
+               fprintf(stderr, "fixdep: %s is empty\n", depfile);
+               close(fd);
+               return;
+       }
+       map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+       if ((long) map == -1) {
+               perror("fixdep: mmap");
+               close(fd);
+               return;
+       }
+
+       parse_dep_file(map, st.st_size);
+
+       munmap(map, st.st_size);
+
+       close(fd);
+}
+
+int main(int argc, char **argv)
+{
+       if (argc != 4)
+               usage();
+
+       depfile = argv[1];
+       target  = argv[2];
+       cmdline = argv[3];
+
+       print_cmdline();
+       print_deps();
+
+       return 0;
+}
index 429c7d4521016283ef81bc1dbd1e9ae93e8f2581..4d502f9b1a50e5c55e8ddd4b745a77acf72f6a87 100644 (file)
@@ -4,6 +4,7 @@ ex-y += b.o
 ex-y += b.o
 ex-y += empty/
 ex-y += empty2/
+ex-y += inc.o
 
 libex-y += c.o
 libex-y += d.o
index 52d2476073a34c6a8b788bdeec4627806c4864aa..c50d5782ad5a967472a75f84a9d3a81fee19eecf 100644 (file)
@@ -1,19 +1,22 @@
-export srctree := ../../../..
+export srctree := $(abspath ../../../..)
 export CC      := gcc
 export LD      := ld
 export AR      := ar
 
-build := -f $(srctree)/tools/build/Makefile.build dir=. obj
+ex:
+
+include $(srctree)/tools/build/Makefile.include
+
 ex: ex-in.o libex-in.o
        gcc -o $@ $^
 
-ex.%: FORCE
+ex.%: fixdep FORCE
        make -f $(srctree)/tools/build/Makefile.build dir=. $@
 
-ex-in.o: FORCE
+ex-in.o: fixdep FORCE
        make $(build)=ex
 
-libex-in.o: FORCE
+libex-in.o: fixdep FORCE
        make $(build)=libex
 
 clean:
index dc42eb2e1a677414b32001de061eb1868acd5827..57de6074d252857fdbaf4c115a25c254b3bd2a94 100644 (file)
@@ -5,6 +5,7 @@ int c(void);
 int d(void);
 int e(void);
 int f(void);
+int inc(void);
 
 int main(void)
 {
@@ -14,6 +15,7 @@ int main(void)
        d();
        e();
        f();
+       inc();
 
        return 0;
 }
diff --git a/tools/build/tests/ex/inc.c b/tools/build/tests/ex/inc.c
new file mode 100644 (file)
index 0000000..c20f1e9
--- /dev/null
@@ -0,0 +1,8 @@
+#ifdef INCLUDE
+#include "krava.h"
+#endif
+
+int inc(void)
+{
+       return 0;
+}
index 5494f8ea75670f91a77034959b32db9a88ceb203..44d2a0fade677b9a456647120d1bb60f3b711aa1 100755 (executable)
@@ -34,9 +34,36 @@ function test_ex_suffix {
        make -C ex V=1 clean > /dev/null 2>&1
        rm -f ex.out
 }
+
+function test_ex_include {
+       make -C ex V=1 clean > ex.out 2>&1
+
+       # build with krava.h include
+       touch ex/krava.h
+       make -C ex V=1 CFLAGS=-DINCLUDE >> ex.out 2>&1
+
+       if [ ! -x ./ex/ex ]; then
+         echo FAILED
+         exit -1
+       fi
+
+       # build without the include
+       rm -f ex/krava.h ex/ex
+       make -C ex V=1 >> ex.out 2>&1
+
+       if [ ! -x ./ex/ex ]; then
+         echo FAILED
+         exit -1
+       fi
+
+       make -C ex V=1 clean > /dev/null 2>&1
+       rm -f ex.out
+}
+
 echo -n Testing..
 
 test_ex
 test_ex_suffix
+test_ex_include
 
 echo OK
index 9098083869c85a62bc4df5bcf8df96f5ffcc7e00..fa7208a32d763d0ae35e953ccff82c69c772a5c2 100644 (file)
 
 #include <linux/types.h>
 
+/*
+ * Following functions are taken from kernel sources and
+ * break aliasing rules in their original form.
+ *
+ * While kernel is compiled with -fno-strict-aliasing,
+ * perf uses -Wstrict-aliasing=3 which makes build fail
+ * under gcc 4.4.
+ *
+ * Using extra __may_alias__ type to allow aliasing
+ * in this case.
+ */
+typedef __u8  __attribute__((__may_alias__))  __u8_alias_t;
+typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
+typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
+typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;
+
 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
 {
        switch (size) {
-       case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
-       case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
-       case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
-       case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
+       case 1: *(__u8_alias_t  *) res = *(volatile __u8_alias_t  *) p; break;
+       case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
+       case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
+       case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
        default:
                barrier();
                __builtin_memcpy((void *)res, (const void *)p, size);
@@ -60,10 +76,10 @@ static __always_inline void __read_once_size(const volatile void *p, void *res,
 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 {
        switch (size) {
-       case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
-       case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
-       case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
-       case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+       case 1: *(volatile  __u8_alias_t *) p = *(__u8_alias_t  *) res; break;
+       case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
+       case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
+       case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
        default:
                barrier();
                __builtin_memcpy((void *)p, (const void *)res, size);
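
The __may_alias__ typedefs above work around -Wstrict-aliasing warnings on older GCC. A minimal standalone sketch of the same idea (editorial illustration only, not part of the patch; GCC's __may_alias__ attribute is assumed):

    #include <stdio.h>

    /* GCC extension used in the hunk above: a type that is allowed to
     * alias objects of any other type. */
    typedef unsigned int __attribute__((__may_alias__)) u32_alias_t;

    static unsigned int read_u32(const void *p)
    {
            /* Same cast pattern as __read_once_size() uses for the 4-byte case. */
            return *(const volatile u32_alias_t *)p;
    }

    int main(void)
    {
            unsigned int v = 0x12345678;

            printf("0x%08x\n", read_u32(&v));
            return 0;
    }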
diff --git a/tools/include/linux/err.h b/tools/include/linux/err.h
new file mode 100644 (file)
index 0000000..bdc3dd8
--- /dev/null
@@ -0,0 +1,49 @@
+#ifndef __TOOLS_LINUX_ERR_H
+#define __TOOLS_LINUX_ERR_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include <asm/errno.h>
+
+/*
+ * Original kernel header comment:
+ *
+ * Kernel pointers have redundant information, so we can use a
+ * scheme where we can return either an error code or a normal
+ * pointer with the same return value.
+ *
+ * This should be a per-architecture thing, to allow different
+ * error and pointer decisions.
+ *
+ * Userspace note:
+ * The same principle works for userspace, because 'error' pointers
+ * fall down to the unused hole far from user space, as described
+ * in Documentation/x86/x86_64/mm.txt for x86_64 arch:
+ *
+ * 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm hole caused by [48:63] sign extension
+ * ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
+ *
+ * It should be the same case for other architectures, because
+ * this code is used in generic kernel code.
+ */
+#define MAX_ERRNO      4095
+
+#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+
+static inline void * __must_check ERR_PTR(long error_)
+{
+       return (void *) error_;
+}
+
+static inline long __must_check PTR_ERR(__force const void *ptr)
+{
+       return (long) ptr;
+}
+
+static inline bool __must_check IS_ERR(__force const void *ptr)
+{
+       return IS_ERR_VALUE((unsigned long)ptr);
+}
+
+#endif /* _LINUX_ERR_H */
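
The new tools err.h mirrors the kernel's ERR_PTR() pointer/errno encoding for userspace. A rough usage sketch (editorial only; open_thing() is hypothetical, and the include path assumes a build inside the tools tree with -Itools/include):

    #include <errno.h>
    #include <stdio.h>
    #include <linux/err.h>          /* the header added above */

    /* Hypothetical helper: returns a valid pointer, or an errno value
     * encoded as a pointer via ERR_PTR(). */
    static void *open_thing(int ok)
    {
            static int thing;

            return ok ? (void *)&thing : ERR_PTR(-ENOENT);
    }

    int main(void)
    {
            void *p = open_thing(0);

            if (IS_ERR(p)) {
                    printf("failed: %ld\n", PTR_ERR(p));    /* prints -2 */
                    return 1;
            }
            return 0;
    }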
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
new file mode 100644 (file)
index 0000000..3276625
--- /dev/null
@@ -0,0 +1,231 @@
+/*
+ * Linux Socket Filter Data Structures
+ */
+#ifndef __TOOLS_LINUX_FILTER_H
+#define __TOOLS_LINUX_FILTER_H
+
+#include <linux/bpf.h>
+
+/* ArgX, context and stack frame pointer register positions. Note,
+ * Arg1, Arg2, Arg3, etc are used as argument mappings of function
+ * calls in BPF_CALL instruction.
+ */
+#define BPF_REG_ARG1   BPF_REG_1
+#define BPF_REG_ARG2   BPF_REG_2
+#define BPF_REG_ARG3   BPF_REG_3
+#define BPF_REG_ARG4   BPF_REG_4
+#define BPF_REG_ARG5   BPF_REG_5
+#define BPF_REG_CTX    BPF_REG_6
+#define BPF_REG_FP     BPF_REG_10
+
+/* Additional register mappings for converted user programs. */
+#define BPF_REG_A      BPF_REG_0
+#define BPF_REG_X      BPF_REG_7
+#define BPF_REG_TMP    BPF_REG_8
+
+/* BPF program can access up to 512 bytes of stack space. */
+#define MAX_BPF_STACK  512
+
+/* Helper macros for filter block array initializers. */
+
+/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
+
+#define BPF_ALU64_REG(OP, DST, SRC)                            \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,        \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
+#define BPF_ALU32_REG(OP, DST, SRC)                            \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_OP(OP) | BPF_X,          \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
+/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
+
+#define BPF_ALU64_IMM(OP, DST, IMM)                            \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,        \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+#define BPF_ALU32_IMM(OP, DST, IMM)                            \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_OP(OP) | BPF_K,          \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
+
+#define BPF_ENDIAN(TYPE, DST, LEN)                             \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),     \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = LEN })
+
+/* Short form of mov, dst_reg = src_reg */
+
+#define BPF_MOV64_REG(DST, SRC)                                        \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU64 | BPF_MOV | BPF_X,           \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
+#define BPF_MOV32_REG(DST, SRC)                                        \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_MOV | BPF_X,             \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
+/* Short form of mov, dst_reg = imm32 */
+
+#define BPF_MOV64_IMM(DST, IMM)                                        \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU64 | BPF_MOV | BPF_K,           \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+#define BPF_MOV32_IMM(DST, IMM)                                        \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_MOV | BPF_K,             \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+/* Short form of mov based on type,  BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
+
+#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)                     \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),   \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)                     \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),     \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
+
+#define BPF_LD_ABS(SIZE, IMM)                                  \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,     \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
+
+#define BPF_LD_IND(SIZE, SRC, IMM)                             \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,     \
+               .dst_reg = 0,                                   \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
+
+#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)                       \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,    \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
+/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
+
+#define BPF_STX_MEM(SIZE, DST, SRC, OFF)                       \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,    \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
+/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
+
+#define BPF_ST_MEM(SIZE, DST, OFF, IMM)                                \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,     \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = OFF,                                   \
+               .imm   = IMM })
+
+/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
+
+#define BPF_JMP_REG(OP, DST, SRC, OFF)                         \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_JMP | BPF_OP(OP) | BPF_X,          \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
+/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
+
+#define BPF_JMP_IMM(OP, DST, IMM, OFF)                         \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_JMP | BPF_OP(OP) | BPF_K,          \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = OFF,                                   \
+               .imm   = IMM })
+
+/* Function call */
+
+#define BPF_EMIT_CALL(FUNC)                                    \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_JMP | BPF_CALL,                    \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = ((FUNC) - BPF_FUNC_unspec) })
+
+/* Raw code statement block */
+
+#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)                 \
+       ((struct bpf_insn) {                                    \
+               .code  = CODE,                                  \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = IMM })
+
+/* Program exit */
+
+#define BPF_EXIT_INSN()                                                \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_JMP | BPF_EXIT,                    \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
+#endif /* __TOOLS_LINUX_FILTER_H */
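
The filter.h macros are struct bpf_insn initializers. A tiny sketch of the classic "return 1" eBPF program built from them (editorial only; it assumes tools/include is on the include path, and actually loading the program via the bpf() syscall is out of scope here):

    #include <stdio.h>
    #include <linux/bpf.h>
    #include <linux/filter.h>       /* the header added above */

    /* Two-instruction program: set R0 (the return register) to 1 and exit. */
    static const struct bpf_insn prog[] = {
            BPF_MOV64_IMM(BPF_REG_0, 1),
            BPF_EXIT_INSN(),
    };

    int main(void)
    {
            printf("%zu instructions, %zu bytes\n",
                   sizeof(prog) / sizeof(prog[0]), sizeof(prog));
            return 0;
    }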
index 3653965cf48185a9950bb81aafb12660b306cba6..e8b8a23b9bf4cdac8ccfcf95690f8dfd93e1d6e6 100644 (file)
@@ -1,2 +1,3 @@
 libapi-y += fd/
 libapi-y += fs/
+libapi-y += cpu.o
index fe1b02c2c95bbe1274376f1b20ffebfac7358cc7..d85904dc9b38747dc0feda60f819cf93ceabbdcf 100644 (file)
@@ -21,12 +21,14 @@ CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 
 RM = rm -f
 
-build  := -f $(srctree)/tools/build/Makefile.build dir=. obj
 API_IN := $(OUTPUT)libapi-in.o
 
+all:
+
 export srctree OUTPUT CC LD CFLAGS V
+include $(srctree)/tools/build/Makefile.include
 
-all: $(LIBFILE)
+all: fixdep $(LIBFILE)
 
 $(API_IN): FORCE
        @$(MAKE) $(build)=libapi
diff --git a/tools/lib/api/cpu.c b/tools/lib/api/cpu.c
new file mode 100644 (file)
index 0000000..8c64893
--- /dev/null
@@ -0,0 +1,18 @@
+#include <stdio.h>
+
+#include "cpu.h"
+#include "fs/fs.h"
+
+int cpu__get_max_freq(unsigned long long *freq)
+{
+       char entry[PATH_MAX];
+       int cpu;
+
+       if (sysfs__read_int("devices/system/cpu/online", &cpu) < 0)
+               return -1;
+
+       snprintf(entry, sizeof(entry),
+                "devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq", cpu);
+
+       return sysfs__read_ull(entry, freq);
+}
diff --git a/tools/lib/api/cpu.h b/tools/lib/api/cpu.h
new file mode 100644 (file)
index 0000000..81e9d39
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __API_CPU__
+#define __API_CPU__
+
+int cpu__get_max_freq(unsigned long long *freq);
+
+#endif /* __API_CPU__ */
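
cpu__get_max_freq() resolves the first online CPU and reads its cpuinfo_max_freq from sysfs (the value is in kHz). A short usage sketch (editorial only; it assumes the program is linked against libapi and built with tools/lib/api on the include path):

    #include <stdio.h>
    #include "cpu.h"                /* tools/lib/api/cpu.h, added above */

    int main(void)
    {
            unsigned long long freq;

            if (cpu__get_max_freq(&freq) < 0) {
                    fprintf(stderr, "cpufreq sysfs entry not readable\n");
                    return 1;
            }

            printf("max CPU frequency: %llu kHz\n", freq);
            return 0;
    }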
index 6de5a4f0b501ef33244ef356a63fcec9ac208a18..f4ed9629ae85dcc16b53faf90f41defb960476db 100644 (file)
@@ -1,4 +1,2 @@
 libapi-y += fs.o
-libapi-y += debugfs.o
-libapi-y += findfs.o
-libapi-y += tracefs.o
+libapi-y += tracing_path.o
diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c
deleted file mode 100644 (file)
index eb7cf4d..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-#define _GNU_SOURCE
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdbool.h>
-#include <sys/vfs.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/mount.h>
-#include <linux/kernel.h>
-
-#include "debugfs.h"
-#include "tracefs.h"
-
-#ifndef DEBUGFS_DEFAULT_PATH
-#define DEBUGFS_DEFAULT_PATH           "/sys/kernel/debug"
-#endif
-
-char debugfs_mountpoint[PATH_MAX + 1] = DEBUGFS_DEFAULT_PATH;
-
-static const char * const debugfs_known_mountpoints[] = {
-       DEBUGFS_DEFAULT_PATH,
-       "/debug",
-       0,
-};
-
-static bool debugfs_found;
-
-bool debugfs_configured(void)
-{
-       return debugfs_find_mountpoint() != NULL;
-}
-
-/* find the path to the mounted debugfs */
-const char *debugfs_find_mountpoint(void)
-{
-       const char *ret;
-
-       if (debugfs_found)
-               return (const char *)debugfs_mountpoint;
-
-       ret = find_mountpoint("debugfs", (long) DEBUGFS_MAGIC,
-                             debugfs_mountpoint, PATH_MAX + 1,
-                             debugfs_known_mountpoints);
-       if (ret)
-               debugfs_found = true;
-
-       return ret;
-}
-
-/* mount the debugfs somewhere if it's not mounted */
-char *debugfs_mount(const char *mountpoint)
-{
-       /* see if it's already mounted */
-       if (debugfs_find_mountpoint())
-               goto out;
-
-       /* if not mounted and no argument */
-       if (mountpoint == NULL) {
-               /* see if environment variable set */
-               mountpoint = getenv(PERF_DEBUGFS_ENVIRONMENT);
-               /* if no environment variable, use default */
-               if (mountpoint == NULL)
-                       mountpoint = DEBUGFS_DEFAULT_PATH;
-       }
-
-       if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0)
-               return NULL;
-
-       /* save the mountpoint */
-       debugfs_found = true;
-       strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
-out:
-       return debugfs_mountpoint;
-}
-
-int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename)
-{
-       char sbuf[128];
-
-       switch (err) {
-       case ENOENT:
-               if (debugfs_found) {
-                       snprintf(buf, size,
-                                "Error:\tFile %s/%s not found.\n"
-                                "Hint:\tPerhaps this kernel misses some CONFIG_ setting to enable this feature?.\n",
-                                debugfs_mountpoint, filename);
-                       break;
-               }
-               snprintf(buf, size, "%s",
-                        "Error:\tUnable to find debugfs\n"
-                        "Hint:\tWas your kernel compiled with debugfs support?\n"
-                        "Hint:\tIs the debugfs filesystem mounted?\n"
-                        "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
-               break;
-       case EACCES: {
-               const char *mountpoint = debugfs_mountpoint;
-
-               if (!access(debugfs_mountpoint, R_OK) && strncmp(filename, "tracing/", 8) == 0) {
-                       const char *tracefs_mntpoint = tracefs_find_mountpoint();
-
-                       if (tracefs_mntpoint)
-                               mountpoint = tracefs_mntpoint;
-               }
-
-               snprintf(buf, size,
-                        "Error:\tNo permissions to read %s/%s\n"
-                        "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
-                        debugfs_mountpoint, filename, mountpoint);
-       }
-               break;
-       default:
-               snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
-               break;
-       }
-
-       return 0;
-}
-
-int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name)
-{
-       char path[PATH_MAX];
-
-       snprintf(path, PATH_MAX, "tracing/events/%s/%s", sys, name ?: "*");
-
-       return debugfs__strerror_open(err, buf, size, path);
-}
diff --git a/tools/lib/api/fs/debugfs.h b/tools/lib/api/fs/debugfs.h
deleted file mode 100644 (file)
index 4550236..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __API_DEBUGFS_H__
-#define __API_DEBUGFS_H__
-
-#include "findfs.h"
-
-#ifndef DEBUGFS_MAGIC
-#define DEBUGFS_MAGIC          0x64626720
-#endif
-
-#ifndef PERF_DEBUGFS_ENVIRONMENT
-#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
-#endif
-
-bool debugfs_configured(void);
-const char *debugfs_find_mountpoint(void);
-char *debugfs_mount(const char *mountpoint);
-
-extern char debugfs_mountpoint[];
-
-int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename);
-int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name);
-
-#endif /* __API_DEBUGFS_H__ */
diff --git a/tools/lib/api/fs/findfs.c b/tools/lib/api/fs/findfs.c
deleted file mode 100644 (file)
index 49946cb..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdbool.h>
-#include <sys/vfs.h>
-
-#include "findfs.h"
-
-/* verify that a mountpoint is actually the type we want */
-
-int valid_mountpoint(const char *mount, long magic)
-{
-       struct statfs st_fs;
-
-       if (statfs(mount, &st_fs) < 0)
-               return -ENOENT;
-       else if ((long)st_fs.f_type != magic)
-               return -ENOENT;
-
-       return 0;
-}
-
-/* find the path to a mounted file system */
-const char *find_mountpoint(const char *fstype, long magic,
-                           char *mountpoint, int len,
-                           const char * const *known_mountpoints)
-{
-       const char * const *ptr;
-       char format[128];
-       char type[100];
-       FILE *fp;
-
-       if (known_mountpoints) {
-               ptr = known_mountpoints;
-               while (*ptr) {
-                       if (valid_mountpoint(*ptr, magic) == 0) {
-                               strncpy(mountpoint, *ptr, len - 1);
-                               mountpoint[len-1] = 0;
-                               return mountpoint;
-                       }
-                       ptr++;
-               }
-       }
-
-       /* give up and parse /proc/mounts */
-       fp = fopen("/proc/mounts", "r");
-       if (fp == NULL)
-               return NULL;
-
-       snprintf(format, 128, "%%*s %%%ds %%99s %%*s %%*d %%*d\n", len);
-
-       while (fscanf(fp, format, mountpoint, type) == 2) {
-               if (strcmp(type, fstype) == 0)
-                       break;
-       }
-       fclose(fp);
-
-       if (strcmp(type, fstype) != 0)
-               return NULL;
-
-       return mountpoint;
-}
diff --git a/tools/lib/api/fs/findfs.h b/tools/lib/api/fs/findfs.h
deleted file mode 100644 (file)
index b6f5d05..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __API_FINDFS_H__
-#define __API_FINDFS_H__
-
-#include <stdbool.h>
-
-#define _STR(x) #x
-#define STR(x) _STR(x)
-
-/*
- * On most systems <limits.h> would have given us this, but  not on some systems
- * (e.g. GNU/Hurd).
- */
-#ifndef PATH_MAX
-#define PATH_MAX 4096
-#endif
-
-const char *find_mountpoint(const char *fstype, long magic,
-                           char *mountpoint, int len,
-                           const char * const *known_mountpoints);
-
-int valid_mountpoint(const char *mount, long magic);
-
-#endif /* __API_FINDFS_H__ */
index 128ef6332a6bd89c0ddbeef283c4dfccbf5f8417..459599d1b6c410b7b41333c13f248ee685c2dd4a 100644 (file)
@@ -1,7 +1,6 @@
-/* TODO merge/factor in debugfs.c here */
-
 #include <ctype.h>
 #include <errno.h>
+#include <limits.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <unistd.h>
+#include <sys/mount.h>
 
-#include "debugfs.h"
 #include "fs.h"
 
+#define _STR(x) #x
+#define STR(x) _STR(x)
+
+#ifndef SYSFS_MAGIC
+#define SYSFS_MAGIC            0x62656572
+#endif
+
+#ifndef PROC_SUPER_MAGIC
+#define PROC_SUPER_MAGIC       0x9fa0
+#endif
+
+#ifndef DEBUGFS_MAGIC
+#define DEBUGFS_MAGIC          0x64626720
+#endif
+
+#ifndef TRACEFS_MAGIC
+#define TRACEFS_MAGIC          0x74726163
+#endif
+
 static const char * const sysfs__fs_known_mountpoints[] = {
        "/sys",
        0,
@@ -25,19 +43,48 @@ static const char * const procfs__known_mountpoints[] = {
        0,
 };
 
+#ifndef DEBUGFS_DEFAULT_PATH
+#define DEBUGFS_DEFAULT_PATH "/sys/kernel/debug"
+#endif
+
+static const char * const debugfs__known_mountpoints[] = {
+       DEBUGFS_DEFAULT_PATH,
+       "/debug",
+       0,
+};
+
+
+#ifndef TRACEFS_DEFAULT_PATH
+#define TRACEFS_DEFAULT_PATH "/sys/kernel/tracing"
+#endif
+
+static const char * const tracefs__known_mountpoints[] = {
+       TRACEFS_DEFAULT_PATH,
+       "/sys/kernel/debug/tracing",
+       "/tracing",
+       "/trace",
+       0,
+};
+
 struct fs {
        const char              *name;
        const char * const      *mounts;
-       char                     path[PATH_MAX + 1];
+       char                     path[PATH_MAX];
        bool                     found;
        long                     magic;
 };
 
 enum {
-       FS__SYSFS  = 0,
-       FS__PROCFS = 1,
+       FS__SYSFS   = 0,
+       FS__PROCFS  = 1,
+       FS__DEBUGFS = 2,
+       FS__TRACEFS = 3,
 };
 
+#ifndef TRACEFS_MAGIC
+#define TRACEFS_MAGIC 0x74726163
+#endif
+
 static struct fs fs__entries[] = {
        [FS__SYSFS] = {
                .name   = "sysfs",
@@ -49,6 +96,16 @@ static struct fs fs__entries[] = {
                .mounts = procfs__known_mountpoints,
                .magic  = PROC_SUPER_MAGIC,
        },
+       [FS__DEBUGFS] = {
+               .name   = "debugfs",
+               .mounts = debugfs__known_mountpoints,
+               .magic  = DEBUGFS_MAGIC,
+       },
+       [FS__TRACEFS] = {
+               .name   = "tracefs",
+               .mounts = tracefs__known_mountpoints,
+               .magic  = TRACEFS_MAGIC,
+       },
 };
 
 static bool fs__read_mounts(struct fs *fs)
@@ -159,14 +216,54 @@ static const char *fs__mountpoint(int idx)
        return fs__get_mountpoint(fs);
 }
 
-#define FS__MOUNTPOINT(name, idx)      \
-const char *name##__mountpoint(void)   \
-{                                      \
-       return fs__mountpoint(idx);     \
+static const char *mount_overload(struct fs *fs)
+{
+       size_t name_len = strlen(fs->name);
+       /* "PERF_" + name + "_ENVIRONMENT" + '\0' */
+       char upper_name[5 + name_len + 12 + 1];
+
+       snprintf(upper_name, name_len, "PERF_%s_ENVIRONMENT", fs->name);
+       mem_toupper(upper_name, name_len);
+
+       return getenv(upper_name) ?: *fs->mounts;
+}
+
+static const char *fs__mount(int idx)
+{
+       struct fs *fs = &fs__entries[idx];
+       const char *mountpoint;
+
+       if (fs__mountpoint(idx))
+               return (const char *)fs->path;
+
+       mountpoint = mount_overload(fs);
+
+       if (mount(NULL, mountpoint, fs->name, 0, NULL) < 0)
+               return NULL;
+
+       return fs__check_mounts(fs) ? fs->path : NULL;
+}
+
+#define FS(name, idx)                          \
+const char *name##__mountpoint(void)           \
+{                                              \
+       return fs__mountpoint(idx);             \
+}                                              \
+                                               \
+const char *name##__mount(void)                        \
+{                                              \
+       return fs__mount(idx);                  \
+}                                              \
+                                               \
+bool name##__configured(void)                  \
+{                                              \
+       return name##__mountpoint() != NULL;    \
 }
 
-FS__MOUNTPOINT(sysfs,  FS__SYSFS);
-FS__MOUNTPOINT(procfs, FS__PROCFS);
+FS(sysfs,   FS__SYSFS);
+FS(procfs,  FS__PROCFS);
+FS(debugfs, FS__DEBUGFS);
+FS(tracefs, FS__TRACEFS);
 
 int filename__read_int(const char *filename, int *value)
 {
@@ -185,6 +282,50 @@ int filename__read_int(const char *filename, int *value)
        return err;
 }
 
+int filename__read_ull(const char *filename, unsigned long long *value)
+{
+       char line[64];
+       int fd = open(filename, O_RDONLY), err = -1;
+
+       if (fd < 0)
+               return -1;
+
+       if (read(fd, line, sizeof(line)) > 0) {
+               *value = strtoull(line, NULL, 10);
+               if (*value != ULLONG_MAX)
+                       err = 0;
+       }
+
+       close(fd);
+       return err;
+}
+
+int sysfs__read_ull(const char *entry, unsigned long long *value)
+{
+       char path[PATH_MAX];
+       const char *sysfs = sysfs__mountpoint();
+
+       if (!sysfs)
+               return -1;
+
+       snprintf(path, sizeof(path), "%s/%s", sysfs, entry);
+
+       return filename__read_ull(path, value);
+}
+
+int sysfs__read_int(const char *entry, int *value)
+{
+       char path[PATH_MAX];
+       const char *sysfs = sysfs__mountpoint();
+
+       if (!sysfs)
+               return -1;
+
+       snprintf(path, sizeof(path), "%s/%s", sysfs, entry);
+
+       return filename__read_int(path, value);
+}
+
 int sysctl__read_int(const char *sysctl, int *value)
 {
        char path[PATH_MAX];
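
With findfs, debugfs and tracefs folded into fs.c, every filesystem gets the
same generated accessors (NAME__mountpoint(), NAME__mount(), NAME__configured())
plus the new sysfs read helpers. Purely as an illustration of the resulting API
(the include path and the sysfs file below are examples only, error handling
trimmed), a caller might look like this:

#include <stdio.h>
#include <api/fs/fs.h>          /* assumed perf-style include path */

int main(void)
{
        unsigned long long max_freq;
        const char *mnt = tracefs__mount();     /* mounts tracefs if needed */

        if (!mnt && debugfs__configured())
                mnt = debugfs__mountpoint();    /* fall back to debugfs */
        if (mnt)
                printf("tracing lives under %s\n", mnt);

        /* sysfs__read_ull() resolves the sysfs mountpoint internally. */
        if (!sysfs__read_ull("devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
                             &max_freq))
                printf("cpu0 max frequency: %llu kHz\n", max_freq);

        return 0;
}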
index 6caa2bbc6cecdc2f7fa39b69fa1cf30f8653f15e..d024a7f682f69b27a4bba1aed7b8f1894d07b50f 100644 (file)
@@ -1,17 +1,33 @@
 #ifndef __API_FS__
 #define __API_FS__
 
-#ifndef SYSFS_MAGIC
-#define SYSFS_MAGIC            0x62656572
-#endif
+#include <stdbool.h>
 
-#ifndef PROC_SUPER_MAGIC
-#define PROC_SUPER_MAGIC       0x9fa0
+/*
+ * On most systems <limits.h> would have given us this, but  not on some systems
+ * (e.g. GNU/Hurd).
+ */
+#ifndef PATH_MAX
+#define PATH_MAX 4096
 #endif
 
-const char *sysfs__mountpoint(void);
-const char *procfs__mountpoint(void);
+#define FS(name)                               \
+       const char *name##__mountpoint(void);   \
+       const char *name##__mount(void);        \
+       bool name##__configured(void);          \
+
+FS(sysfs)
+FS(procfs)
+FS(debugfs)
+FS(tracefs)
+
+#undef FS
+
 
 int filename__read_int(const char *filename, int *value);
+int filename__read_ull(const char *filename, unsigned long long *value);
+
 int sysctl__read_int(const char *sysctl, int *value);
+int sysfs__read_int(const char *entry, int *value);
+int sysfs__read_ull(const char *entry, unsigned long long *value);
 #endif /* __API_FS__ */
diff --git a/tools/lib/api/fs/tracefs.c b/tools/lib/api/fs/tracefs.c
deleted file mode 100644 (file)
index e4aa968..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdbool.h>
-#include <sys/vfs.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/mount.h>
-#include <linux/kernel.h>
-
-#include "tracefs.h"
-
-#ifndef TRACEFS_DEFAULT_PATH
-#define TRACEFS_DEFAULT_PATH           "/sys/kernel/tracing"
-#endif
-
-char tracefs_mountpoint[PATH_MAX + 1] = TRACEFS_DEFAULT_PATH;
-
-static const char * const tracefs_known_mountpoints[] = {
-       TRACEFS_DEFAULT_PATH,
-       "/sys/kernel/debug/tracing",
-       "/tracing",
-       "/trace",
-       0,
-};
-
-static bool tracefs_found;
-
-bool tracefs_configured(void)
-{
-       return tracefs_find_mountpoint() != NULL;
-}
-
-/* find the path to the mounted tracefs */
-const char *tracefs_find_mountpoint(void)
-{
-       const char *ret;
-
-       if (tracefs_found)
-               return (const char *)tracefs_mountpoint;
-
-       ret = find_mountpoint("tracefs", (long) TRACEFS_MAGIC,
-                             tracefs_mountpoint, PATH_MAX + 1,
-                             tracefs_known_mountpoints);
-
-       if (ret)
-               tracefs_found = true;
-
-       return ret;
-}
-
-/* mount the tracefs somewhere if it's not mounted */
-char *tracefs_mount(const char *mountpoint)
-{
-       /* see if it's already mounted */
-       if (tracefs_find_mountpoint())
-               goto out;
-
-       /* if not mounted and no argument */
-       if (mountpoint == NULL) {
-               /* see if environment variable set */
-               mountpoint = getenv(PERF_TRACEFS_ENVIRONMENT);
-               /* if no environment variable, use default */
-               if (mountpoint == NULL)
-                       mountpoint = TRACEFS_DEFAULT_PATH;
-       }
-
-       if (mount(NULL, mountpoint, "tracefs", 0, NULL) < 0)
-               return NULL;
-
-       /* save the mountpoint */
-       tracefs_found = true;
-       strncpy(tracefs_mountpoint, mountpoint, sizeof(tracefs_mountpoint));
-out:
-       return tracefs_mountpoint;
-}
diff --git a/tools/lib/api/fs/tracefs.h b/tools/lib/api/fs/tracefs.h
deleted file mode 100644 (file)
index da780ac..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __API_TRACEFS_H__
-#define __API_TRACEFS_H__
-
-#include "findfs.h"
-
-#ifndef TRACEFS_MAGIC
-#define TRACEFS_MAGIC          0x74726163
-#endif
-
-#ifndef PERF_TRACEFS_ENVIRONMENT
-#define PERF_TRACEFS_ENVIRONMENT "PERF_TRACEFS_DIR"
-#endif
-
-bool tracefs_configured(void);
-const char *tracefs_find_mountpoint(void);
-int tracefs_valid_mountpoint(const char *debugfs);
-char *tracefs_mount(const char *mountpoint);
-
-extern char tracefs_mountpoint[];
-
-#endif /* __API_DEBUGFS_H__ */
diff --git a/tools/lib/api/fs/tracing_path.c b/tools/lib/api/fs/tracing_path.c
new file mode 100644 (file)
index 0000000..a26bb5e
--- /dev/null
@@ -0,0 +1,135 @@
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include "fs.h"
+
+#include "tracing_path.h"
+
+
+char tracing_mnt[PATH_MAX]         = "/sys/kernel/debug";
+char tracing_path[PATH_MAX]        = "/sys/kernel/debug/tracing";
+char tracing_events_path[PATH_MAX] = "/sys/kernel/debug/tracing/events";
+
+
+static void __tracing_path_set(const char *tracing, const char *mountpoint)
+{
+       snprintf(tracing_mnt, sizeof(tracing_mnt), "%s", mountpoint);
+       snprintf(tracing_path, sizeof(tracing_path), "%s/%s",
+                mountpoint, tracing);
+       snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s%s",
+                mountpoint, tracing, "events");
+}
+
+static const char *tracing_path_tracefs_mount(void)
+{
+       const char *mnt;
+
+       mnt = tracefs__mount();
+       if (!mnt)
+               return NULL;
+
+       __tracing_path_set("", mnt);
+
+       return mnt;
+}
+
+static const char *tracing_path_debugfs_mount(void)
+{
+       const char *mnt;
+
+       mnt = debugfs__mount();
+       if (!mnt)
+               return NULL;
+
+       __tracing_path_set("tracing/", mnt);
+
+       return mnt;
+}
+
+const char *tracing_path_mount(void)
+{
+       const char *mnt;
+
+       mnt = tracing_path_tracefs_mount();
+       if (mnt)
+               return mnt;
+
+       mnt = tracing_path_debugfs_mount();
+
+       return mnt;
+}
+
+void tracing_path_set(const char *mntpt)
+{
+       __tracing_path_set("tracing/", mntpt);
+}
+
+char *get_tracing_file(const char *name)
+{
+       char *file;
+
+       if (asprintf(&file, "%s/%s", tracing_path, name) < 0)
+               return NULL;
+
+       return file;
+}
+
+void put_tracing_file(char *file)
+{
+       free(file);
+}
+
+static int strerror_open(int err, char *buf, size_t size, const char *filename)
+{
+       char sbuf[128];
+
+       switch (err) {
+       case ENOENT:
+               /*
+                * We will get here if we can't find the tracepoint, but one of
+                * debugfs or tracefs is configured, which means you probably
+                * want some tracepoint which wasn't compiled in your kernel.
+                * - jirka
+                */
+               if (debugfs__configured() || tracefs__configured()) {
+                       snprintf(buf, size,
+                                "Error:\tFile %s/%s not found.\n"
+                                "Hint:\tPerhaps this kernel misses some CONFIG_ setting to enable this feature?.\n",
+                                tracing_events_path, filename);
+                       break;
+               }
+               snprintf(buf, size, "%s",
+                        "Error:\tUnable to find debugfs/tracefs\n"
+                        "Hint:\tWas your kernel compiled with debugfs/tracefs support?\n"
+                        "Hint:\tIs the debugfs/tracefs filesystem mounted?\n"
+                        "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
+               break;
+       case EACCES: {
+               snprintf(buf, size,
+                        "Error:\tNo permissions to read %s/%s\n"
+                        "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
+                        tracing_events_path, filename, tracing_mnt);
+       }
+               break;
+       default:
+               snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
+               break;
+       }
+
+       return 0;
+}
+
+int tracing_path__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name)
+{
+       char path[PATH_MAX];
+
+       snprintf(path, PATH_MAX, "%s/%s", sys, name ?: "*");
+
+       return strerror_open(err, buf, size, path);
+}
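
The new tracing_path layer builds on those helpers: tracing_path_mount() tries
tracefs first and falls back to debugfs, updating the tracing_path and
tracing_events_path globals as a side effect. A rough sketch of a caller that
opens a tracepoint format file and reports failures through the new strerror
helper (the wrapper function and the include path are illustrative, not part of
this patch):

#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include "tracing_path.h"

static int open_tp_format(const char *sys, const char *name)
{
        char fname[PATH_MAX], errbuf[512];
        char *path;
        int fd;

        tracing_path_mount();              /* ensure tracing_path is valid */

        snprintf(fname, sizeof(fname), "events/%s/%s/format", sys, name);
        path = get_tracing_file(fname);    /* e.g. <mnt>/tracing/events/... */
        if (!path)
                return -ENOMEM;

        fd = open(path, O_RDONLY);
        if (fd < 0) {
                tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf),
                                               sys, name);
                fprintf(stderr, "%s\n", errbuf);
        }

        put_tracing_file(path);
        return fd;
}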
diff --git a/tools/lib/api/fs/tracing_path.h b/tools/lib/api/fs/tracing_path.h
new file mode 100644 (file)
index 0000000..3f233ac
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef __API_FS_TRACING_PATH_H
+#define __API_FS_TRACING_PATH_H
+
+#include <linux/types.h>
+
+extern char tracing_path[];
+extern char tracing_events_path[];
+
+void tracing_path_set(const char *mountpoint);
+const char *tracing_path_mount(void);
+
+char *get_tracing_file(const char *name);
+void put_tracing_file(char *file);
+
+int tracing_path__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name);
+#endif /* __API_FS_TRACING_PATH_H */
index f68d23a0b4877342175d82866acdcaf4963f36e4..fc9af57b666e244b6b1c06cd163738068190b6f8 100644 (file)
@@ -64,8 +64,9 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
 #$(info Determined 'srctree' to be $(srctree))
 endif
 
-FEATURE_DISPLAY = libelf libelf-getphdrnum libelf-mmap bpf
-FEATURE_TESTS = libelf bpf
+FEATURE_USER = .libbpf
+FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf
+FEATURE_DISPLAY = libelf bpf
 
 INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi
 FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)
@@ -122,8 +123,10 @@ endif
 # the same command line setup.
 MAKEOVERRIDES=
 
+all:
+
 export srctree OUTPUT CC LD CFLAGS V
-build := -f $(srctree)/tools/build/Makefile.build dir=. obj
+include $(srctree)/tools/build/Makefile.include
 
 BPF_IN    := $(OUTPUT)libbpf-in.o
 LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
@@ -132,7 +135,7 @@ CMD_TARGETS = $(LIB_FILE)
 
 TARGETS = $(CMD_TARGETS)
 
-all: $(VERSION_FILES) all_cmd
+all: fixdep $(VERSION_FILES) all_cmd
 
 all_cmd: $(CMD_TARGETS)
 
index 18ffccf004264d202632990d40227c27c41d27ad..7e319afac78a0763967b0cb8c2d38842e69c9302 100644 (file)
@@ -93,8 +93,10 @@ else
   print_install =              echo '  INSTALL  '$1'   to      $(DESTDIR_SQ)$2';
 endif
 
+all:
+
 export srctree OUTPUT CC LD CFLAGS V
-build := -f $(srctree)/tools/build/Makefile.build dir=. obj
+include $(srctree)/tools/build/Makefile.include
 
 do_compile_shared_library =                    \
        ($(print_shared_lib_compile)            \
@@ -109,7 +111,7 @@ CMD_TARGETS = $(LIB_FILE)
 TARGETS = $(CMD_TARGETS)
 
 
-all: all_cmd
+all: fixdep all_cmd
 
 all_cmd: $(CMD_TARGETS)
 
index 18bc271a4bbc7fe4232e6cc6f5fd2b2bf392846a..5e431077fcd6784ac9567a785bc6fb90a40bc7bb 100644 (file)
@@ -2,6 +2,12 @@
 #include <stdio.h>
 #include <stdlib.h>
 
+u8 kallsyms2elf_type(char type)
+{
+       type = tolower(type);
+       return (type == 't' || type == 'w') ? STT_FUNC : STT_OBJECT;
+}
+
 int kallsyms__parse(const char *filename, void *arg,
                    int (*process_symbol)(void *arg, const char *name,
                                          char type, u64 start))
index 6084f5e18b3c34d354c38a56dea258fd9769e596..4071316a766ef95023853f61ed1a27c9477aa898 100644 (file)
@@ -9,7 +9,7 @@
 #define KSYM_NAME_LEN 256
 #endif
 
-static inline u8 kallsyms2elf_type(char type)
+static inline u8 kallsyms2elf_binding(char type)
 {
        if (type == 'W')
                return STB_WEAK;
@@ -17,6 +17,8 @@ static inline u8 kallsyms2elf_type(char type)
        return isupper(type) ? STB_GLOBAL : STB_LOCAL;
 }
 
+u8 kallsyms2elf_type(char type);
+
 int kallsyms__parse(const char *filename, void *arg,
                    int (*process_symbol)(void *arg, const char *name,
                                          char type, u64 start));
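
kallsyms2elf_type() (symbol type, STT_FUNC vs. STT_OBJECT) now complements the
header's inline kallsyms2elf_binding() (STB_WEAK/STB_GLOBAL/STB_LOCAL), so a
kallsyms__parse() callback can derive both halves of the ELF symbol info from
the single /proc/kallsyms type character. An illustrative stand-alone callback,
not taken from this patch (include path assumed):

#include <stdio.h>
#include "symbol/kallsyms.h"

static int print_symbol(void *arg, const char *name, char type, u64 start)
{
        (void)arg;
        printf("%016llx type=%d bind=%d %s\n", (unsigned long long)start,
               kallsyms2elf_type(type), kallsyms2elf_binding(type), name);
        return 0;
}

int main(void)
{
        return kallsyms__parse("/proc/kallsyms", NULL, print_symbol);
}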
index 4d885934b9190e9dc995d6f5fb80b3a90ef218a3..2a912df6771bf5c6abd880667fbece1dcf1e1005 100644 (file)
@@ -848,6 +848,7 @@ static void free_arg(struct print_arg *arg)
                free(arg->bitmask.bitmask);
                break;
        case PRINT_DYNAMIC_ARRAY:
+       case PRINT_DYNAMIC_ARRAY_LEN:
                free(arg->dynarray.index);
                break;
        case PRINT_OP:
@@ -2728,6 +2729,42 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
        return EVENT_ERROR;
 }
 
+static enum event_type
+process_dynamic_array_len(struct event_format *event, struct print_arg *arg,
+                         char **tok)
+{
+       struct format_field *field;
+       enum event_type type;
+       char *token;
+
+       if (read_expect_type(EVENT_ITEM, &token) < 0)
+               goto out_free;
+
+       arg->type = PRINT_DYNAMIC_ARRAY_LEN;
+
+       /* Find the field */
+       field = pevent_find_field(event, token);
+       if (!field)
+               goto out_free;
+
+       arg->dynarray.field = field;
+       arg->dynarray.index = 0;
+
+       if (read_expected(EVENT_DELIM, ")") < 0)
+               goto out_err;
+
+       type = read_token(&token);
+       *tok = token;
+
+       return type;
+
+ out_free:
+       free_token(token);
+ out_err:
+       *tok = NULL;
+       return EVENT_ERROR;
+}
+
 static enum event_type
 process_paren(struct event_format *event, struct print_arg *arg, char **tok)
 {
@@ -2975,6 +3012,10 @@ process_function(struct event_format *event, struct print_arg *arg,
                free_token(token);
                return process_dynamic_array(event, arg, tok);
        }
+       if (strcmp(token, "__get_dynamic_array_len") == 0) {
+               free_token(token);
+               return process_dynamic_array_len(event, arg, tok);
+       }
 
        func = find_func_handler(event->pevent, token);
        if (func) {
@@ -3655,14 +3696,25 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
                        goto out_warning_op;
                }
                break;
+       case PRINT_DYNAMIC_ARRAY_LEN:
+               offset = pevent_read_number(pevent,
+                                           data + arg->dynarray.field->offset,
+                                           arg->dynarray.field->size);
+               /*
+                * The total allocated length of the dynamic array is
+                * stored in the top half of the field, and the offset
+                * is in the bottom half of the 32 bit field.
+                */
+               val = (unsigned long long)(offset >> 16);
+               break;
        case PRINT_DYNAMIC_ARRAY:
                /* Without [], we pass the address to the dynamic data */
                offset = pevent_read_number(pevent,
                                            data + arg->dynarray.field->offset,
                                            arg->dynarray.field->size);
                /*
-                * The actual length of the dynamic array is stored
-                * in the top half of the field, and the offset
+                * The total allocated length of the dynamic array is
+                * stored in the top half of the field, and the offset
                 * is in the bottom half of the 32 bit field.
                 */
                offset &= 0xffff;
@@ -3795,7 +3847,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
        struct format_field *field;
        struct printk_map *printk;
        long long val, fval;
-       unsigned long addr;
+       unsigned long long addr;
        char *str;
        unsigned char *hex;
        int print;
@@ -3828,13 +3880,30 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                 */
                if (!(field->flags & FIELD_IS_ARRAY) &&
                    field->size == pevent->long_size) {
-                       addr = *(unsigned long *)(data + field->offset);
+
+                       /* Handle heterogeneous recording and processing
+                        * architectures
+                        *
+                        * CASE I:
+                        * Traces recorded on 32-bit devices (32-bit
+                        * addressing) and processed on 64-bit devices:
+                        * In this case, only 32 bits should be read.
+                        *
+                        * CASE II:
+                        * Traces recorded on 64 bit devices and processed
+                        * on 32-bit devices:
+                        * In this case, 64 bits must be read.
+                        */
+                       addr = (pevent->long_size == 8) ?
+                               *(unsigned long long *)(data + field->offset) :
+                               (unsigned long long)*(unsigned int *)(data + field->offset);
+
                        /* Check if it matches a print format */
                        printk = find_printk(pevent, addr);
                        if (printk)
                                trace_seq_puts(s, printk->printk);
                        else
-                               trace_seq_printf(s, "%lx", addr);
+                               trace_seq_printf(s, "%llx", addr);
                        break;
                }
                str = malloc(len + 1);
@@ -4836,8 +4905,8 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
                                else
                                        ls = 2;
 
-                               if (*(ptr+1) == 'F' ||
-                                   *(ptr+1) == 'f') {
+                               if (*(ptr+1) == 'F' || *(ptr+1) == 'f' ||
+                                   *(ptr+1) == 'S' || *(ptr+1) == 's') {
                                        ptr++;
                                        show_func = *ptr;
                                } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
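
Both PRINT_DYNAMIC_ARRAY and the new PRINT_DYNAMIC_ARRAY_LEN decode the same
__data_loc packing described in the comments above: the 32-bit field stores the
allocated length in its upper 16 bits and the record-relative offset in its
lower 16 bits. A minimal stand-alone sketch of that decoding (the field value
is an example only):

#include <stdio.h>
#include <stdint.h>

static void decode_data_loc(uint32_t field, uint32_t *offset, uint32_t *len)
{
        *len    = field >> 16;     /* what __get_dynamic_array_len() yields */
        *offset = field & 0xffff;  /* where the payload starts in the record */
}

int main(void)
{
        uint32_t off, len;

        decode_data_loc(0x000c0040, &off, &len);
        printf("offset=%u len=%u\n", off, len);   /* offset=64 len=12 */
        return 0;
}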
index 204befb05a173754869f4dc00f58c48a87331091..6fc83c7edbe918b01be04667699ae56a87d8676e 100644 (file)
@@ -294,6 +294,7 @@ enum print_arg_type {
        PRINT_OP,
        PRINT_FUNC,
        PRINT_BITMASK,
+       PRINT_DYNAMIC_ARRAY_LEN,
 };
 
 struct print_arg {
index 88fe83dff7cdf4fb957a23670324138225affa86..18536f7565773b84dd29cb7a65b5df851dd4b005 100644 (file)
@@ -124,7 +124,10 @@ static const char *disassemble(unsigned char *insn, int len, uint64_t rip,
        _ER(WBINVD,              54)            \
        _ER(XSETBV,              55)            \
        _ER(APIC_WRITE,          56)            \
-       _ER(INVPCID,             58)
+       _ER(INVPCID,             58)            \
+       _ER(PML_FULL,            62)            \
+       _ER(XSAVES,              63)            \
+       _ER(XRSTORS,             64)
 
 #define SVM_EXIT_REASONS \
        _ER(EXIT_READ_CR0,      0x000)          \
@@ -352,15 +355,18 @@ static int kvm_nested_vmexit_handler(struct trace_seq *s, struct pevent_record *
 union kvm_mmu_page_role {
        unsigned word;
        struct {
-               unsigned glevels:4;
                unsigned level:4;
+               unsigned cr4_pae:1;
                unsigned quadrant:2;
-               unsigned pad_for_nice_hex_output:6;
                unsigned direct:1;
                unsigned access:3;
                unsigned invalid:1;
-               unsigned cr4_pge:1;
                unsigned nxe:1;
+               unsigned cr0_wp:1;
+               unsigned smep_and_not_wp:1;
+               unsigned smap_and_not_wp:1;
+               unsigned pad_for_nice_hex_output:8;
+               unsigned smm:8;
        };
 };
 
@@ -385,15 +391,18 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct pevent_record *record,
        if (pevent_is_file_bigendian(event->pevent) ==
            pevent_is_host_bigendian(event->pevent)) {
 
-               trace_seq_printf(s, "%u/%u q%u%s %s%s %spge %snxe",
+               trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
                                 role.level,
-                                role.glevels,
                                 role.quadrant,
                                 role.direct ? " direct" : "",
                                 access_str[role.access],
                                 role.invalid ? " invalid" : "",
-                                role.cr4_pge ? "" : "!",
-                                role.nxe ? "" : "!");
+                                role.cr4_pae ? "" : "!",
+                                role.nxe ? "" : "!",
+                                role.cr0_wp ? "" : "!",
+                                role.smep_and_not_wp ? " smep" : "",
+                                role.smap_and_not_wp ? " smap" : "",
+                                role.smm ? " smm" : "");
        } else
                trace_seq_printf(s, "WORD: %08x", role.word);
 
index 4a0501d7a3b412337960b07a695a772d2b7c1344..be764f9ec7691a3d2357214cbe1af9c6c333ad92 100644 (file)
@@ -364,21 +364,6 @@ cyc_thresh Specifies how frequently CYC packets are produced - see cyc
 
                CYC packets are not requested by default.
 
-no_force_psb   This is a driver option and is not in the IA32_RTIT_CTL MSR.
-
-               It stops the driver resetting the byte count to zero whenever
-               enabling the trace (for example on context switches) which in
-               turn results in no PSB being forced.  However some processors
-               will produce a PSB anyway.
-
-               In any case, there is still a PSB when the trace is enabled for
-               the first time.
-
-               no_force_psb can be used to slightly decrease the trace size but
-               may make it harder for the decoder to recover from errors.
-
-               no_force_psb is not selected by default.
-
 
 new snapshot option
 -------------------
@@ -686,6 +671,7 @@ The letters are:
        e       synthesize tracing error events
        d       create a debug log
        g       synthesize a call chain (use with i or x)
+       l       synthesize last branch entries (use with i or x)
 
 "Instructions" events look like they were recorded by "perf record -e
 instructions".
@@ -722,12 +708,26 @@ on the sample is *not* adjusted and reflects the last known value of TSC.
 
 For Intel PT, the default period is 100us.
 
+Setting it to a zero period means "as often as possible".
+
+In the case of Intel PT that is the same as a period of 1 and a unit of
+'instructions' (i.e. --itrace=i1i).
+
 Also the call chain size (default 16, max. 1024) for instructions or
 transactions events can be specified. e.g.
 
        --itrace=ig32
        --itrace=xg32
 
+Also the number of last branch entries (default 64, max. 1024) for instructions or
+transactions events can be specified. e.g.
+
+       --itrace=il10
+       --itrace=xl10
+
+Note that last branch entries are cleared for each sample, so there is no overlap
+from one sample to the next.
+
 To disable trace decoding entirely, use the option --no-itrace.
 
 
@@ -764,3 +764,32 @@ perf inject also accepts the --itrace option in which case tracing data is
 removed and replaced with the synthesized events. e.g.
 
        perf inject --itrace -i perf.data -o perf.data.new
+
+Below is an example of using Intel PT with autofdo.  It requires autofdo
+(https://github.com/google/autofdo) and gcc version 5.  The bubble
+sort example is from the AutoFDO tutorial (https://gcc.gnu.org/wiki/AutoFDO/Tutorial)
+amended to take the number of elements as a parameter.
+
+       $ gcc-5 -O3 sort.c -o sort_optimized
+       $ ./sort_optimized 30000
+       Bubble sorting array of 30000 elements
+       2254 ms
+
+       $ cat ~/.perfconfig
+       [intel-pt]
+               mispred-all
+
+       $ perf record -e intel_pt//u ./sort 3000
+       Bubble sorting array of 3000 elements
+       58 ms
+       [ perf record: Woken up 2 times to write data ]
+       [ perf record: Captured and wrote 3.939 MB perf.data ]
+       $ perf inject -i perf.data -o inj --itrace=i100usle --strip
+       $ ./create_gcov --binary=./sort --profile=inj --gcov=sort.gcov -gcov_version=1
+       $ gcc-5 -O3 -fauto-profile=sort.gcov sort.c -o sort_autofdo
+       $ ./sort_autofdo 30000
+       Bubble sorting array of 30000 elements
+       2155 ms
+
+Note there is currently no advantage to using Intel PT instead of LBR, but
+that may change in the future if greater use is made of the data.
index 2ff946677e3b81b15be4d0d3a4782327d80daccc..65453f4c700604f8a259df384c6e52988ce81b14 100644 (file)
@@ -6,6 +6,7 @@
                e       synthesize error events
                d       create a debug log
                g       synthesize a call chain (use with i or x)
+               l       synthesize last branch entries (use with i or x)
 
        The default is all events i.e. the same as --itrace=ibxe
 
@@ -20,3 +21,6 @@
 
        Also the call chain size (default 16, max. 1024) for instructions or
        transactions events can be specified.
+
+       Also the number of last branch entries (default 64, max. 1024) for
+       instructions or transactions events can be specified.
index ab632d9fbd7d09c8947f824dddddcfd1d1cd0837..34750fc32714caeb18ba0fdcb55cb4f4928c1648 100644 (file)
@@ -82,7 +82,7 @@ Be multi thread instead of multi process
 Specify number of groups
 
 -l::
---loop=::
+--nr_loops=::
 Specify number of loops
 
 Example of *messaging*
@@ -139,64 +139,48 @@ Suite for evaluating performance of simple memory copy in various ways.
 Options of *memcpy*
 ^^^^^^^^^^^^^^^^^^^
 -l::
---length::
-Specify length of memory to copy (default: 1MB).
+--size::
+Specify size of memory to copy (default: 1MB).
 Available units are B, KB, MB, GB and TB (case insensitive).
 
--r::
---routine::
-Specify routine to copy (default: default).
-Available routines are depend on the architecture.
+-f::
+--function::
+Specify function to copy (default: default).
+Available functions depend on the architecture.
 On x86-64, x86-64-unrolled, x86-64-movsq and x86-64-movsb are supported.
 
--i::
---iterations::
+-l::
+--nr_loops::
 Repeat memcpy invocation this number of times.
 
 -c::
---cycle::
+--cycles::
 Use perf's cpu-cycles event instead of gettimeofday syscall.
 
--o::
---only-prefault::
-Show only the result with page faults before memcpy.
-
--n::
---no-prefault::
-Show only the result without page faults before memcpy.
-
 *memset*::
 Suite for evaluating performance of simple memory set in various ways.
 
 Options of *memset*
 ^^^^^^^^^^^^^^^^^^^
 -l::
---length::
-Specify length of memory to set (default: 1MB).
+--size::
+Specify size of memory to set (default: 1MB).
 Available units are B, KB, MB, GB and TB (case insensitive).
 
--r::
---routine::
-Specify routine to set (default: default).
-Available routines are depend on the architecture.
+-f::
+--function::
+Specify function to set (default: default).
+Available functions are depend on the architecture.
 On x86-64, x86-64-unrolled, x86-64-stosq and x86-64-stosb are supported.
 
--i::
---iterations::
+-l::
+--nr_loops::
 Repeat memset invocation this number of times.
 
 -c::
---cycle::
+--cycles::
 Use perf's cpu-cycles event instead of gettimeofday syscall.
 
--o::
---only-prefault::
-Show only the result with page faults before memset.
-
--n::
---no-prefault::
-Show only the result without page faults before memset.
-
 SUITES FOR 'numa'
 ~~~~~~~~~~~~~~~~~
 *mem*::
index 0c721c3e37e1131e3546b2c0440bbcdef289a54e..0b1cedeef895369c9c60aa0a1e6cbb718c65c509 100644 (file)
@@ -50,6 +50,9 @@ OPTIONS
 
 include::itrace.txt[]
 
+--strip::
+       Use with --itrace to strip out non-synthesized events.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
index bada8933fdd4798a2890a13b2fb64d5bf75d9743..79483f40e991eaa80ffd7889f628d5e4d8ea3ce0 100644 (file)
@@ -30,6 +30,7 @@ counted. The following modifiers exist:
  G - guest counting (in KVM guests)
  H - host counting (not in KVM guests)
  p - precise level
+ P - use maximum detected precise level
  S - read sample value (PERF_SAMPLE_READ)
  D - pin the event to the PMU
 
@@ -125,6 +126,8 @@ To limit the list use:
 . If none of the above is matched, it will apply the supplied glob to all
   events, printing the ones that match.
 
+. As a last resort, it will do a substring search in all event names.
+
 One or more types can be used at the same time, listing the events for the
 types specified.
 
index 2e9ce77b5e144a720a01a01661d3482661fb4f88..e630a7d2c3483cc6b11f33190d289b19c2d43b85 100644 (file)
@@ -144,7 +144,7 @@ OPTIONS
 
 --call-graph::
        Setup and enable call-graph (stack chain/backtrace) recording,
-       implies -g.
+       implies -g.  Default is "fp".
 
        Allows specifying "fp" (frame pointer) or "dwarf"
        (DWARF's CFI - Call Frame Information) or "lbr"
@@ -154,13 +154,18 @@ OPTIONS
        In some systems, where binaries are built with gcc
        --fomit-frame-pointer, using the "fp" method will produce bogus
        call graphs, using "dwarf", if available (perf tools linked to
-       the libunwind library) should be used instead.
+       the libunwind or libdw library) should be used instead.
        Using the "lbr" method doesn't require any compiler options. It
        will produce call graphs from the hardware LBR registers. The
        main limitation is that it is only available on new Intel
        platforms, such as Haswell. It can only get user call chain. It
        doesn't work with branch stack sampling at the same time.
 
+       When "dwarf" recording is used, perf also records (user) stack dump
+       when sampled.  Default size of the stack dump is 8192 (bytes).
+       User can change the size by passing the size after comma like
+       "--call-graph dwarf,4096".
+
 -q::
 --quiet::
        Don't print any message, useful for scripting.
@@ -236,6 +241,7 @@ following filters are defined:
         - any_call: any function call or system call
         - any_ret: any function return or system call return
         - ind_call: any indirect branch
+        - call: direct calls, including far (to/from kernel) calls
         - u:  only when the branch target is at the user level
         - k: only when the branch target is in the kernel
         - hv: only when the target is at the hypervisor level
@@ -308,6 +314,12 @@ This option sets the time out limit. The default value is 500 ms.
 Record context switch events i.e. events of type PERF_RECORD_SWITCH or
 PERF_RECORD_SWITCH_CPU_WIDE.
 
+--clang-path::
+Path to clang binary to use for compiling BPF scriptlets.
+
+--clang-opt::
+Options passed to clang when compiling BPF scriptlets.
+
 SEE ALSO
 --------
 linkperf:perf-stat[1], linkperf:perf-list[1]
index 9c7981bfddad4ca92f3580169156f15b8645f8ef..5ce8da1e1256f2295c0b3273c48516463db8ce23 100644 (file)
@@ -29,7 +29,7 @@ OPTIONS
 --show-nr-samples::
        Show the number of samples for each symbol
 
---showcpuutilization::
+--show-cpu-utilization::
         Show sample percentage for different cpu modes.
 
 -T::
@@ -68,7 +68,7 @@ OPTIONS
 --sort=::
        Sort histogram entries by given key(s) - multiple keys can be specified
        in CSV format.  Following sort keys are available:
-       pid, comm, dso, symbol, parent, cpu, srcline, weight, local_weight.
+       pid, comm, dso, symbol, parent, cpu, socket, srcline, weight, local_weight.
 
        Each key has following meaning:
 
@@ -79,6 +79,7 @@ OPTIONS
        - parent: name of function matched to the parent regex filter. Unmatched
        entries are displayed as "[other]".
        - cpu: cpu number the task ran at the time of sample
+       - socket: processor socket number the task ran at the time of sample
        - srcline: filename and line number executed at the time of sample.  The
        DWARF debugging info must be provided.
        - srcfile: file name of the source file of the same. Requires dwarf
@@ -168,30 +169,40 @@ OPTIONS
 --dump-raw-trace::
         Dump raw trace in ASCII.
 
--g [type,min[,limit],order[,key][,branch]]::
---call-graph::
-        Display call chains using type, min percent threshold, optional print
-       limit and order.
-       type can be either:
+-g::
+--call-graph=<print_type,threshold[,print_limit],order,sort_key,branch>::
+        Display call chains using type, min percent threshold, print limit,
+       call order, sort key and branch.  Note that ordering of parameters is not
+       fixed so any parameter can be given in an arbitrary order.  One exception
+       is the print_limit which should be preceded by threshold.
+
+       print_type can be either:
        - flat: single column, linear exposure of call chains.
-       - graph: use a graph tree, displaying absolute overhead rates.
+       - graph: use a graph tree, displaying absolute overhead rates. (default)
        - fractal: like graph, but displays relative rates. Each branch of
-                the tree is considered as a new profiled object. +
+                the tree is considered as a new profiled object.
+       - none: disable call chain display.
+
+       threshold is a percentage value which specifies a minimum percent to be
+       included in the output call graph.  Default is 0.5 (%).
+
+       print_limit is only applied when the stdio interface is used.  It limits
+       the number of call graph entries in a single hist entry.  Note that it needs
+       to be given after threshold (but not necessarily consecutively).
+       Default is 0 (unlimited).
 
        order can be either:
        - callee: callee based call graph.
        - caller: inverted caller based call graph.
+       Default is 'caller' when --children is used, otherwise 'callee'.
 
-       key can be:
-       - function: compare on functions
+       sort_key can be:
+       - function: compare on functions (default)
        - address: compare on individual code addresses
 
        branch can be:
-       - branch: include last branch information in callgraph
-       when available. Usually more convenient to use --branch-history
-       for this.
-
-       Default: fractal,0.5,callee,function.
+       - branch: include last branch information in callgraph when available.
+                 Usually more convenient to use --branch-history for this.
 
 --children::
        Accumulate callchain of children to parent entry so that they can
@@ -204,6 +215,8 @@ OPTIONS
        beyond the specified depth will be ignored. This is a trade-off
        between information loss and faster processing especially for
        workloads that can have a very long callchain stack.
+       Note that when using the --itrace option the synthesized callchain size
+       will override this value if the synthesized callchain size is bigger.
 
        Default: 127
 
@@ -349,6 +362,9 @@ include::itrace.txt[]
        This option extends the perf report to show reference callgraphs,
        which collected by reference event, in no callgraph event.
 
+--socket-filter::
+       Only report the samples on the processor socket that match with this filter
+
 include::callchain-overhead-calculation.txt[]
 
 SEE ALSO
index dc3ec783b7bd50973da5eecb0a5ba728a1a72361..382ddfb45d1dbbb4a65bfe464c29096923b64bd3 100644 (file)
@@ -112,11 +112,11 @@ OPTIONS
 --debug-mode::
         Do various checks like samples ordering and lost events.
 
--f::
+-F::
 --fields::
         Comma separated list of fields to print. Options are:
         comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
-       srcline, period, iregs, flags.
+       srcline, period, iregs, brstack, brstacksym, flags.
         Field list can be prepended with the type, trace, sw or hw,
         to indicate to which event type the field list applies.
         e.g., -f sw:comm,tid,time,ip,sym  and -f trace:time,cpu,trace
@@ -175,6 +175,16 @@ OPTIONS
        Finally, a user may not set fields to none for all event types.
        i.e., -f "" is not allowed.
 
+       The brstack output includes branch related information with raw addresses using the
+       /v/v/v/v/ syntax in the following order:
+       FROM: branch source instruction
+       TO  : branch target instruction
+        M/P/-: M=branch target mispredicted or branch direction was mispredicted, P=target predicted or direction predicted, -=not supported
+       X/- : X=branch inside a transactional region, -=not in transaction region or not supported
+       A/- : A=TSX abort entry, -=not aborted region or not supported
+
+       The brstacksym is identical to brstack, except that the FROM and TO addresses are printed in a symbolic form if possible.
+
 -k::
 --vmlinux=<file>::
         vmlinux pathname
@@ -249,6 +259,9 @@ include::itrace.txt[]
 --full-source-path::
        Show the full path for source files for srcline output.
 
+--ns::
+       Use 9 decimal places when displaying time (i.e. show the nanoseconds)
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-script-perl[1],
index 47469abdcc1c11e2256effc86845232f78785075..4e074a6608269793d52a2ea4f28061532bb9798b 100644 (file)
@@ -128,8 +128,9 @@ perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- m
 
 -I msecs::
 --interval-print msecs::
-       Print count deltas every N milliseconds (minimum: 100ms)
-       example: perf stat -I 1000 -e cycles -a sleep 5
+Print count deltas every N milliseconds (minimum: 10ms)
+The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals.  Use with caution.
+       example: 'perf stat -I 1000 -e cycles -a sleep 5'
 
 --per-socket::
 Aggregate counts per processor socket for system-wide mode measurements.  This
index f6a23eb294e77ad2f364f7275d4916336ca60809..556cec09bf50cc4d5d9a50104d8efabd7879e2c7 100644 (file)
@@ -160,9 +160,10 @@ Default is to monitor all CPUS.
 -g::
        Enables call-graph (stack chain/backtrace) recording.
 
---call-graph::
+--call-graph [mode,type,min[,limit],order[,key][,branch]]::
        Setup and enable call-graph (stack chain/backtrace) recording,
-       implies -g.
+       implies -g.  See `--call-graph` section in perf-record and
+       perf-report man pages for details.
 
 --children::
        Accumulate callchain of children to parent entry so that they can
index 2b131776363e3a2498ce1b721440c985d22df7fd..864e37597252b3b0449cc1ae0eb5147f79b6e81b 100644 (file)
@@ -27,6 +27,14 @@ OPTIONS
        Setup buildid cache directory. It has higher priority than
        buildid.dir config file option.
 
+-v::
+--version::
+  Display perf version.
+
+-h::
+--help::
+  Run perf help command.
+
 DESCRIPTION
 -----------
 Performance counters for Linux are a new kernel-based subsystem
index af009bd6e6b7cfcc6b451838447f16922ef8bc3c..39c38cb45b00f8e3e478bfebab6a47a9bf4c3dd7 100644 (file)
@@ -17,6 +17,7 @@ tools/build
 tools/arch/x86/include/asm/atomic.h
 tools/arch/x86/include/asm/rmwcc.h
 tools/lib/traceevent
+tools/lib/bpf
 tools/lib/api
 tools/lib/bpf
 tools/lib/hweight.c
@@ -41,6 +42,7 @@ tools/include/asm-generic/bitops.h
 tools/include/linux/atomic.h
 tools/include/linux/bitops.h
 tools/include/linux/compiler.h
+tools/include/linux/filter.h
 tools/include/linux/hash.h
 tools/include/linux/kernel.h
 tools/include/linux/list.h
@@ -49,6 +51,7 @@ tools/include/linux/poison.h
 tools/include/linux/rbtree.h
 tools/include/linux/rbtree_augmented.h
 tools/include/linux/types.h
+tools/include/linux/err.h
 include/asm-generic/bitops/arch_hweight.h
 include/asm-generic/bitops/const_hweight.h
 include/asm-generic/bitops/fls64.h
@@ -67,6 +70,8 @@ arch/*/lib/memset*.S
 include/linux/poison.h
 include/linux/hw_breakpoint.h
 include/uapi/linux/perf_event.h
+include/uapi/linux/bpf.h
+include/uapi/linux/bpf_common.h
 include/uapi/linux/const.h
 include/uapi/linux/swab.h
 include/uapi/linux/hw_breakpoint.h
index d9863cb96f59b8c7bed554dd57e4bc275379eac7..0d19d5447d6c721bba185422ee32b45ce6fff3e9 100644 (file)
@@ -75,6 +75,8 @@ include config/utilities.mak
 # Define NO_LZMA if you do not want to support compressed (xz) kernel modules
 #
 # Define NO_AUXTRACE if you do not want AUX area tracing support
+#
+# Define NO_LIBBPF if you do not want BPF support
 
 # As per kernel Makefile, avoid funny character set dependencies
 unexport LC_ALL
@@ -145,6 +147,7 @@ AWK     = awk
 
 LIB_DIR          = $(srctree)/tools/lib/api/
 TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
+BPF_DIR = $(srctree)/tools/lib/bpf/
 
 # include config/Makefile by default and rule out
 # non-config cases
@@ -180,6 +183,7 @@ strip-libs = $(filter-out -l%,$(1))
 
 ifneq ($(OUTPUT),)
   TE_PATH=$(OUTPUT)
+  BPF_PATH=$(OUTPUT)
 ifneq ($(subdir),)
   LIB_PATH=$(OUTPUT)/../lib/api/
 else
@@ -188,6 +192,7 @@ endif
 else
   TE_PATH=$(TRACE_EVENT_DIR)
   LIB_PATH=$(LIB_DIR)
+  BPF_PATH=$(BPF_DIR)
 endif
 
 LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
@@ -199,6 +204,8 @@ LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS = -Xlinker --dynamic-list=$(LIBTRACEEVENT_DYN
 LIBAPI = $(LIB_PATH)libapi.a
 export LIBAPI
 
+LIBBPF = $(BPF_PATH)libbpf.a
+
 # python extension build directories
 PYTHON_EXTBUILD     := $(OUTPUT)python_ext_build/
 PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
@@ -251,6 +258,9 @@ export PERL_PATH
 LIB_FILE=$(OUTPUT)libperf.a
 
 PERFLIBS = $(LIB_FILE) $(LIBAPI) $(LIBTRACEEVENT)
+ifndef NO_LIBBPF
+  PERFLIBS += $(LIBBPF)
+endif
 
 # We choose to avoid "if .. else if .. else .. endif endif"
 # because maintaining the nesting to match is a pain.  If
@@ -297,16 +307,16 @@ strip: $(PROGRAMS) $(OUTPUT)perf
 PERF_IN := $(OUTPUT)perf-in.o
 
 export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX AWK
-build := -f $(srctree)/tools/build/Makefile.build dir=. obj
+include $(srctree)/tools/build/Makefile.include
 
-$(PERF_IN): $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h FORCE
+$(PERF_IN): prepare FORCE
        $(Q)$(MAKE) $(build)=perf
 
 $(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
        $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
                $(PERF_IN) $(LIBS) -o $@
 
-$(GTK_IN): FORCE
+$(GTK_IN): fixdep FORCE
        $(Q)$(MAKE) $(build)=gtk
 
 $(OUTPUT)libperf-gtk.so: $(GTK_IN) $(PERFLIBS)
@@ -349,27 +359,27 @@ endif
 __build-dir = $(subst $(OUTPUT),,$(dir $@))
 build-dir   = $(if $(__build-dir),$(__build-dir),.)
 
-single_dep: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
+prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h fixdep
 
-$(OUTPUT)%.o: %.c single_dep FORCE
+$(OUTPUT)%.o: %.c prepare FORCE
        $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
 
-$(OUTPUT)%.i: %.c single_dep FORCE
+$(OUTPUT)%.i: %.c prepare FORCE
        $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
 
-$(OUTPUT)%.s: %.c single_dep FORCE
+$(OUTPUT)%.s: %.c prepare FORCE
        $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
 
-$(OUTPUT)%-bison.o: %.c single_dep FORCE
+$(OUTPUT)%-bison.o: %.c prepare FORCE
        $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
 
-$(OUTPUT)%-flex.o: %.c single_dep FORCE
+$(OUTPUT)%-flex.o: %.c prepare FORCE
        $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
 
-$(OUTPUT)%.o: %.S single_dep FORCE
+$(OUTPUT)%.o: %.S prepare FORCE
        $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
 
-$(OUTPUT)%.i: %.S single_dep FORCE
+$(OUTPUT)%.i: %.S prepare FORCE
        $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
 
 $(OUTPUT)perf-%: %.o $(PERFLIBS)
@@ -389,7 +399,7 @@ $(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
 
 LIBPERF_IN := $(OUTPUT)libperf-in.o
 
-$(LIBPERF_IN): FORCE
+$(LIBPERF_IN): fixdep FORCE
        $(Q)$(MAKE) $(build)=libperf
 
 $(LIB_FILE): $(LIBPERF_IN)
@@ -397,10 +407,10 @@ $(LIB_FILE): $(LIBPERF_IN)
 
 LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
 
-$(LIBTRACEEVENT): FORCE
+$(LIBTRACEEVENT): fixdep FORCE
        $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
 
-libtraceevent_plugins: FORCE
+libtraceevent_plugins: fixdep FORCE
        $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins
 
 $(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins
@@ -413,13 +423,20 @@ $(LIBTRACEEVENT)-clean:
 install-traceevent-plugins: $(LIBTRACEEVENT)
        $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins
 
-$(LIBAPI): FORCE
+$(LIBAPI): fixdep FORCE
        $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a
 
 $(LIBAPI)-clean:
        $(call QUIET_CLEAN, libapi)
        $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
 
+$(LIBBPF): fixdep FORCE
+       $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a
+
+$(LIBBPF)-clean:
+       $(call QUIET_CLEAN, libbpf)
+       $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) clean >/dev/null
+
 help:
        @echo 'Perf make targets:'
        @echo '  doc            - make *all* documentation (see below)'
@@ -459,7 +476,7 @@ INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html
 $(DOC_TARGETS):
        $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all)
 
-TAG_FOLDERS= . ../lib/traceevent ../lib/api ../lib/symbol
+TAG_FOLDERS= . ../lib/traceevent ../lib/api ../lib/symbol ../include ../lib/bpf
 TAG_FILES= ../../include/uapi/linux/perf_event.h
 
 TAGS:
@@ -567,7 +584,7 @@ config-clean:
        $(call QUIET_CLEAN, config)
        $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
 
-clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean config-clean
+clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean config-clean
        $(call QUIET_CLEAN, core-objs)  $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
        $(Q)find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
        $(Q)$(RM) $(OUTPUT).config-detected
@@ -591,6 +608,6 @@ FORCE:
 
 .PHONY: all install clean config-clean strip install-gtk
 .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
-.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE single_dep
+.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE prepare
 .PHONY: libtraceevent_plugins
 
index b00dfd92ea731fe3c6319b703d2780207fe4cd6e..e83c8ce243039c12591781c1eea645fd2e341b2d 100644 (file)
@@ -128,9 +128,8 @@ static const char *normalize_arch(char *arch)
        return arch;
 }
 
-static int perf_session_env__lookup_binutils_path(struct perf_env *env,
-                                                 const char *name,
-                                                 const char **path)
+static int perf_env__lookup_binutils_path(struct perf_env *env,
+                                         const char *name, const char **path)
 {
        int idx;
        const char *arch, *cross_env;
@@ -206,7 +205,7 @@ out_error:
        return -1;
 }
 
-int perf_session_env__lookup_objdump(struct perf_env *env)
+int perf_env__lookup_objdump(struct perf_env *env)
 {
        /*
         * For live mode, env->arch will be NULL and we can use
@@ -215,6 +214,5 @@ int perf_session_env__lookup_objdump(struct perf_env *env)
        if (env->arch == NULL)
                return 0;
 
-       return perf_session_env__lookup_binutils_path(env, "objdump",
-                                                     &objdump_path);
+       return perf_env__lookup_binutils_path(env, "objdump", &objdump_path);
 }
index 20176df69fc83fcf31d3c38817b367723b0bed06..7529cfb143cecf0aaf74dbaaf277f3fe58e871e4 100644 (file)
@@ -1,10 +1,10 @@
 #ifndef ARCH_PERF_COMMON_H
 #define ARCH_PERF_COMMON_H
 
-#include "../util/session.h"
+#include "../util/env.h"
 
 extern const char *objdump_path;
 
-int perf_session_env__lookup_objdump(struct perf_env *env);
+int perf_env__lookup_objdump(struct perf_env *env);
 
 #endif /* ARCH_PERF_COMMON_H */
index 41bf61da476a4ce3150fc0184c84718d807be894..db52fa22d3a1d64a6fe62cffd4b34cd9ffe066f9 100644 (file)
@@ -1,2 +1,2 @@
 libperf-y += util/
-libperf-$(CONFIG_DWARF_UNWIND) += tests/
+libperf-y += tests/
index 21322e0385b886667d7bbd9a17edddc1ad1b3c8f..09ba923debe86810f8380f7df54504dee4232ec8 100644 (file)
@@ -2,3 +2,4 @@ ifndef NO_DWARF
 PERF_HAVE_DWARF_REGS := 1
 endif
 HAVE_KVM_STAT_SUPPORT := 1
+PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
new file mode 100644 (file)
index 0000000..7ed00f4
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef ARCH_TESTS_H
+#define ARCH_TESTS_H
+
+/* Tests */
+int test__rdpmc(void);
+int test__perf_time_to_tsc(void);
+int test__insn_x86(void);
+int test__intel_cqm_count_nmi_context(void);
+
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+struct thread;
+struct perf_sample;
+int test__arch_unwind_sample(struct perf_sample *sample,
+                            struct thread *thread);
+#endif
+
+extern struct test arch_tests[];
+
+#endif
index b30eff9bcc83fd14026100f8626e913fd8f954e9..cbb7e978166bcc67209f31a9600149c6a5fe30bc 100644 (file)
@@ -1,2 +1,8 @@
-libperf-y += regs_load.o
-libperf-y += dwarf-unwind.o
+libperf-$(CONFIG_DWARF_UNWIND) += regs_load.o
+libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+
+libperf-y += arch-tests.o
+libperf-y += rdpmc.o
+libperf-y += perf-time-to-tsc.o
+libperf-$(CONFIG_AUXTRACE) += insn-x86.o
+libperf-y += intel-cqm.o
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
new file mode 100644 (file)
index 0000000..2218cb6
--- /dev/null
@@ -0,0 +1,34 @@
+#include <string.h>
+#include "tests/tests.h"
+#include "arch-tests.h"
+
+struct test arch_tests[] = {
+       {
+               .desc = "x86 rdpmc test",
+               .func = test__rdpmc,
+       },
+       {
+               .desc = "Test converting perf time to TSC",
+               .func = test__perf_time_to_tsc,
+       },
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+       {
+               .desc = "Test dwarf unwind",
+               .func = test__dwarf_unwind,
+       },
+#endif
+#ifdef HAVE_AUXTRACE_SUPPORT
+       {
+               .desc = "Test x86 instruction decoder - new instructions",
+               .func = test__insn_x86,
+       },
+#endif
+       {
+               .desc = "Test intel cqm nmi context read",
+               .func = test__intel_cqm_count_nmi_context,
+       },
+       {
+               .func = NULL,
+       },
+
+};
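
arch-tests.c above fills in the NULL-terminated arch_tests[] table that arch-tests.h exports, presumably for the generic `perf test` code to pick up. As a rough sketch of how such a table is walked (the runner loop below is illustrative only; the real perf test driver does more bookkeeping and reporting):

#include <stdio.h>
#include "tests/tests.h"   /* struct test */
#include "arch-tests.h"    /* arch_tests[] */

static void run_arch_tests(void)
{
	int i;

	/* The table ends with an entry whose .func is NULL. */
	for (i = 0; arch_tests[i].func; i++) {
		int err = arch_tests[i].func();

		printf("%-50s: %s\n", arch_tests[i].desc, err ? "FAILED" : "Ok");
	}
}
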
index d8bbf7ad1681f6e5afb4a49374a3f0dbd625f3ef..7f209ce827bf5aa9e0f8ca5ee137c93ec69f451d 100644 (file)
@@ -5,6 +5,7 @@
 #include "event.h"
 #include "debug.h"
 #include "tests/tests.h"
+#include "arch-tests.h"
 
 #define STACK_SIZE 8192
 
diff --git a/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk b/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk
new file mode 100644 (file)
index 0000000..a214548
--- /dev/null
@@ -0,0 +1,75 @@
+#!/bin/awk -f
+# gen-insn-x86-dat.awk: script to convert data for the insn-x86 test
+# Copyright (c) 2015, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+
+BEGIN {
+       print "/*"
+       print " * Generated by gen-insn-x86-dat.sh and gen-insn-x86-dat.awk"
+       print " * from insn-x86-dat-src.c for inclusion by insn-x86.c"
+       print " * Do not change this code."
+       print "*/\n"
+       op = ""
+       branch = ""
+       rel = 0
+       going = 0
+}
+
+/ Start here / {
+       going = 1
+}
+
+/ Stop here / {
+       going = 0
+}
+
+/^\s*[0-9a-fA-F]+\:/ {
+       if (going) {
+               colon_pos = index($0, ":")
+               useful_line = substr($0, colon_pos + 1)
+               first_pos = match(useful_line, "[0-9a-fA-F]")
+               useful_line = substr(useful_line, first_pos)
+               gsub("\t", "\\t", useful_line)
+               printf "{{"
+               len = 0
+               for (i = 2; i <= NF; i++) {
+                       if (match($i, "^[0-9a-fA-F][0-9a-fA-F]$")) {
+                               printf "0x%s, ", $i
+                               len += 1
+                       } else {
+                               break
+                       }
+               }
+               printf "}, %d, %s, \"%s\", \"%s\",", len, rel, op, branch
+               printf "\n\"%s\",},\n", useful_line
+               op = ""
+               branch = ""
+               rel = 0
+       }
+}
+
+/ Expecting: / {
+       expecting_str = " Expecting: "
+       expecting_len = length(expecting_str)
+       expecting_pos = index($0, expecting_str)
+       useful_line = substr($0, expecting_pos + expecting_len)
+       for (i = 1; i <= NF; i++) {
+               if ($i == "Expecting:") {
+                       i++
+                       op = $i
+                       i++
+                       branch = $i
+                       i++
+                       rel = $i
+                       break
+               }
+       }
+}
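
The awk script above turns objdump -dSw output into C initializers: for each instruction line it emits the raw opcode bytes, the byte count, and the expected relative displacement, branch op type and branch class (taken from a preceding "Expecting:" comment in the source), followed by the original disassembly text. An illustrative example of one such transformation (the input line is approximate; exact objdump spacing varies):

/* objdump input line (address:, bytes, mnemonic):
 *     3c3:  f3 0f 1b 00             bndmk  (%eax),%bnd0
 * generated table entry:
 */
{{0xf3, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
"f3 0f 1b 00          \tbndmk  (%eax),%bnd0",},
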
diff --git a/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh b/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh
new file mode 100755 (executable)
index 0000000..2d4ef94
--- /dev/null
@@ -0,0 +1,43 @@
+#!/bin/sh
+# gen-insn-x86-dat: generate data for the insn-x86 test
+# Copyright (c) 2015, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+
+set -e
+
+if [ "$(uname -m)" != "x86_64" ]; then
+       echo "ERROR: This script only works on x86_64"
+       exit 1
+fi
+
+cd $(dirname $0)
+
+trap 'echo "Might need a more recent version of binutils"' EXIT
+
+echo "Compiling insn-x86-dat-src.c to 64-bit object"
+
+gcc -g -c insn-x86-dat-src.c
+
+objdump -dSw insn-x86-dat-src.o | awk -f gen-insn-x86-dat.awk > insn-x86-dat-64.c
+
+rm -f insn-x86-dat-src.o
+
+echo "Compiling insn-x86-dat-src.c to 32-bit object"
+
+gcc -g -c -m32 insn-x86-dat-src.c
+
+objdump -dSw insn-x86-dat-src.o | awk -f gen-insn-x86-dat.awk > insn-x86-dat-32.c
+
+rm -f insn-x86-dat-src.o
+
+trap - EXIT
+
+echo "Done (use git diff to see the changes)"
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-32.c b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
new file mode 100644 (file)
index 0000000..3b491cf
--- /dev/null
@@ -0,0 +1,658 @@
+/*
+ * Generated by gen-insn-x86-dat.sh and gen-insn-x86-dat.awk
+ * from insn-x86-dat-src.c for inclusion by insn-x86.c
+ * Do not change this code.
+*/
+
+{{0x0f, 0x31, }, 2, 0, "", "",
+"0f 31                \trdtsc  ",},
+{{0xf3, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"f3 0f 1b 00          \tbndmk  (%eax),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1b 05 78 56 34 12 \tbndmk  0x12345678,%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
+"f3 0f 1b 18          \tbndmk  (%eax),%bnd3",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
+"f3 0f 1b 04 01       \tbndmk  (%ecx,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 04 05 78 56 34 12 \tbndmk  0x12345678(,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
+"f3 0f 1b 04 08       \tbndmk  (%eax,%ecx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
+"f3 0f 1b 04 c8       \tbndmk  (%eax,%ecx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
+"f3 0f 1b 40 12       \tbndmk  0x12(%eax),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
+"f3 0f 1b 45 12       \tbndmk  0x12(%ebp),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 01 12    \tbndmk  0x12(%ecx,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 05 12    \tbndmk  0x12(%ebp,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 08 12    \tbndmk  0x12(%eax,%ecx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 c8 12    \tbndmk  0x12(%eax,%ecx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1b 80 78 56 34 12 \tbndmk  0x12345678(%eax),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1b 85 78 56 34 12 \tbndmk  0x12345678(%ebp),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 01 78 56 34 12 \tbndmk  0x12345678(%ecx,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 05 78 56 34 12 \tbndmk  0x12345678(%ebp,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 08 78 56 34 12 \tbndmk  0x12345678(%eax,%ecx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 c8 78 56 34 12 \tbndmk  0x12345678(%eax,%ecx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"f3 0f 1a 00          \tbndcl  (%eax),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1a 05 78 56 34 12 \tbndcl  0x12345678,%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
+"f3 0f 1a 18          \tbndcl  (%eax),%bnd3",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
+"f3 0f 1a 04 01       \tbndcl  (%ecx,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 04 05 78 56 34 12 \tbndcl  0x12345678(,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
+"f3 0f 1a 04 08       \tbndcl  (%eax,%ecx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
+"f3 0f 1a 04 c8       \tbndcl  (%eax,%ecx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
+"f3 0f 1a 40 12       \tbndcl  0x12(%eax),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
+"f3 0f 1a 45 12       \tbndcl  0x12(%ebp),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 01 12    \tbndcl  0x12(%ecx,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 05 12    \tbndcl  0x12(%ebp,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 08 12    \tbndcl  0x12(%eax,%ecx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 c8 12    \tbndcl  0x12(%eax,%ecx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1a 80 78 56 34 12 \tbndcl  0x12345678(%eax),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1a 85 78 56 34 12 \tbndcl  0x12345678(%ebp),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 01 78 56 34 12 \tbndcl  0x12345678(%ecx,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 05 78 56 34 12 \tbndcl  0x12345678(%ebp,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 08 78 56 34 12 \tbndcl  0x12345678(%eax,%ecx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 c8 78 56 34 12 \tbndcl  0x12345678(%eax,%ecx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0xc0, }, 4, 0, "", "",
+"f3 0f 1a c0          \tbndcl  %eax,%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"f2 0f 1a 00          \tbndcu  (%eax),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1a 05 78 56 34 12 \tbndcu  0x12345678,%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
+"f2 0f 1a 18          \tbndcu  (%eax),%bnd3",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
+"f2 0f 1a 04 01       \tbndcu  (%ecx,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 04 05 78 56 34 12 \tbndcu  0x12345678(,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
+"f2 0f 1a 04 08       \tbndcu  (%eax,%ecx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
+"f2 0f 1a 04 c8       \tbndcu  (%eax,%ecx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
+"f2 0f 1a 40 12       \tbndcu  0x12(%eax),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
+"f2 0f 1a 45 12       \tbndcu  0x12(%ebp),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 01 12    \tbndcu  0x12(%ecx,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 05 12    \tbndcu  0x12(%ebp,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 08 12    \tbndcu  0x12(%eax,%ecx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 c8 12    \tbndcu  0x12(%eax,%ecx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1a 80 78 56 34 12 \tbndcu  0x12345678(%eax),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1a 85 78 56 34 12 \tbndcu  0x12345678(%ebp),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 01 78 56 34 12 \tbndcu  0x12345678(%ecx,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 05 78 56 34 12 \tbndcu  0x12345678(%ebp,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 08 78 56 34 12 \tbndcu  0x12345678(%eax,%ecx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 c8 78 56 34 12 \tbndcu  0x12345678(%eax,%ecx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0xc0, }, 4, 0, "", "",
+"f2 0f 1a c0          \tbndcu  %eax,%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"f2 0f 1b 00          \tbndcn  (%eax),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1b 05 78 56 34 12 \tbndcn  0x12345678,%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
+"f2 0f 1b 18          \tbndcn  (%eax),%bnd3",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
+"f2 0f 1b 04 01       \tbndcn  (%ecx,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 04 05 78 56 34 12 \tbndcn  0x12345678(,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
+"f2 0f 1b 04 08       \tbndcn  (%eax,%ecx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
+"f2 0f 1b 04 c8       \tbndcn  (%eax,%ecx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
+"f2 0f 1b 40 12       \tbndcn  0x12(%eax),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
+"f2 0f 1b 45 12       \tbndcn  0x12(%ebp),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 01 12    \tbndcn  0x12(%ecx,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 05 12    \tbndcn  0x12(%ebp,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 08 12    \tbndcn  0x12(%eax,%ecx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 c8 12    \tbndcn  0x12(%eax,%ecx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1b 80 78 56 34 12 \tbndcn  0x12345678(%eax),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1b 85 78 56 34 12 \tbndcn  0x12345678(%ebp),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 01 78 56 34 12 \tbndcn  0x12345678(%ecx,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 05 78 56 34 12 \tbndcn  0x12345678(%ebp,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 08 78 56 34 12 \tbndcn  0x12345678(%eax,%ecx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 c8 78 56 34 12 \tbndcn  0x12345678(%eax,%ecx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0xc0, }, 4, 0, "", "",
+"f2 0f 1b c0          \tbndcn  %eax,%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"66 0f 1a 00          \tbndmov (%eax),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1a 05 78 56 34 12 \tbndmov 0x12345678,%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
+"66 0f 1a 18          \tbndmov (%eax),%bnd3",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
+"66 0f 1a 04 01       \tbndmov (%ecx,%eax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 04 05 78 56 34 12 \tbndmov 0x12345678(,%eax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
+"66 0f 1a 04 08       \tbndmov (%eax,%ecx,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
+"66 0f 1a 04 c8       \tbndmov (%eax,%ecx,8),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
+"66 0f 1a 40 12       \tbndmov 0x12(%eax),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
+"66 0f 1a 45 12       \tbndmov 0x12(%ebp),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 01 12    \tbndmov 0x12(%ecx,%eax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 05 12    \tbndmov 0x12(%ebp,%eax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 08 12    \tbndmov 0x12(%eax,%ecx,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 c8 12    \tbndmov 0x12(%eax,%ecx,8),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1a 80 78 56 34 12 \tbndmov 0x12345678(%eax),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1a 85 78 56 34 12 \tbndmov 0x12345678(%ebp),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 01 78 56 34 12 \tbndmov 0x12345678(%ecx,%eax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 05 78 56 34 12 \tbndmov 0x12345678(%ebp,%eax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 08 78 56 34 12 \tbndmov 0x12345678(%eax,%ecx,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 c8 78 56 34 12 \tbndmov 0x12345678(%eax,%ecx,8),%bnd0",},
+{{0x66, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"66 0f 1b 00          \tbndmov %bnd0,(%eax)",},
+{{0x66, 0x0f, 0x1b, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1b 05 78 56 34 12 \tbndmov %bnd0,0x12345678",},
+{{0x66, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
+"66 0f 1b 18          \tbndmov %bnd3,(%eax)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
+"66 0f 1b 04 01       \tbndmov %bnd0,(%ecx,%eax,1)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 04 05 78 56 34 12 \tbndmov %bnd0,0x12345678(,%eax,1)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
+"66 0f 1b 04 08       \tbndmov %bnd0,(%eax,%ecx,1)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
+"66 0f 1b 04 c8       \tbndmov %bnd0,(%eax,%ecx,8)",},
+{{0x66, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
+"66 0f 1b 40 12       \tbndmov %bnd0,0x12(%eax)",},
+{{0x66, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
+"66 0f 1b 45 12       \tbndmov %bnd0,0x12(%ebp)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 01 12    \tbndmov %bnd0,0x12(%ecx,%eax,1)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 05 12    \tbndmov %bnd0,0x12(%ebp,%eax,1)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 08 12    \tbndmov %bnd0,0x12(%eax,%ecx,1)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 c8 12    \tbndmov %bnd0,0x12(%eax,%ecx,8)",},
+{{0x66, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1b 80 78 56 34 12 \tbndmov %bnd0,0x12345678(%eax)",},
+{{0x66, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1b 85 78 56 34 12 \tbndmov %bnd0,0x12345678(%ebp)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 01 78 56 34 12 \tbndmov %bnd0,0x12345678(%ecx,%eax,1)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 05 78 56 34 12 \tbndmov %bnd0,0x12345678(%ebp,%eax,1)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 08 78 56 34 12 \tbndmov %bnd0,0x12345678(%eax,%ecx,1)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 c8 78 56 34 12 \tbndmov %bnd0,0x12345678(%eax,%ecx,8)",},
+{{0x66, 0x0f, 0x1a, 0xc8, }, 4, 0, "", "",
+"66 0f 1a c8          \tbndmov %bnd0,%bnd1",},
+{{0x66, 0x0f, 0x1a, 0xc1, }, 4, 0, "", "",
+"66 0f 1a c1          \tbndmov %bnd1,%bnd0",},
+{{0x0f, 0x1a, 0x00, }, 3, 0, "", "",
+"0f 1a 00             \tbndldx (%eax),%bnd0",},
+{{0x0f, 0x1a, 0x05, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1a 05 78 56 34 12 \tbndldx 0x12345678,%bnd0",},
+{{0x0f, 0x1a, 0x18, }, 3, 0, "", "",
+"0f 1a 18             \tbndldx (%eax),%bnd3",},
+{{0x0f, 0x1a, 0x04, 0x01, }, 4, 0, "", "",
+"0f 1a 04 01          \tbndldx (%ecx,%eax,1),%bnd0",},
+{{0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 04 05 78 56 34 12 \tbndldx 0x12345678(,%eax,1),%bnd0",},
+{{0x0f, 0x1a, 0x04, 0x08, }, 4, 0, "", "",
+"0f 1a 04 08          \tbndldx (%eax,%ecx,1),%bnd0",},
+{{0x0f, 0x1a, 0x40, 0x12, }, 4, 0, "", "",
+"0f 1a 40 12          \tbndldx 0x12(%eax),%bnd0",},
+{{0x0f, 0x1a, 0x45, 0x12, }, 4, 0, "", "",
+"0f 1a 45 12          \tbndldx 0x12(%ebp),%bnd0",},
+{{0x0f, 0x1a, 0x44, 0x01, 0x12, }, 5, 0, "", "",
+"0f 1a 44 01 12       \tbndldx 0x12(%ecx,%eax,1),%bnd0",},
+{{0x0f, 0x1a, 0x44, 0x05, 0x12, }, 5, 0, "", "",
+"0f 1a 44 05 12       \tbndldx 0x12(%ebp,%eax,1),%bnd0",},
+{{0x0f, 0x1a, 0x44, 0x08, 0x12, }, 5, 0, "", "",
+"0f 1a 44 08 12       \tbndldx 0x12(%eax,%ecx,1),%bnd0",},
+{{0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1a 80 78 56 34 12 \tbndldx 0x12345678(%eax),%bnd0",},
+{{0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1a 85 78 56 34 12 \tbndldx 0x12345678(%ebp),%bnd0",},
+{{0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 84 01 78 56 34 12 \tbndldx 0x12345678(%ecx,%eax,1),%bnd0",},
+{{0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 84 05 78 56 34 12 \tbndldx 0x12345678(%ebp,%eax,1),%bnd0",},
+{{0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 84 08 78 56 34 12 \tbndldx 0x12345678(%eax,%ecx,1),%bnd0",},
+{{0x0f, 0x1b, 0x00, }, 3, 0, "", "",
+"0f 1b 00             \tbndstx %bnd0,(%eax)",},
+{{0x0f, 0x1b, 0x05, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1b 05 78 56 34 12 \tbndstx %bnd0,0x12345678",},
+{{0x0f, 0x1b, 0x18, }, 3, 0, "", "",
+"0f 1b 18             \tbndstx %bnd3,(%eax)",},
+{{0x0f, 0x1b, 0x04, 0x01, }, 4, 0, "", "",
+"0f 1b 04 01          \tbndstx %bnd0,(%ecx,%eax,1)",},
+{{0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 04 05 78 56 34 12 \tbndstx %bnd0,0x12345678(,%eax,1)",},
+{{0x0f, 0x1b, 0x04, 0x08, }, 4, 0, "", "",
+"0f 1b 04 08          \tbndstx %bnd0,(%eax,%ecx,1)",},
+{{0x0f, 0x1b, 0x40, 0x12, }, 4, 0, "", "",
+"0f 1b 40 12          \tbndstx %bnd0,0x12(%eax)",},
+{{0x0f, 0x1b, 0x45, 0x12, }, 4, 0, "", "",
+"0f 1b 45 12          \tbndstx %bnd0,0x12(%ebp)",},
+{{0x0f, 0x1b, 0x44, 0x01, 0x12, }, 5, 0, "", "",
+"0f 1b 44 01 12       \tbndstx %bnd0,0x12(%ecx,%eax,1)",},
+{{0x0f, 0x1b, 0x44, 0x05, 0x12, }, 5, 0, "", "",
+"0f 1b 44 05 12       \tbndstx %bnd0,0x12(%ebp,%eax,1)",},
+{{0x0f, 0x1b, 0x44, 0x08, 0x12, }, 5, 0, "", "",
+"0f 1b 44 08 12       \tbndstx %bnd0,0x12(%eax,%ecx,1)",},
+{{0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1b 80 78 56 34 12 \tbndstx %bnd0,0x12345678(%eax)",},
+{{0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1b 85 78 56 34 12 \tbndstx %bnd0,0x12345678(%ebp)",},
+{{0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 84 01 78 56 34 12 \tbndstx %bnd0,0x12345678(%ecx,%eax,1)",},
+{{0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 84 05 78 56 34 12 \tbndstx %bnd0,0x12345678(%ebp,%eax,1)",},
+{{0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 84 08 78 56 34 12 \tbndstx %bnd0,0x12345678(%eax,%ecx,1)",},
+{{0xf2, 0xe8, 0xfc, 0xff, 0xff, 0xff, }, 6, 0xfffffffc, "call", "unconditional",
+"f2 e8 fc ff ff ff    \tbnd call 3c3 <main+0x3c3>",},
+{{0xf2, 0xff, 0x10, }, 3, 0, "call", "indirect",
+"f2 ff 10             \tbnd call *(%eax)",},
+{{0xf2, 0xc3, }, 2, 0, "ret", "indirect",
+"f2 c3                \tbnd ret ",},
+{{0xf2, 0xe9, 0xfc, 0xff, 0xff, 0xff, }, 6, 0xfffffffc, "jmp", "unconditional",
+"f2 e9 fc ff ff ff    \tbnd jmp 3ce <main+0x3ce>",},
+{{0xf2, 0xe9, 0xfc, 0xff, 0xff, 0xff, }, 6, 0xfffffffc, "jmp", "unconditional",
+"f2 e9 fc ff ff ff    \tbnd jmp 3d4 <main+0x3d4>",},
+{{0xf2, 0xff, 0x21, }, 3, 0, "jmp", "indirect",
+"f2 ff 21             \tbnd jmp *(%ecx)",},
+{{0xf2, 0x0f, 0x85, 0xfc, 0xff, 0xff, 0xff, }, 7, 0xfffffffc, "jcc", "conditional",
+"f2 0f 85 fc ff ff ff \tbnd jne 3de <main+0x3de>",},
+{{0x0f, 0x3a, 0xcc, 0xc1, 0x00, }, 5, 0, "", "",
+"0f 3a cc c1 00       \tsha1rnds4 $0x0,%xmm1,%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0xd7, 0x91, }, 5, 0, "", "",
+"0f 3a cc d7 91       \tsha1rnds4 $0x91,%xmm7,%xmm2",},
+{{0x0f, 0x3a, 0xcc, 0x00, 0x91, }, 5, 0, "", "",
+"0f 3a cc 00 91       \tsha1rnds4 $0x91,(%eax),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
+"0f 3a cc 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678,%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x18, 0x91, }, 5, 0, "", "",
+"0f 3a cc 18 91       \tsha1rnds4 $0x91,(%eax),%xmm3",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x01, 0x91, }, 6, 0, "", "",
+"0f 3a cc 04 01 91    \tsha1rnds4 $0x91,(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 04 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(,%eax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x08, 0x91, }, 6, 0, "", "",
+"0f 3a cc 04 08 91    \tsha1rnds4 $0x91,(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0xc8, 0x91, }, 6, 0, "", "",
+"0f 3a cc 04 c8 91    \tsha1rnds4 $0x91,(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x40, 0x12, 0x91, }, 6, 0, "", "",
+"0f 3a cc 40 12 91    \tsha1rnds4 $0x91,0x12(%eax),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x45, 0x12, 0x91, }, 6, 0, "", "",
+"0f 3a cc 45 12 91    \tsha1rnds4 $0x91,0x12(%ebp),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0x01, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 01 12 91 \tsha1rnds4 $0x91,0x12(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0x05, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 05 12 91 \tsha1rnds4 $0x91,0x12(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0x08, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 08 12 91 \tsha1rnds4 $0x91,0x12(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0xc8, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 c8 12 91 \tsha1rnds4 $0x91,0x12(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x80, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
+"0f 3a cc 80 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%eax),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x85, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
+"0f 3a cc 85 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%ebp),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 01 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 08 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 c8 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0xc1, }, 4, 0, "", "",
+"0f 38 c8 c1          \tsha1nexte %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xc8, 0xd7, }, 4, 0, "", "",
+"0f 38 c8 d7          \tsha1nexte %xmm7,%xmm2",},
+{{0x0f, 0x38, 0xc8, 0x00, }, 4, 0, "", "",
+"0f 38 c8 00          \tsha1nexte (%eax),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c8 05 78 56 34 12 \tsha1nexte 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x18, }, 4, 0, "", "",
+"0f 38 c8 18          \tsha1nexte (%eax),%xmm3",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 c8 04 01       \tsha1nexte (%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 04 05 78 56 34 12 \tsha1nexte 0x12345678(,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 c8 04 08       \tsha1nexte (%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 c8 04 c8       \tsha1nexte (%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 c8 40 12       \tsha1nexte 0x12(%eax),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 c8 45 12       \tsha1nexte 0x12(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 01 12    \tsha1nexte 0x12(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 05 12    \tsha1nexte 0x12(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 08 12    \tsha1nexte 0x12(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 c8 12    \tsha1nexte 0x12(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c8 80 78 56 34 12 \tsha1nexte 0x12345678(%eax),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c8 85 78 56 34 12 \tsha1nexte 0x12345678(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 01 78 56 34 12 \tsha1nexte 0x12345678(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 05 78 56 34 12 \tsha1nexte 0x12345678(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 08 78 56 34 12 \tsha1nexte 0x12345678(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 c8 78 56 34 12 \tsha1nexte 0x12345678(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0xc1, }, 4, 0, "", "",
+"0f 38 c9 c1          \tsha1msg1 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xc9, 0xd7, }, 4, 0, "", "",
+"0f 38 c9 d7          \tsha1msg1 %xmm7,%xmm2",},
+{{0x0f, 0x38, 0xc9, 0x00, }, 4, 0, "", "",
+"0f 38 c9 00          \tsha1msg1 (%eax),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c9 05 78 56 34 12 \tsha1msg1 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x18, }, 4, 0, "", "",
+"0f 38 c9 18          \tsha1msg1 (%eax),%xmm3",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 c9 04 01       \tsha1msg1 (%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 04 05 78 56 34 12 \tsha1msg1 0x12345678(,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 c9 04 08       \tsha1msg1 (%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 c9 04 c8       \tsha1msg1 (%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 c9 40 12       \tsha1msg1 0x12(%eax),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 c9 45 12       \tsha1msg1 0x12(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 01 12    \tsha1msg1 0x12(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 05 12    \tsha1msg1 0x12(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 08 12    \tsha1msg1 0x12(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 c8 12    \tsha1msg1 0x12(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c9 80 78 56 34 12 \tsha1msg1 0x12345678(%eax),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c9 85 78 56 34 12 \tsha1msg1 0x12345678(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 01 78 56 34 12 \tsha1msg1 0x12345678(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 05 78 56 34 12 \tsha1msg1 0x12345678(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 08 78 56 34 12 \tsha1msg1 0x12345678(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 c8 78 56 34 12 \tsha1msg1 0x12345678(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xca, 0xc1, }, 4, 0, "", "",
+"0f 38 ca c1          \tsha1msg2 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xca, 0xd7, }, 4, 0, "", "",
+"0f 38 ca d7          \tsha1msg2 %xmm7,%xmm2",},
+{{0x0f, 0x38, 0xca, 0x00, }, 4, 0, "", "",
+"0f 38 ca 00          \tsha1msg2 (%eax),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 ca 05 78 56 34 12 \tsha1msg2 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xca, 0x18, }, 4, 0, "", "",
+"0f 38 ca 18          \tsha1msg2 (%eax),%xmm3",},
+{{0x0f, 0x38, 0xca, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 ca 04 01       \tsha1msg2 (%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 04 05 78 56 34 12 \tsha1msg2 0x12345678(,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 ca 04 08       \tsha1msg2 (%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 ca 04 c8       \tsha1msg2 (%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 ca 40 12       \tsha1msg2 0x12(%eax),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 ca 45 12       \tsha1msg2 0x12(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 01 12    \tsha1msg2 0x12(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 05 12    \tsha1msg2 0x12(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 08 12    \tsha1msg2 0x12(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 c8 12    \tsha1msg2 0x12(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 ca 80 78 56 34 12 \tsha1msg2 0x12345678(%eax),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 ca 85 78 56 34 12 \tsha1msg2 0x12345678(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 01 78 56 34 12 \tsha1msg2 0x12345678(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 05 78 56 34 12 \tsha1msg2 0x12345678(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 08 78 56 34 12 \tsha1msg2 0x12345678(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 c8 78 56 34 12 \tsha1msg2 0x12345678(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xcb, 0xcc, }, 4, 0, "", "",
+"0f 38 cb cc          \tsha256rnds2 %xmm0,%xmm4,%xmm1",},
+{{0x0f, 0x38, 0xcb, 0xd7, }, 4, 0, "", "",
+"0f 38 cb d7          \tsha256rnds2 %xmm0,%xmm7,%xmm2",},
+{{0x0f, 0x38, 0xcb, 0x08, }, 4, 0, "", "",
+"0f 38 cb 08          \tsha256rnds2 %xmm0,(%eax),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0d, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cb 0d 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678,%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x18, }, 4, 0, "", "",
+"0f 38 cb 18          \tsha256rnds2 %xmm0,(%eax),%xmm3",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x01, }, 5, 0, "", "",
+"0f 38 cb 0c 01       \tsha256rnds2 %xmm0,(%ecx,%eax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 0c 05 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(,%eax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x08, }, 5, 0, "", "",
+"0f 38 cb 0c 08       \tsha256rnds2 %xmm0,(%eax,%ecx,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0xc8, }, 5, 0, "", "",
+"0f 38 cb 0c c8       \tsha256rnds2 %xmm0,(%eax,%ecx,8),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x48, 0x12, }, 5, 0, "", "",
+"0f 38 cb 48 12       \tsha256rnds2 %xmm0,0x12(%eax),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4d, 0x12, }, 5, 0, "", "",
+"0f 38 cb 4d 12       \tsha256rnds2 %xmm0,0x12(%ebp),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c 01 12    \tsha256rnds2 %xmm0,0x12(%ecx,%eax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c 05 12    \tsha256rnds2 %xmm0,0x12(%ebp,%eax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c 08 12    \tsha256rnds2 %xmm0,0x12(%eax,%ecx,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c c8 12    \tsha256rnds2 %xmm0,0x12(%eax,%ecx,8),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x88, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cb 88 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%eax),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8d, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cb 8d 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%ebp),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c 01 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%ecx,%eax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c 05 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%ebp,%eax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c 08 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%eax,%ecx,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c c8 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x0f, 0x38, 0xcc, 0xc1, }, 4, 0, "", "",
+"0f 38 cc c1          \tsha256msg1 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xcc, 0xd7, }, 4, 0, "", "",
+"0f 38 cc d7          \tsha256msg1 %xmm7,%xmm2",},
+{{0x0f, 0x38, 0xcc, 0x00, }, 4, 0, "", "",
+"0f 38 cc 00          \tsha256msg1 (%eax),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cc 05 78 56 34 12 \tsha256msg1 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x18, }, 4, 0, "", "",
+"0f 38 cc 18          \tsha256msg1 (%eax),%xmm3",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 cc 04 01       \tsha256msg1 (%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 04 05 78 56 34 12 \tsha256msg1 0x12345678(,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 cc 04 08       \tsha256msg1 (%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 cc 04 c8       \tsha256msg1 (%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 cc 40 12       \tsha256msg1 0x12(%eax),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 cc 45 12       \tsha256msg1 0x12(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 01 12    \tsha256msg1 0x12(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 05 12    \tsha256msg1 0x12(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 08 12    \tsha256msg1 0x12(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 c8 12    \tsha256msg1 0x12(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cc 80 78 56 34 12 \tsha256msg1 0x12345678(%eax),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cc 85 78 56 34 12 \tsha256msg1 0x12345678(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 01 78 56 34 12 \tsha256msg1 0x12345678(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 05 78 56 34 12 \tsha256msg1 0x12345678(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 08 78 56 34 12 \tsha256msg1 0x12345678(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 c8 78 56 34 12 \tsha256msg1 0x12345678(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0xc1, }, 4, 0, "", "",
+"0f 38 cd c1          \tsha256msg2 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xcd, 0xd7, }, 4, 0, "", "",
+"0f 38 cd d7          \tsha256msg2 %xmm7,%xmm2",},
+{{0x0f, 0x38, 0xcd, 0x00, }, 4, 0, "", "",
+"0f 38 cd 00          \tsha256msg2 (%eax),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cd 05 78 56 34 12 \tsha256msg2 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x18, }, 4, 0, "", "",
+"0f 38 cd 18          \tsha256msg2 (%eax),%xmm3",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 cd 04 01       \tsha256msg2 (%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 04 05 78 56 34 12 \tsha256msg2 0x12345678(,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 cd 04 08       \tsha256msg2 (%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 cd 04 c8       \tsha256msg2 (%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 cd 40 12       \tsha256msg2 0x12(%eax),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 cd 45 12       \tsha256msg2 0x12(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 01 12    \tsha256msg2 0x12(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 05 12    \tsha256msg2 0x12(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 08 12    \tsha256msg2 0x12(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 c8 12    \tsha256msg2 0x12(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cd 80 78 56 34 12 \tsha256msg2 0x12345678(%eax),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cd 85 78 56 34 12 \tsha256msg2 0x12345678(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 01 78 56 34 12 \tsha256msg2 0x12345678(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 05 78 56 34 12 \tsha256msg2 0x12345678(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 08 78 56 34 12 \tsha256msg2 0x12345678(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 c8 78 56 34 12 \tsha256msg2 0x12345678(%eax,%ecx,8),%xmm0",},
+{{0x66, 0x0f, 0xae, 0x38, }, 4, 0, "", "",
+"66 0f ae 38          \tclflushopt (%eax)",},
+{{0x66, 0x0f, 0xae, 0x3d, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f ae 3d 78 56 34 12 \tclflushopt 0x12345678",},
+{{0x66, 0x0f, 0xae, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f ae bc c8 78 56 34 12 \tclflushopt 0x12345678(%eax,%ecx,8)",},
+{{0x0f, 0xae, 0x38, }, 3, 0, "", "",
+"0f ae 38             \tclflush (%eax)",},
+{{0x0f, 0xae, 0xf8, }, 3, 0, "", "",
+"0f ae f8             \tsfence ",},
+{{0x66, 0x0f, 0xae, 0x30, }, 4, 0, "", "",
+"66 0f ae 30          \tclwb   (%eax)",},
+{{0x66, 0x0f, 0xae, 0x35, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f ae 35 78 56 34 12 \tclwb   0x12345678",},
+{{0x66, 0x0f, 0xae, 0xb4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f ae b4 c8 78 56 34 12 \tclwb   0x12345678(%eax,%ecx,8)",},
+{{0x0f, 0xae, 0x30, }, 3, 0, "", "",
+"0f ae 30             \txsaveopt (%eax)",},
+{{0x0f, 0xae, 0xf0, }, 3, 0, "", "",
+"0f ae f0             \tmfence ",},
+{{0x0f, 0xc7, 0x20, }, 3, 0, "", "",
+"0f c7 20             \txsavec (%eax)",},
+{{0x0f, 0xc7, 0x25, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f c7 25 78 56 34 12 \txsavec 0x12345678",},
+{{0x0f, 0xc7, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 a4 c8 78 56 34 12 \txsavec 0x12345678(%eax,%ecx,8)",},
+{{0x0f, 0xc7, 0x28, }, 3, 0, "", "",
+"0f c7 28             \txsaves (%eax)",},
+{{0x0f, 0xc7, 0x2d, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f c7 2d 78 56 34 12 \txsaves 0x12345678",},
+{{0x0f, 0xc7, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 ac c8 78 56 34 12 \txsaves 0x12345678(%eax,%ecx,8)",},
+{{0x0f, 0xc7, 0x18, }, 3, 0, "", "",
+"0f c7 18             \txrstors (%eax)",},
+{{0x0f, 0xc7, 0x1d, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f c7 1d 78 56 34 12 \txrstors 0x12345678",},
+{{0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%eax,%ecx,8)",},
+{{0x66, 0x0f, 0xae, 0xf8, }, 4, 0, "", "",
+"66 0f ae f8          \tpcommit ",},
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-64.c b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
new file mode 100644 (file)
index 0000000..4fe7cce
--- /dev/null
@@ -0,0 +1,768 @@
+/*
+ * Generated by gen-insn-x86-dat.sh and gen-insn-x86-dat.awk
+ * from insn-x86-dat-src.c for inclusion by insn-x86.c
+ * Do not change this code.
+*/
+
+{{0x0f, 0x31, }, 2, 0, "", "",
+"0f 31                \trdtsc  ",},
+{{0xf3, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"f3 0f 1b 00          \tbndmk  (%rax),%bnd0",},
+{{0xf3, 0x41, 0x0f, 0x1b, 0x00, }, 5, 0, "", "",
+"f3 41 0f 1b 00       \tbndmk  (%r8),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 04 25 78 56 34 12 \tbndmk  0x12345678,%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
+"f3 0f 1b 18          \tbndmk  (%rax),%bnd3",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
+"f3 0f 1b 04 01       \tbndmk  (%rcx,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 04 05 78 56 34 12 \tbndmk  0x12345678(,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
+"f3 0f 1b 04 08       \tbndmk  (%rax,%rcx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
+"f3 0f 1b 04 c8       \tbndmk  (%rax,%rcx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
+"f3 0f 1b 40 12       \tbndmk  0x12(%rax),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
+"f3 0f 1b 45 12       \tbndmk  0x12(%rbp),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 01 12    \tbndmk  0x12(%rcx,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 05 12    \tbndmk  0x12(%rbp,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 08 12    \tbndmk  0x12(%rax,%rcx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 c8 12    \tbndmk  0x12(%rax,%rcx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1b 80 78 56 34 12 \tbndmk  0x12345678(%rax),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1b 85 78 56 34 12 \tbndmk  0x12345678(%rbp),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 01 78 56 34 12 \tbndmk  0x12345678(%rcx,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 05 78 56 34 12 \tbndmk  0x12345678(%rbp,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 08 78 56 34 12 \tbndmk  0x12345678(%rax,%rcx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 c8 78 56 34 12 \tbndmk  0x12345678(%rax,%rcx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"f3 0f 1a 00          \tbndcl  (%rax),%bnd0",},
+{{0xf3, 0x41, 0x0f, 0x1a, 0x00, }, 5, 0, "", "",
+"f3 41 0f 1a 00       \tbndcl  (%r8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 04 25 78 56 34 12 \tbndcl  0x12345678,%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
+"f3 0f 1a 18          \tbndcl  (%rax),%bnd3",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
+"f3 0f 1a 04 01       \tbndcl  (%rcx,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 04 05 78 56 34 12 \tbndcl  0x12345678(,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
+"f3 0f 1a 04 08       \tbndcl  (%rax,%rcx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
+"f3 0f 1a 04 c8       \tbndcl  (%rax,%rcx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
+"f3 0f 1a 40 12       \tbndcl  0x12(%rax),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
+"f3 0f 1a 45 12       \tbndcl  0x12(%rbp),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 01 12    \tbndcl  0x12(%rcx,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 05 12    \tbndcl  0x12(%rbp,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 08 12    \tbndcl  0x12(%rax,%rcx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 c8 12    \tbndcl  0x12(%rax,%rcx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1a 80 78 56 34 12 \tbndcl  0x12345678(%rax),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1a 85 78 56 34 12 \tbndcl  0x12345678(%rbp),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 01 78 56 34 12 \tbndcl  0x12345678(%rcx,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 05 78 56 34 12 \tbndcl  0x12345678(%rbp,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 08 78 56 34 12 \tbndcl  0x12345678(%rax,%rcx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 c8 78 56 34 12 \tbndcl  0x12345678(%rax,%rcx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0xc0, }, 4, 0, "", "",
+"f3 0f 1a c0          \tbndcl  %rax,%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"f2 0f 1a 00          \tbndcu  (%rax),%bnd0",},
+{{0xf2, 0x41, 0x0f, 0x1a, 0x00, }, 5, 0, "", "",
+"f2 41 0f 1a 00       \tbndcu  (%r8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 04 25 78 56 34 12 \tbndcu  0x12345678,%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
+"f2 0f 1a 18          \tbndcu  (%rax),%bnd3",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
+"f2 0f 1a 04 01       \tbndcu  (%rcx,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 04 05 78 56 34 12 \tbndcu  0x12345678(,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
+"f2 0f 1a 04 08       \tbndcu  (%rax,%rcx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
+"f2 0f 1a 04 c8       \tbndcu  (%rax,%rcx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
+"f2 0f 1a 40 12       \tbndcu  0x12(%rax),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
+"f2 0f 1a 45 12       \tbndcu  0x12(%rbp),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 01 12    \tbndcu  0x12(%rcx,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 05 12    \tbndcu  0x12(%rbp,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 08 12    \tbndcu  0x12(%rax,%rcx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 c8 12    \tbndcu  0x12(%rax,%rcx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1a 80 78 56 34 12 \tbndcu  0x12345678(%rax),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1a 85 78 56 34 12 \tbndcu  0x12345678(%rbp),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 01 78 56 34 12 \tbndcu  0x12345678(%rcx,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 05 78 56 34 12 \tbndcu  0x12345678(%rbp,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 08 78 56 34 12 \tbndcu  0x12345678(%rax,%rcx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 c8 78 56 34 12 \tbndcu  0x12345678(%rax,%rcx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0xc0, }, 4, 0, "", "",
+"f2 0f 1a c0          \tbndcu  %rax,%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"f2 0f 1b 00          \tbndcn  (%rax),%bnd0",},
+{{0xf2, 0x41, 0x0f, 0x1b, 0x00, }, 5, 0, "", "",
+"f2 41 0f 1b 00       \tbndcn  (%r8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 04 25 78 56 34 12 \tbndcn  0x12345678,%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
+"f2 0f 1b 18          \tbndcn  (%rax),%bnd3",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
+"f2 0f 1b 04 01       \tbndcn  (%rcx,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 04 05 78 56 34 12 \tbndcn  0x12345678(,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
+"f2 0f 1b 04 08       \tbndcn  (%rax,%rcx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
+"f2 0f 1b 04 c8       \tbndcn  (%rax,%rcx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
+"f2 0f 1b 40 12       \tbndcn  0x12(%rax),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
+"f2 0f 1b 45 12       \tbndcn  0x12(%rbp),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 01 12    \tbndcn  0x12(%rcx,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 05 12    \tbndcn  0x12(%rbp,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 08 12    \tbndcn  0x12(%rax,%rcx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 c8 12    \tbndcn  0x12(%rax,%rcx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1b 80 78 56 34 12 \tbndcn  0x12345678(%rax),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1b 85 78 56 34 12 \tbndcn  0x12345678(%rbp),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 01 78 56 34 12 \tbndcn  0x12345678(%rcx,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 05 78 56 34 12 \tbndcn  0x12345678(%rbp,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 08 78 56 34 12 \tbndcn  0x12345678(%rax,%rcx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 c8 78 56 34 12 \tbndcn  0x12345678(%rax,%rcx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0xc0, }, 4, 0, "", "",
+"f2 0f 1b c0          \tbndcn  %rax,%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"66 0f 1a 00          \tbndmov (%rax),%bnd0",},
+{{0x66, 0x41, 0x0f, 0x1a, 0x00, }, 5, 0, "", "",
+"66 41 0f 1a 00       \tbndmov (%r8),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 04 25 78 56 34 12 \tbndmov 0x12345678,%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
+"66 0f 1a 18          \tbndmov (%rax),%bnd3",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
+"66 0f 1a 04 01       \tbndmov (%rcx,%rax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 04 05 78 56 34 12 \tbndmov 0x12345678(,%rax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
+"66 0f 1a 04 08       \tbndmov (%rax,%rcx,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
+"66 0f 1a 04 c8       \tbndmov (%rax,%rcx,8),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
+"66 0f 1a 40 12       \tbndmov 0x12(%rax),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
+"66 0f 1a 45 12       \tbndmov 0x12(%rbp),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 01 12    \tbndmov 0x12(%rcx,%rax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 05 12    \tbndmov 0x12(%rbp,%rax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 08 12    \tbndmov 0x12(%rax,%rcx,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 c8 12    \tbndmov 0x12(%rax,%rcx,8),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1a 80 78 56 34 12 \tbndmov 0x12345678(%rax),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1a 85 78 56 34 12 \tbndmov 0x12345678(%rbp),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 01 78 56 34 12 \tbndmov 0x12345678(%rcx,%rax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 05 78 56 34 12 \tbndmov 0x12345678(%rbp,%rax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 08 78 56 34 12 \tbndmov 0x12345678(%rax,%rcx,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 c8 78 56 34 12 \tbndmov 0x12345678(%rax,%rcx,8),%bnd0",},
+{{0x66, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"66 0f 1b 00          \tbndmov %bnd0,(%rax)",},
+{{0x66, 0x41, 0x0f, 0x1b, 0x00, }, 5, 0, "", "",
+"66 41 0f 1b 00       \tbndmov %bnd0,(%r8)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 04 25 78 56 34 12 \tbndmov %bnd0,0x12345678",},
+{{0x66, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
+"66 0f 1b 18          \tbndmov %bnd3,(%rax)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
+"66 0f 1b 04 01       \tbndmov %bnd0,(%rcx,%rax,1)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 04 05 78 56 34 12 \tbndmov %bnd0,0x12345678(,%rax,1)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
+"66 0f 1b 04 08       \tbndmov %bnd0,(%rax,%rcx,1)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
+"66 0f 1b 04 c8       \tbndmov %bnd0,(%rax,%rcx,8)",},
+{{0x66, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
+"66 0f 1b 40 12       \tbndmov %bnd0,0x12(%rax)",},
+{{0x66, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
+"66 0f 1b 45 12       \tbndmov %bnd0,0x12(%rbp)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 01 12    \tbndmov %bnd0,0x12(%rcx,%rax,1)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 05 12    \tbndmov %bnd0,0x12(%rbp,%rax,1)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 08 12    \tbndmov %bnd0,0x12(%rax,%rcx,1)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 c8 12    \tbndmov %bnd0,0x12(%rax,%rcx,8)",},
+{{0x66, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1b 80 78 56 34 12 \tbndmov %bnd0,0x12345678(%rax)",},
+{{0x66, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1b 85 78 56 34 12 \tbndmov %bnd0,0x12345678(%rbp)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 01 78 56 34 12 \tbndmov %bnd0,0x12345678(%rcx,%rax,1)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 05 78 56 34 12 \tbndmov %bnd0,0x12345678(%rbp,%rax,1)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 08 78 56 34 12 \tbndmov %bnd0,0x12345678(%rax,%rcx,1)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 c8 78 56 34 12 \tbndmov %bnd0,0x12345678(%rax,%rcx,8)",},
+{{0x66, 0x0f, 0x1a, 0xc8, }, 4, 0, "", "",
+"66 0f 1a c8          \tbndmov %bnd0,%bnd1",},
+{{0x66, 0x0f, 0x1a, 0xc1, }, 4, 0, "", "",
+"66 0f 1a c1          \tbndmov %bnd1,%bnd0",},
+{{0x0f, 0x1a, 0x00, }, 3, 0, "", "",
+"0f 1a 00             \tbndldx (%rax),%bnd0",},
+{{0x41, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"41 0f 1a 00          \tbndldx (%r8),%bnd0",},
+{{0x0f, 0x1a, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 04 25 78 56 34 12 \tbndldx 0x12345678,%bnd0",},
+{{0x0f, 0x1a, 0x18, }, 3, 0, "", "",
+"0f 1a 18             \tbndldx (%rax),%bnd3",},
+{{0x0f, 0x1a, 0x04, 0x01, }, 4, 0, "", "",
+"0f 1a 04 01          \tbndldx (%rcx,%rax,1),%bnd0",},
+{{0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 04 05 78 56 34 12 \tbndldx 0x12345678(,%rax,1),%bnd0",},
+{{0x0f, 0x1a, 0x04, 0x08, }, 4, 0, "", "",
+"0f 1a 04 08          \tbndldx (%rax,%rcx,1),%bnd0",},
+{{0x0f, 0x1a, 0x40, 0x12, }, 4, 0, "", "",
+"0f 1a 40 12          \tbndldx 0x12(%rax),%bnd0",},
+{{0x0f, 0x1a, 0x45, 0x12, }, 4, 0, "", "",
+"0f 1a 45 12          \tbndldx 0x12(%rbp),%bnd0",},
+{{0x0f, 0x1a, 0x44, 0x01, 0x12, }, 5, 0, "", "",
+"0f 1a 44 01 12       \tbndldx 0x12(%rcx,%rax,1),%bnd0",},
+{{0x0f, 0x1a, 0x44, 0x05, 0x12, }, 5, 0, "", "",
+"0f 1a 44 05 12       \tbndldx 0x12(%rbp,%rax,1),%bnd0",},
+{{0x0f, 0x1a, 0x44, 0x08, 0x12, }, 5, 0, "", "",
+"0f 1a 44 08 12       \tbndldx 0x12(%rax,%rcx,1),%bnd0",},
+{{0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1a 80 78 56 34 12 \tbndldx 0x12345678(%rax),%bnd0",},
+{{0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1a 85 78 56 34 12 \tbndldx 0x12345678(%rbp),%bnd0",},
+{{0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 84 01 78 56 34 12 \tbndldx 0x12345678(%rcx,%rax,1),%bnd0",},
+{{0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 84 05 78 56 34 12 \tbndldx 0x12345678(%rbp,%rax,1),%bnd0",},
+{{0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 84 08 78 56 34 12 \tbndldx 0x12345678(%rax,%rcx,1),%bnd0",},
+{{0x0f, 0x1b, 0x00, }, 3, 0, "", "",
+"0f 1b 00             \tbndstx %bnd0,(%rax)",},
+{{0x41, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"41 0f 1b 00          \tbndstx %bnd0,(%r8)",},
+{{0x0f, 0x1b, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 04 25 78 56 34 12 \tbndstx %bnd0,0x12345678",},
+{{0x0f, 0x1b, 0x18, }, 3, 0, "", "",
+"0f 1b 18             \tbndstx %bnd3,(%rax)",},
+{{0x0f, 0x1b, 0x04, 0x01, }, 4, 0, "", "",
+"0f 1b 04 01          \tbndstx %bnd0,(%rcx,%rax,1)",},
+{{0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 04 05 78 56 34 12 \tbndstx %bnd0,0x12345678(,%rax,1)",},
+{{0x0f, 0x1b, 0x04, 0x08, }, 4, 0, "", "",
+"0f 1b 04 08          \tbndstx %bnd0,(%rax,%rcx,1)",},
+{{0x0f, 0x1b, 0x40, 0x12, }, 4, 0, "", "",
+"0f 1b 40 12          \tbndstx %bnd0,0x12(%rax)",},
+{{0x0f, 0x1b, 0x45, 0x12, }, 4, 0, "", "",
+"0f 1b 45 12          \tbndstx %bnd0,0x12(%rbp)",},
+{{0x0f, 0x1b, 0x44, 0x01, 0x12, }, 5, 0, "", "",
+"0f 1b 44 01 12       \tbndstx %bnd0,0x12(%rcx,%rax,1)",},
+{{0x0f, 0x1b, 0x44, 0x05, 0x12, }, 5, 0, "", "",
+"0f 1b 44 05 12       \tbndstx %bnd0,0x12(%rbp,%rax,1)",},
+{{0x0f, 0x1b, 0x44, 0x08, 0x12, }, 5, 0, "", "",
+"0f 1b 44 08 12       \tbndstx %bnd0,0x12(%rax,%rcx,1)",},
+{{0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1b 80 78 56 34 12 \tbndstx %bnd0,0x12345678(%rax)",},
+{{0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1b 85 78 56 34 12 \tbndstx %bnd0,0x12345678(%rbp)",},
+{{0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 84 01 78 56 34 12 \tbndstx %bnd0,0x12345678(%rcx,%rax,1)",},
+{{0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 84 05 78 56 34 12 \tbndstx %bnd0,0x12345678(%rbp,%rax,1)",},
+{{0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 84 08 78 56 34 12 \tbndstx %bnd0,0x12345678(%rax,%rcx,1)",},
+{{0xf2, 0xe8, 0x00, 0x00, 0x00, 0x00, }, 6, 0, "call", "unconditional",
+"f2 e8 00 00 00 00    \tbnd callq 3f6 <main+0x3f6>",},
+{{0x67, 0xf2, 0xff, 0x10, }, 4, 0, "call", "indirect",
+"67 f2 ff 10          \tbnd callq *(%eax)",},
+{{0xf2, 0xc3, }, 2, 0, "ret", "indirect",
+"f2 c3                \tbnd retq ",},
+{{0xf2, 0xe9, 0x00, 0x00, 0x00, 0x00, }, 6, 0, "jmp", "unconditional",
+"f2 e9 00 00 00 00    \tbnd jmpq 402 <main+0x402>",},
+{{0xf2, 0xe9, 0x00, 0x00, 0x00, 0x00, }, 6, 0, "jmp", "unconditional",
+"f2 e9 00 00 00 00    \tbnd jmpq 408 <main+0x408>",},
+{{0x67, 0xf2, 0xff, 0x21, }, 4, 0, "jmp", "indirect",
+"67 f2 ff 21          \tbnd jmpq *(%ecx)",},
+{{0xf2, 0x0f, 0x85, 0x00, 0x00, 0x00, 0x00, }, 7, 0, "jcc", "conditional",
+"f2 0f 85 00 00 00 00 \tbnd jne 413 <main+0x413>",},
+{{0x0f, 0x3a, 0xcc, 0xc1, 0x00, }, 5, 0, "", "",
+"0f 3a cc c1 00       \tsha1rnds4 $0x0,%xmm1,%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0xd7, 0x91, }, 5, 0, "", "",
+"0f 3a cc d7 91       \tsha1rnds4 $0x91,%xmm7,%xmm2",},
+{{0x41, 0x0f, 0x3a, 0xcc, 0xc0, 0x91, }, 6, 0, "", "",
+"41 0f 3a cc c0 91    \tsha1rnds4 $0x91,%xmm8,%xmm0",},
+{{0x44, 0x0f, 0x3a, 0xcc, 0xc7, 0x91, }, 6, 0, "", "",
+"44 0f 3a cc c7 91    \tsha1rnds4 $0x91,%xmm7,%xmm8",},
+{{0x45, 0x0f, 0x3a, 0xcc, 0xc7, 0x91, }, 6, 0, "", "",
+"45 0f 3a cc c7 91    \tsha1rnds4 $0x91,%xmm15,%xmm8",},
+{{0x0f, 0x3a, 0xcc, 0x00, 0x91, }, 5, 0, "", "",
+"0f 3a cc 00 91       \tsha1rnds4 $0x91,(%rax),%xmm0",},
+{{0x41, 0x0f, 0x3a, 0xcc, 0x00, 0x91, }, 6, 0, "", "",
+"41 0f 3a cc 00 91    \tsha1rnds4 $0x91,(%r8),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 04 25 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678,%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x18, 0x91, }, 5, 0, "", "",
+"0f 3a cc 18 91       \tsha1rnds4 $0x91,(%rax),%xmm3",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x01, 0x91, }, 6, 0, "", "",
+"0f 3a cc 04 01 91    \tsha1rnds4 $0x91,(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 04 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(,%rax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x08, 0x91, }, 6, 0, "", "",
+"0f 3a cc 04 08 91    \tsha1rnds4 $0x91,(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0xc8, 0x91, }, 6, 0, "", "",
+"0f 3a cc 04 c8 91    \tsha1rnds4 $0x91,(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x40, 0x12, 0x91, }, 6, 0, "", "",
+"0f 3a cc 40 12 91    \tsha1rnds4 $0x91,0x12(%rax),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x45, 0x12, 0x91, }, 6, 0, "", "",
+"0f 3a cc 45 12 91    \tsha1rnds4 $0x91,0x12(%rbp),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0x01, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 01 12 91 \tsha1rnds4 $0x91,0x12(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0x05, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 05 12 91 \tsha1rnds4 $0x91,0x12(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0x08, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 08 12 91 \tsha1rnds4 $0x91,0x12(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0xc8, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 c8 12 91 \tsha1rnds4 $0x91,0x12(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x80, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
+"0f 3a cc 80 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rax),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x85, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
+"0f 3a cc 85 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rbp),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 01 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 08 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 c8 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rax,%rcx,8),%xmm0",},
+{{0x44, 0x0f, 0x3a, 0xcc, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x91, }, 11, 0, "", "",
+"44 0f 3a cc bc c8 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x0f, 0x38, 0xc8, 0xc1, }, 4, 0, "", "",
+"0f 38 c8 c1          \tsha1nexte %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xc8, 0xd7, }, 4, 0, "", "",
+"0f 38 c8 d7          \tsha1nexte %xmm7,%xmm2",},
+{{0x41, 0x0f, 0x38, 0xc8, 0xc0, }, 5, 0, "", "",
+"41 0f 38 c8 c0       \tsha1nexte %xmm8,%xmm0",},
+{{0x44, 0x0f, 0x38, 0xc8, 0xc7, }, 5, 0, "", "",
+"44 0f 38 c8 c7       \tsha1nexte %xmm7,%xmm8",},
+{{0x45, 0x0f, 0x38, 0xc8, 0xc7, }, 5, 0, "", "",
+"45 0f 38 c8 c7       \tsha1nexte %xmm15,%xmm8",},
+{{0x0f, 0x38, 0xc8, 0x00, }, 4, 0, "", "",
+"0f 38 c8 00          \tsha1nexte (%rax),%xmm0",},
+{{0x41, 0x0f, 0x38, 0xc8, 0x00, }, 5, 0, "", "",
+"41 0f 38 c8 00       \tsha1nexte (%r8),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 04 25 78 56 34 12 \tsha1nexte 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x18, }, 4, 0, "", "",
+"0f 38 c8 18          \tsha1nexte (%rax),%xmm3",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 c8 04 01       \tsha1nexte (%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 04 05 78 56 34 12 \tsha1nexte 0x12345678(,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 c8 04 08       \tsha1nexte (%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 c8 04 c8       \tsha1nexte (%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 c8 40 12       \tsha1nexte 0x12(%rax),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 c8 45 12       \tsha1nexte 0x12(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 01 12    \tsha1nexte 0x12(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 05 12    \tsha1nexte 0x12(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 08 12    \tsha1nexte 0x12(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 c8 12    \tsha1nexte 0x12(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c8 80 78 56 34 12 \tsha1nexte 0x12345678(%rax),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c8 85 78 56 34 12 \tsha1nexte 0x12345678(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 01 78 56 34 12 \tsha1nexte 0x12345678(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 05 78 56 34 12 \tsha1nexte 0x12345678(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 08 78 56 34 12 \tsha1nexte 0x12345678(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 c8 78 56 34 12 \tsha1nexte 0x12345678(%rax,%rcx,8),%xmm0",},
+{{0x44, 0x0f, 0x38, 0xc8, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"44 0f 38 c8 bc c8 78 56 34 12 \tsha1nexte 0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x0f, 0x38, 0xc9, 0xc1, }, 4, 0, "", "",
+"0f 38 c9 c1          \tsha1msg1 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xc9, 0xd7, }, 4, 0, "", "",
+"0f 38 c9 d7          \tsha1msg1 %xmm7,%xmm2",},
+{{0x41, 0x0f, 0x38, 0xc9, 0xc0, }, 5, 0, "", "",
+"41 0f 38 c9 c0       \tsha1msg1 %xmm8,%xmm0",},
+{{0x44, 0x0f, 0x38, 0xc9, 0xc7, }, 5, 0, "", "",
+"44 0f 38 c9 c7       \tsha1msg1 %xmm7,%xmm8",},
+{{0x45, 0x0f, 0x38, 0xc9, 0xc7, }, 5, 0, "", "",
+"45 0f 38 c9 c7       \tsha1msg1 %xmm15,%xmm8",},
+{{0x0f, 0x38, 0xc9, 0x00, }, 4, 0, "", "",
+"0f 38 c9 00          \tsha1msg1 (%rax),%xmm0",},
+{{0x41, 0x0f, 0x38, 0xc9, 0x00, }, 5, 0, "", "",
+"41 0f 38 c9 00       \tsha1msg1 (%r8),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 04 25 78 56 34 12 \tsha1msg1 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x18, }, 4, 0, "", "",
+"0f 38 c9 18          \tsha1msg1 (%rax),%xmm3",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 c9 04 01       \tsha1msg1 (%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 04 05 78 56 34 12 \tsha1msg1 0x12345678(,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 c9 04 08       \tsha1msg1 (%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 c9 04 c8       \tsha1msg1 (%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 c9 40 12       \tsha1msg1 0x12(%rax),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 c9 45 12       \tsha1msg1 0x12(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 01 12    \tsha1msg1 0x12(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 05 12    \tsha1msg1 0x12(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 08 12    \tsha1msg1 0x12(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 c8 12    \tsha1msg1 0x12(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c9 80 78 56 34 12 \tsha1msg1 0x12345678(%rax),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c9 85 78 56 34 12 \tsha1msg1 0x12345678(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 01 78 56 34 12 \tsha1msg1 0x12345678(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 05 78 56 34 12 \tsha1msg1 0x12345678(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 08 78 56 34 12 \tsha1msg1 0x12345678(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 c8 78 56 34 12 \tsha1msg1 0x12345678(%rax,%rcx,8),%xmm0",},
+{{0x44, 0x0f, 0x38, 0xc9, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"44 0f 38 c9 bc c8 78 56 34 12 \tsha1msg1 0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x0f, 0x38, 0xca, 0xc1, }, 4, 0, "", "",
+"0f 38 ca c1          \tsha1msg2 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xca, 0xd7, }, 4, 0, "", "",
+"0f 38 ca d7          \tsha1msg2 %xmm7,%xmm2",},
+{{0x41, 0x0f, 0x38, 0xca, 0xc0, }, 5, 0, "", "",
+"41 0f 38 ca c0       \tsha1msg2 %xmm8,%xmm0",},
+{{0x44, 0x0f, 0x38, 0xca, 0xc7, }, 5, 0, "", "",
+"44 0f 38 ca c7       \tsha1msg2 %xmm7,%xmm8",},
+{{0x45, 0x0f, 0x38, 0xca, 0xc7, }, 5, 0, "", "",
+"45 0f 38 ca c7       \tsha1msg2 %xmm15,%xmm8",},
+{{0x0f, 0x38, 0xca, 0x00, }, 4, 0, "", "",
+"0f 38 ca 00          \tsha1msg2 (%rax),%xmm0",},
+{{0x41, 0x0f, 0x38, 0xca, 0x00, }, 5, 0, "", "",
+"41 0f 38 ca 00       \tsha1msg2 (%r8),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 04 25 78 56 34 12 \tsha1msg2 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xca, 0x18, }, 4, 0, "", "",
+"0f 38 ca 18          \tsha1msg2 (%rax),%xmm3",},
+{{0x0f, 0x38, 0xca, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 ca 04 01       \tsha1msg2 (%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 04 05 78 56 34 12 \tsha1msg2 0x12345678(,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 ca 04 08       \tsha1msg2 (%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 ca 04 c8       \tsha1msg2 (%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 ca 40 12       \tsha1msg2 0x12(%rax),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 ca 45 12       \tsha1msg2 0x12(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 01 12    \tsha1msg2 0x12(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 05 12    \tsha1msg2 0x12(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 08 12    \tsha1msg2 0x12(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 c8 12    \tsha1msg2 0x12(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 ca 80 78 56 34 12 \tsha1msg2 0x12345678(%rax),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 ca 85 78 56 34 12 \tsha1msg2 0x12345678(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 01 78 56 34 12 \tsha1msg2 0x12345678(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 05 78 56 34 12 \tsha1msg2 0x12345678(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 08 78 56 34 12 \tsha1msg2 0x12345678(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 c8 78 56 34 12 \tsha1msg2 0x12345678(%rax,%rcx,8),%xmm0",},
+{{0x44, 0x0f, 0x38, 0xca, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"44 0f 38 ca bc c8 78 56 34 12 \tsha1msg2 0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x0f, 0x38, 0xcb, 0xcc, }, 4, 0, "", "",
+"0f 38 cb cc          \tsha256rnds2 %xmm0,%xmm4,%xmm1",},
+{{0x0f, 0x38, 0xcb, 0xd7, }, 4, 0, "", "",
+"0f 38 cb d7          \tsha256rnds2 %xmm0,%xmm7,%xmm2",},
+{{0x41, 0x0f, 0x38, 0xcb, 0xc8, }, 5, 0, "", "",
+"41 0f 38 cb c8       \tsha256rnds2 %xmm0,%xmm8,%xmm1",},
+{{0x44, 0x0f, 0x38, 0xcb, 0xc7, }, 5, 0, "", "",
+"44 0f 38 cb c7       \tsha256rnds2 %xmm0,%xmm7,%xmm8",},
+{{0x45, 0x0f, 0x38, 0xcb, 0xc7, }, 5, 0, "", "",
+"45 0f 38 cb c7       \tsha256rnds2 %xmm0,%xmm15,%xmm8",},
+{{0x0f, 0x38, 0xcb, 0x08, }, 4, 0, "", "",
+"0f 38 cb 08          \tsha256rnds2 %xmm0,(%rax),%xmm1",},
+{{0x41, 0x0f, 0x38, 0xcb, 0x08, }, 5, 0, "", "",
+"41 0f 38 cb 08       \tsha256rnds2 %xmm0,(%r8),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 0c 25 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678,%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x18, }, 4, 0, "", "",
+"0f 38 cb 18          \tsha256rnds2 %xmm0,(%rax),%xmm3",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x01, }, 5, 0, "", "",
+"0f 38 cb 0c 01       \tsha256rnds2 %xmm0,(%rcx,%rax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 0c 05 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(,%rax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x08, }, 5, 0, "", "",
+"0f 38 cb 0c 08       \tsha256rnds2 %xmm0,(%rax,%rcx,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0xc8, }, 5, 0, "", "",
+"0f 38 cb 0c c8       \tsha256rnds2 %xmm0,(%rax,%rcx,8),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x48, 0x12, }, 5, 0, "", "",
+"0f 38 cb 48 12       \tsha256rnds2 %xmm0,0x12(%rax),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4d, 0x12, }, 5, 0, "", "",
+"0f 38 cb 4d 12       \tsha256rnds2 %xmm0,0x12(%rbp),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c 01 12    \tsha256rnds2 %xmm0,0x12(%rcx,%rax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c 05 12    \tsha256rnds2 %xmm0,0x12(%rbp,%rax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c 08 12    \tsha256rnds2 %xmm0,0x12(%rax,%rcx,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c c8 12    \tsha256rnds2 %xmm0,0x12(%rax,%rcx,8),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x88, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cb 88 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rax),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8d, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cb 8d 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rbp),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c 01 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rcx,%rax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c 05 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rbp,%rax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c 08 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rax,%rcx,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c c8 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x44, 0x0f, 0x38, 0xcb, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"44 0f 38 cb bc c8 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x0f, 0x38, 0xcc, 0xc1, }, 4, 0, "", "",
+"0f 38 cc c1          \tsha256msg1 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xcc, 0xd7, }, 4, 0, "", "",
+"0f 38 cc d7          \tsha256msg1 %xmm7,%xmm2",},
+{{0x41, 0x0f, 0x38, 0xcc, 0xc0, }, 5, 0, "", "",
+"41 0f 38 cc c0       \tsha256msg1 %xmm8,%xmm0",},
+{{0x44, 0x0f, 0x38, 0xcc, 0xc7, }, 5, 0, "", "",
+"44 0f 38 cc c7       \tsha256msg1 %xmm7,%xmm8",},
+{{0x45, 0x0f, 0x38, 0xcc, 0xc7, }, 5, 0, "", "",
+"45 0f 38 cc c7       \tsha256msg1 %xmm15,%xmm8",},
+{{0x0f, 0x38, 0xcc, 0x00, }, 4, 0, "", "",
+"0f 38 cc 00          \tsha256msg1 (%rax),%xmm0",},
+{{0x41, 0x0f, 0x38, 0xcc, 0x00, }, 5, 0, "", "",
+"41 0f 38 cc 00       \tsha256msg1 (%r8),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 04 25 78 56 34 12 \tsha256msg1 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x18, }, 4, 0, "", "",
+"0f 38 cc 18          \tsha256msg1 (%rax),%xmm3",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 cc 04 01       \tsha256msg1 (%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 04 05 78 56 34 12 \tsha256msg1 0x12345678(,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 cc 04 08       \tsha256msg1 (%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 cc 04 c8       \tsha256msg1 (%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 cc 40 12       \tsha256msg1 0x12(%rax),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 cc 45 12       \tsha256msg1 0x12(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 01 12    \tsha256msg1 0x12(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 05 12    \tsha256msg1 0x12(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 08 12    \tsha256msg1 0x12(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 c8 12    \tsha256msg1 0x12(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cc 80 78 56 34 12 \tsha256msg1 0x12345678(%rax),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cc 85 78 56 34 12 \tsha256msg1 0x12345678(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 01 78 56 34 12 \tsha256msg1 0x12345678(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 05 78 56 34 12 \tsha256msg1 0x12345678(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 08 78 56 34 12 \tsha256msg1 0x12345678(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 c8 78 56 34 12 \tsha256msg1 0x12345678(%rax,%rcx,8),%xmm0",},
+{{0x44, 0x0f, 0x38, 0xcc, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"44 0f 38 cc bc c8 78 56 34 12 \tsha256msg1 0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x0f, 0x38, 0xcd, 0xc1, }, 4, 0, "", "",
+"0f 38 cd c1          \tsha256msg2 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xcd, 0xd7, }, 4, 0, "", "",
+"0f 38 cd d7          \tsha256msg2 %xmm7,%xmm2",},
+{{0x41, 0x0f, 0x38, 0xcd, 0xc0, }, 5, 0, "", "",
+"41 0f 38 cd c0       \tsha256msg2 %xmm8,%xmm0",},
+{{0x44, 0x0f, 0x38, 0xcd, 0xc7, }, 5, 0, "", "",
+"44 0f 38 cd c7       \tsha256msg2 %xmm7,%xmm8",},
+{{0x45, 0x0f, 0x38, 0xcd, 0xc7, }, 5, 0, "", "",
+"45 0f 38 cd c7       \tsha256msg2 %xmm15,%xmm8",},
+{{0x0f, 0x38, 0xcd, 0x00, }, 4, 0, "", "",
+"0f 38 cd 00          \tsha256msg2 (%rax),%xmm0",},
+{{0x41, 0x0f, 0x38, 0xcd, 0x00, }, 5, 0, "", "",
+"41 0f 38 cd 00       \tsha256msg2 (%r8),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 04 25 78 56 34 12 \tsha256msg2 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x18, }, 4, 0, "", "",
+"0f 38 cd 18          \tsha256msg2 (%rax),%xmm3",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 cd 04 01       \tsha256msg2 (%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 04 05 78 56 34 12 \tsha256msg2 0x12345678(,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 cd 04 08       \tsha256msg2 (%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 cd 04 c8       \tsha256msg2 (%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 cd 40 12       \tsha256msg2 0x12(%rax),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 cd 45 12       \tsha256msg2 0x12(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 01 12    \tsha256msg2 0x12(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 05 12    \tsha256msg2 0x12(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 08 12    \tsha256msg2 0x12(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 c8 12    \tsha256msg2 0x12(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cd 80 78 56 34 12 \tsha256msg2 0x12345678(%rax),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cd 85 78 56 34 12 \tsha256msg2 0x12345678(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 01 78 56 34 12 \tsha256msg2 0x12345678(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 05 78 56 34 12 \tsha256msg2 0x12345678(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 08 78 56 34 12 \tsha256msg2 0x12345678(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 c8 78 56 34 12 \tsha256msg2 0x12345678(%rax,%rcx,8),%xmm0",},
+{{0x44, 0x0f, 0x38, 0xcd, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"44 0f 38 cd bc c8 78 56 34 12 \tsha256msg2 0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x66, 0x0f, 0xae, 0x38, }, 4, 0, "", "",
+"66 0f ae 38          \tclflushopt (%rax)",},
+{{0x66, 0x41, 0x0f, 0xae, 0x38, }, 5, 0, "", "",
+"66 41 0f ae 38       \tclflushopt (%r8)",},
+{{0x66, 0x0f, 0xae, 0x3c, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f ae 3c 25 78 56 34 12 \tclflushopt 0x12345678",},
+{{0x66, 0x0f, 0xae, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f ae bc c8 78 56 34 12 \tclflushopt 0x12345678(%rax,%rcx,8)",},
+{{0x66, 0x41, 0x0f, 0xae, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"66 41 0f ae bc c8 78 56 34 12 \tclflushopt 0x12345678(%r8,%rcx,8)",},
+{{0x0f, 0xae, 0x38, }, 3, 0, "", "",
+"0f ae 38             \tclflush (%rax)",},
+{{0x41, 0x0f, 0xae, 0x38, }, 4, 0, "", "",
+"41 0f ae 38          \tclflush (%r8)",},
+{{0x0f, 0xae, 0xf8, }, 3, 0, "", "",
+"0f ae f8             \tsfence ",},
+{{0x66, 0x0f, 0xae, 0x30, }, 4, 0, "", "",
+"66 0f ae 30          \tclwb   (%rax)",},
+{{0x66, 0x41, 0x0f, 0xae, 0x30, }, 5, 0, "", "",
+"66 41 0f ae 30       \tclwb   (%r8)",},
+{{0x66, 0x0f, 0xae, 0x34, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f ae 34 25 78 56 34 12 \tclwb   0x12345678",},
+{{0x66, 0x0f, 0xae, 0xb4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f ae b4 c8 78 56 34 12 \tclwb   0x12345678(%rax,%rcx,8)",},
+{{0x66, 0x41, 0x0f, 0xae, 0xb4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"66 41 0f ae b4 c8 78 56 34 12 \tclwb   0x12345678(%r8,%rcx,8)",},
+{{0x0f, 0xae, 0x30, }, 3, 0, "", "",
+"0f ae 30             \txsaveopt (%rax)",},
+{{0x41, 0x0f, 0xae, 0x30, }, 4, 0, "", "",
+"41 0f ae 30          \txsaveopt (%r8)",},
+{{0x0f, 0xae, 0xf0, }, 3, 0, "", "",
+"0f ae f0             \tmfence ",},
+{{0x0f, 0xc7, 0x20, }, 3, 0, "", "",
+"0f c7 20             \txsavec (%rax)",},
+{{0x41, 0x0f, 0xc7, 0x20, }, 4, 0, "", "",
+"41 0f c7 20          \txsavec (%r8)",},
+{{0x0f, 0xc7, 0x24, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 24 25 78 56 34 12 \txsavec 0x12345678",},
+{{0x0f, 0xc7, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 a4 c8 78 56 34 12 \txsavec 0x12345678(%rax,%rcx,8)",},
+{{0x41, 0x0f, 0xc7, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"41 0f c7 a4 c8 78 56 34 12 \txsavec 0x12345678(%r8,%rcx,8)",},
+{{0x0f, 0xc7, 0x28, }, 3, 0, "", "",
+"0f c7 28             \txsaves (%rax)",},
+{{0x41, 0x0f, 0xc7, 0x28, }, 4, 0, "", "",
+"41 0f c7 28          \txsaves (%r8)",},
+{{0x0f, 0xc7, 0x2c, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 2c 25 78 56 34 12 \txsaves 0x12345678",},
+{{0x0f, 0xc7, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 ac c8 78 56 34 12 \txsaves 0x12345678(%rax,%rcx,8)",},
+{{0x41, 0x0f, 0xc7, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"41 0f c7 ac c8 78 56 34 12 \txsaves 0x12345678(%r8,%rcx,8)",},
+{{0x0f, 0xc7, 0x18, }, 3, 0, "", "",
+"0f c7 18             \txrstors (%rax)",},
+{{0x41, 0x0f, 0xc7, 0x18, }, 4, 0, "", "",
+"41 0f c7 18          \txrstors (%r8)",},
+{{0x0f, 0xc7, 0x1c, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 1c 25 78 56 34 12 \txrstors 0x12345678",},
+{{0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%rax,%rcx,8)",},
+{{0x41, 0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"41 0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%r8,%rcx,8)",},
+{{0x66, 0x0f, 0xae, 0xf8, }, 4, 0, "", "",
+"66 0f ae f8          \tpcommit ",},
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-src.c b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
new file mode 100644
index 0000000..41b1b1c
--- /dev/null
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
@@ -0,0 +1,877 @@
+/*
+ * This file contains instructions for testing by the test titled:
+ *
+ *         "Test x86 instruction decoder - new instructions"
+ *
+ * Note that the 'Expecting' comment lines are consumed by the
+ * gen-insn-x86-dat.awk script and have the format:
+ *
+ *         Expecting: <op> <branch> <rel>
+ *
+ * If this file is changed, remember to run the gen-insn-x86-dat.sh
+ * script and commit the result.
+ *
+ * Refer to insn-x86.c for more details.
+ */
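+
+/*
+ * Illustrative example (editor's sketch, not part of the generated data):
+ * a test line in the 64-bit section below such as
+ *
+ *         asm volatile("bnd call label1");   with   Expecting: call unconditional 0
+ *
+ * is picked up by gen-insn-x86-dat.awk, and the same op/branch/rel values
+ * are expected to appear in the corresponding entry of the generated
+ * insn-x86-dat-*.c tables, e.g.:
+ *
+ *         {{0xf2, 0xe8, 0x00, 0x00, 0x00, 0x00, }, 6, 0, "call", "unconditional", ...},
+ */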
+
+int main(void)
+{
+       /* Following line is a marker for the awk script - do not change */
+       asm volatile("rdtsc"); /* Start here */
+
+#ifdef __x86_64__
+
+       /* bndmk m64, bnd */
+
+       asm volatile("bndmk (%rax), %bnd0");
+       asm volatile("bndmk (%r8), %bnd0");
+       asm volatile("bndmk (0x12345678), %bnd0");
+       asm volatile("bndmk (%rax), %bnd3");
+       asm volatile("bndmk (%rcx,%rax,1), %bnd0");
+       asm volatile("bndmk 0x12345678(,%rax,1), %bnd0");
+       asm volatile("bndmk (%rax,%rcx,1), %bnd0");
+       asm volatile("bndmk (%rax,%rcx,8), %bnd0");
+       asm volatile("bndmk 0x12(%rax), %bnd0");
+       asm volatile("bndmk 0x12(%rbp), %bnd0");
+       asm volatile("bndmk 0x12(%rcx,%rax,1), %bnd0");
+       asm volatile("bndmk 0x12(%rbp,%rax,1), %bnd0");
+       asm volatile("bndmk 0x12(%rax,%rcx,1), %bnd0");
+       asm volatile("bndmk 0x12(%rax,%rcx,8), %bnd0");
+       asm volatile("bndmk 0x12345678(%rax), %bnd0");
+       asm volatile("bndmk 0x12345678(%rbp), %bnd0");
+       asm volatile("bndmk 0x12345678(%rcx,%rax,1), %bnd0");
+       asm volatile("bndmk 0x12345678(%rbp,%rax,1), %bnd0");
+       asm volatile("bndmk 0x12345678(%rax,%rcx,1), %bnd0");
+       asm volatile("bndmk 0x12345678(%rax,%rcx,8), %bnd0");
+
+       /* bndcl r/m64, bnd */
+
+       asm volatile("bndcl (%rax), %bnd0");
+       asm volatile("bndcl (%r8), %bnd0");
+       asm volatile("bndcl (0x12345678), %bnd0");
+       asm volatile("bndcl (%rax), %bnd3");
+       asm volatile("bndcl (%rcx,%rax,1), %bnd0");
+       asm volatile("bndcl 0x12345678(,%rax,1), %bnd0");
+       asm volatile("bndcl (%rax,%rcx,1), %bnd0");
+       asm volatile("bndcl (%rax,%rcx,8), %bnd0");
+       asm volatile("bndcl 0x12(%rax), %bnd0");
+       asm volatile("bndcl 0x12(%rbp), %bnd0");
+       asm volatile("bndcl 0x12(%rcx,%rax,1), %bnd0");
+       asm volatile("bndcl 0x12(%rbp,%rax,1), %bnd0");
+       asm volatile("bndcl 0x12(%rax,%rcx,1), %bnd0");
+       asm volatile("bndcl 0x12(%rax,%rcx,8), %bnd0");
+       asm volatile("bndcl 0x12345678(%rax), %bnd0");
+       asm volatile("bndcl 0x12345678(%rbp), %bnd0");
+       asm volatile("bndcl 0x12345678(%rcx,%rax,1), %bnd0");
+       asm volatile("bndcl 0x12345678(%rbp,%rax,1), %bnd0");
+       asm volatile("bndcl 0x12345678(%rax,%rcx,1), %bnd0");
+       asm volatile("bndcl 0x12345678(%rax,%rcx,8), %bnd0");
+       asm volatile("bndcl %rax, %bnd0");
+
+       /* bndcu r/m64, bnd */
+
+       asm volatile("bndcu (%rax), %bnd0");
+       asm volatile("bndcu (%r8), %bnd0");
+       asm volatile("bndcu (0x12345678), %bnd0");
+       asm volatile("bndcu (%rax), %bnd3");
+       asm volatile("bndcu (%rcx,%rax,1), %bnd0");
+       asm volatile("bndcu 0x12345678(,%rax,1), %bnd0");
+       asm volatile("bndcu (%rax,%rcx,1), %bnd0");
+       asm volatile("bndcu (%rax,%rcx,8), %bnd0");
+       asm volatile("bndcu 0x12(%rax), %bnd0");
+       asm volatile("bndcu 0x12(%rbp), %bnd0");
+       asm volatile("bndcu 0x12(%rcx,%rax,1), %bnd0");
+       asm volatile("bndcu 0x12(%rbp,%rax,1), %bnd0");
+       asm volatile("bndcu 0x12(%rax,%rcx,1), %bnd0");
+       asm volatile("bndcu 0x12(%rax,%rcx,8), %bnd0");
+       asm volatile("bndcu 0x12345678(%rax), %bnd0");
+       asm volatile("bndcu 0x12345678(%rbp), %bnd0");
+       asm volatile("bndcu 0x12345678(%rcx,%rax,1), %bnd0");
+       asm volatile("bndcu 0x12345678(%rbp,%rax,1), %bnd0");
+       asm volatile("bndcu 0x12345678(%rax,%rcx,1), %bnd0");
+       asm volatile("bndcu 0x12345678(%rax,%rcx,8), %bnd0");
+       asm volatile("bndcu %rax, %bnd0");
+
+       /* bndcn r/m64, bnd */
+
+       asm volatile("bndcn (%rax), %bnd0");
+       asm volatile("bndcn (%r8), %bnd0");
+       asm volatile("bndcn (0x12345678), %bnd0");
+       asm volatile("bndcn (%rax), %bnd3");
+       asm volatile("bndcn (%rcx,%rax,1), %bnd0");
+       asm volatile("bndcn 0x12345678(,%rax,1), %bnd0");
+       asm volatile("bndcn (%rax,%rcx,1), %bnd0");
+       asm volatile("bndcn (%rax,%rcx,8), %bnd0");
+       asm volatile("bndcn 0x12(%rax), %bnd0");
+       asm volatile("bndcn 0x12(%rbp), %bnd0");
+       asm volatile("bndcn 0x12(%rcx,%rax,1), %bnd0");
+       asm volatile("bndcn 0x12(%rbp,%rax,1), %bnd0");
+       asm volatile("bndcn 0x12(%rax,%rcx,1), %bnd0");
+       asm volatile("bndcn 0x12(%rax,%rcx,8), %bnd0");
+       asm volatile("bndcn 0x12345678(%rax), %bnd0");
+       asm volatile("bndcn 0x12345678(%rbp), %bnd0");
+       asm volatile("bndcn 0x12345678(%rcx,%rax,1), %bnd0");
+       asm volatile("bndcn 0x12345678(%rbp,%rax,1), %bnd0");
+       asm volatile("bndcn 0x12345678(%rax,%rcx,1), %bnd0");
+       asm volatile("bndcn 0x12345678(%rax,%rcx,8), %bnd0");
+       asm volatile("bndcn %rax, %bnd0");
+
+       /* bndmov m128, bnd */
+
+       asm volatile("bndmov (%rax), %bnd0");
+       asm volatile("bndmov (%r8), %bnd0");
+       asm volatile("bndmov (0x12345678), %bnd0");
+       asm volatile("bndmov (%rax), %bnd3");
+       asm volatile("bndmov (%rcx,%rax,1), %bnd0");
+       asm volatile("bndmov 0x12345678(,%rax,1), %bnd0");
+       asm volatile("bndmov (%rax,%rcx,1), %bnd0");
+       asm volatile("bndmov (%rax,%rcx,8), %bnd0");
+       asm volatile("bndmov 0x12(%rax), %bnd0");
+       asm volatile("bndmov 0x12(%rbp), %bnd0");
+       asm volatile("bndmov 0x12(%rcx,%rax,1), %bnd0");
+       asm volatile("bndmov 0x12(%rbp,%rax,1), %bnd0");
+       asm volatile("bndmov 0x12(%rax,%rcx,1), %bnd0");
+       asm volatile("bndmov 0x12(%rax,%rcx,8), %bnd0");
+       asm volatile("bndmov 0x12345678(%rax), %bnd0");
+       asm volatile("bndmov 0x12345678(%rbp), %bnd0");
+       asm volatile("bndmov 0x12345678(%rcx,%rax,1), %bnd0");
+       asm volatile("bndmov 0x12345678(%rbp,%rax,1), %bnd0");
+       asm volatile("bndmov 0x12345678(%rax,%rcx,1), %bnd0");
+       asm volatile("bndmov 0x12345678(%rax,%rcx,8), %bnd0");
+
+       /* bndmov bnd, m128 */
+
+       asm volatile("bndmov %bnd0, (%rax)");
+       asm volatile("bndmov %bnd0, (%r8)");
+       asm volatile("bndmov %bnd0, (0x12345678)");
+       asm volatile("bndmov %bnd3, (%rax)");
+       asm volatile("bndmov %bnd0, (%rcx,%rax,1)");
+       asm volatile("bndmov %bnd0, 0x12345678(,%rax,1)");
+       asm volatile("bndmov %bnd0, (%rax,%rcx,1)");
+       asm volatile("bndmov %bnd0, (%rax,%rcx,8)");
+       asm volatile("bndmov %bnd0, 0x12(%rax)");
+       asm volatile("bndmov %bnd0, 0x12(%rbp)");
+       asm volatile("bndmov %bnd0, 0x12(%rcx,%rax,1)");
+       asm volatile("bndmov %bnd0, 0x12(%rbp,%rax,1)");
+       asm volatile("bndmov %bnd0, 0x12(%rax,%rcx,1)");
+       asm volatile("bndmov %bnd0, 0x12(%rax,%rcx,8)");
+       asm volatile("bndmov %bnd0, 0x12345678(%rax)");
+       asm volatile("bndmov %bnd0, 0x12345678(%rbp)");
+       asm volatile("bndmov %bnd0, 0x12345678(%rcx,%rax,1)");
+       asm volatile("bndmov %bnd0, 0x12345678(%rbp,%rax,1)");
+       asm volatile("bndmov %bnd0, 0x12345678(%rax,%rcx,1)");
+       asm volatile("bndmov %bnd0, 0x12345678(%rax,%rcx,8)");
+
+       /* bndmov bnd2, bnd1 */
+
+       asm volatile("bndmov %bnd0, %bnd1");
+       asm volatile("bndmov %bnd1, %bnd0");
+
+       /* bndldx mib, bnd */
+
+       asm volatile("bndldx (%rax), %bnd0");
+       asm volatile("bndldx (%r8), %bnd0");
+       asm volatile("bndldx (0x12345678), %bnd0");
+       asm volatile("bndldx (%rax), %bnd3");
+       asm volatile("bndldx (%rcx,%rax,1), %bnd0");
+       asm volatile("bndldx 0x12345678(,%rax,1), %bnd0");
+       asm volatile("bndldx (%rax,%rcx,1), %bnd0");
+       asm volatile("bndldx 0x12(%rax), %bnd0");
+       asm volatile("bndldx 0x12(%rbp), %bnd0");
+       asm volatile("bndldx 0x12(%rcx,%rax,1), %bnd0");
+       asm volatile("bndldx 0x12(%rbp,%rax,1), %bnd0");
+       asm volatile("bndldx 0x12(%rax,%rcx,1), %bnd0");
+       asm volatile("bndldx 0x12345678(%rax), %bnd0");
+       asm volatile("bndldx 0x12345678(%rbp), %bnd0");
+       asm volatile("bndldx 0x12345678(%rcx,%rax,1), %bnd0");
+       asm volatile("bndldx 0x12345678(%rbp,%rax,1), %bnd0");
+       asm volatile("bndldx 0x12345678(%rax,%rcx,1), %bnd0");
+
+       /* bndstx bnd, mib */
+
+       asm volatile("bndstx %bnd0, (%rax)");
+       asm volatile("bndstx %bnd0, (%r8)");
+       asm volatile("bndstx %bnd0, (0x12345678)");
+       asm volatile("bndstx %bnd3, (%rax)");
+       asm volatile("bndstx %bnd0, (%rcx,%rax,1)");
+       asm volatile("bndstx %bnd0, 0x12345678(,%rax,1)");
+       asm volatile("bndstx %bnd0, (%rax,%rcx,1)");
+       asm volatile("bndstx %bnd0, 0x12(%rax)");
+       asm volatile("bndstx %bnd0, 0x12(%rbp)");
+       asm volatile("bndstx %bnd0, 0x12(%rcx,%rax,1)");
+       asm volatile("bndstx %bnd0, 0x12(%rbp,%rax,1)");
+       asm volatile("bndstx %bnd0, 0x12(%rax,%rcx,1)");
+       asm volatile("bndstx %bnd0, 0x12345678(%rax)");
+       asm volatile("bndstx %bnd0, 0x12345678(%rbp)");
+       asm volatile("bndstx %bnd0, 0x12345678(%rcx,%rax,1)");
+       asm volatile("bndstx %bnd0, 0x12345678(%rbp,%rax,1)");
+       asm volatile("bndstx %bnd0, 0x12345678(%rax,%rcx,1)");
+
+       /* bnd prefix on call, ret, jmp and all jcc */
+
+       asm volatile("bnd call label1");  /* Expecting: call unconditional 0 */
+       asm volatile("bnd call *(%eax)"); /* Expecting: call indirect      0 */
+       asm volatile("bnd ret");          /* Expecting: ret  indirect      0 */
+       asm volatile("bnd jmp label1");   /* Expecting: jmp  unconditional 0 */
+       asm volatile("bnd jmp label1");   /* Expecting: jmp  unconditional 0 */
+       asm volatile("bnd jmp *(%ecx)");  /* Expecting: jmp  indirect      0 */
+       asm volatile("bnd jne label1");   /* Expecting: jcc  conditional   0 */
+
+       /* sha1rnds4 imm8, xmm2/m128, xmm1 */
+
+       asm volatile("sha1rnds4 $0x0, %xmm1, %xmm0");
+       asm volatile("sha1rnds4 $0x91, %xmm7, %xmm2");
+       asm volatile("sha1rnds4 $0x91, %xmm8, %xmm0");
+       asm volatile("sha1rnds4 $0x91, %xmm7, %xmm8");
+       asm volatile("sha1rnds4 $0x91, %xmm15, %xmm8");
+       asm volatile("sha1rnds4 $0x91, (%rax), %xmm0");
+       asm volatile("sha1rnds4 $0x91, (%r8), %xmm0");
+       asm volatile("sha1rnds4 $0x91, (0x12345678), %xmm0");
+       asm volatile("sha1rnds4 $0x91, (%rax), %xmm3");
+       asm volatile("sha1rnds4 $0x91, (%rcx,%rax,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(,%rax,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, (%rax,%rcx,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, (%rax,%rcx,8), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12(%rax), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12(%rbp), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12(%rcx,%rax,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12(%rbp,%rax,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12(%rax,%rcx,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12(%rax,%rcx,8), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%rax), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%rbp), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%rcx,%rax,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%rbp,%rax,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%rax,%rcx,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%rax,%rcx,8), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%rax,%rcx,8), %xmm15");
+
+       /* sha1nexte xmm2/m128, xmm1 */
+
+       asm volatile("sha1nexte %xmm1, %xmm0");
+       asm volatile("sha1nexte %xmm7, %xmm2");
+       asm volatile("sha1nexte %xmm8, %xmm0");
+       asm volatile("sha1nexte %xmm7, %xmm8");
+       asm volatile("sha1nexte %xmm15, %xmm8");
+       asm volatile("sha1nexte (%rax), %xmm0");
+       asm volatile("sha1nexte (%r8), %xmm0");
+       asm volatile("sha1nexte (0x12345678), %xmm0");
+       asm volatile("sha1nexte (%rax), %xmm3");
+       asm volatile("sha1nexte (%rcx,%rax,1), %xmm0");
+       asm volatile("sha1nexte 0x12345678(,%rax,1), %xmm0");
+       asm volatile("sha1nexte (%rax,%rcx,1), %xmm0");
+       asm volatile("sha1nexte (%rax,%rcx,8), %xmm0");
+       asm volatile("sha1nexte 0x12(%rax), %xmm0");
+       asm volatile("sha1nexte 0x12(%rbp), %xmm0");
+       asm volatile("sha1nexte 0x12(%rcx,%rax,1), %xmm0");
+       asm volatile("sha1nexte 0x12(%rbp,%rax,1), %xmm0");
+       asm volatile("sha1nexte 0x12(%rax,%rcx,1), %xmm0");
+       asm volatile("sha1nexte 0x12(%rax,%rcx,8), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%rax), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%rbp), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%rcx,%rax,1), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%rbp,%rax,1), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%rax,%rcx,1), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%rax,%rcx,8), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%rax,%rcx,8), %xmm15");
+
+       /* sha1msg1 xmm2/m128, xmm1 */
+
+       asm volatile("sha1msg1 %xmm1, %xmm0");
+       asm volatile("sha1msg1 %xmm7, %xmm2");
+       asm volatile("sha1msg1 %xmm8, %xmm0");
+       asm volatile("sha1msg1 %xmm7, %xmm8");
+       asm volatile("sha1msg1 %xmm15, %xmm8");
+       asm volatile("sha1msg1 (%rax), %xmm0");
+       asm volatile("sha1msg1 (%r8), %xmm0");
+       asm volatile("sha1msg1 (0x12345678), %xmm0");
+       asm volatile("sha1msg1 (%rax), %xmm3");
+       asm volatile("sha1msg1 (%rcx,%rax,1), %xmm0");
+       asm volatile("sha1msg1 0x12345678(,%rax,1), %xmm0");
+       asm volatile("sha1msg1 (%rax,%rcx,1), %xmm0");
+       asm volatile("sha1msg1 (%rax,%rcx,8), %xmm0");
+       asm volatile("sha1msg1 0x12(%rax), %xmm0");
+       asm volatile("sha1msg1 0x12(%rbp), %xmm0");
+       asm volatile("sha1msg1 0x12(%rcx,%rax,1), %xmm0");
+       asm volatile("sha1msg1 0x12(%rbp,%rax,1), %xmm0");
+       asm volatile("sha1msg1 0x12(%rax,%rcx,1), %xmm0");
+       asm volatile("sha1msg1 0x12(%rax,%rcx,8), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%rax), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%rbp), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%rcx,%rax,1), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%rbp,%rax,1), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%rax,%rcx,1), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%rax,%rcx,8), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%rax,%rcx,8), %xmm15");
+
+       /* sha1msg2 xmm2/m128, xmm1 */
+
+       asm volatile("sha1msg2 %xmm1, %xmm0");
+       asm volatile("sha1msg2 %xmm7, %xmm2");
+       asm volatile("sha1msg2 %xmm8, %xmm0");
+       asm volatile("sha1msg2 %xmm7, %xmm8");
+       asm volatile("sha1msg2 %xmm15, %xmm8");
+       asm volatile("sha1msg2 (%rax), %xmm0");
+       asm volatile("sha1msg2 (%r8), %xmm0");
+       asm volatile("sha1msg2 (0x12345678), %xmm0");
+       asm volatile("sha1msg2 (%rax), %xmm3");
+       asm volatile("sha1msg2 (%rcx,%rax,1), %xmm0");
+       asm volatile("sha1msg2 0x12345678(,%rax,1), %xmm0");
+       asm volatile("sha1msg2 (%rax,%rcx,1), %xmm0");
+       asm volatile("sha1msg2 (%rax,%rcx,8), %xmm0");
+       asm volatile("sha1msg2 0x12(%rax), %xmm0");
+       asm volatile("sha1msg2 0x12(%rbp), %xmm0");
+       asm volatile("sha1msg2 0x12(%rcx,%rax,1), %xmm0");
+       asm volatile("sha1msg2 0x12(%rbp,%rax,1), %xmm0");
+       asm volatile("sha1msg2 0x12(%rax,%rcx,1), %xmm0");
+       asm volatile("sha1msg2 0x12(%rax,%rcx,8), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%rax), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%rbp), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%rcx,%rax,1), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%rbp,%rax,1), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%rax,%rcx,1), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%rax,%rcx,8), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%rax,%rcx,8), %xmm15");
+
+       /* sha256rnds2 <XMM0>, xmm2/m128, xmm1 */
+       /* Note sha256rnds2 has an implicit operand 'xmm0' */
+
+       asm volatile("sha256rnds2 %xmm4, %xmm1");
+       asm volatile("sha256rnds2 %xmm7, %xmm2");
+       asm volatile("sha256rnds2 %xmm8, %xmm1");
+       asm volatile("sha256rnds2 %xmm7, %xmm8");
+       asm volatile("sha256rnds2 %xmm15, %xmm8");
+       asm volatile("sha256rnds2 (%rax), %xmm1");
+       asm volatile("sha256rnds2 (%r8), %xmm1");
+       asm volatile("sha256rnds2 (0x12345678), %xmm1");
+       asm volatile("sha256rnds2 (%rax), %xmm3");
+       asm volatile("sha256rnds2 (%rcx,%rax,1), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(,%rax,1), %xmm1");
+       asm volatile("sha256rnds2 (%rax,%rcx,1), %xmm1");
+       asm volatile("sha256rnds2 (%rax,%rcx,8), %xmm1");
+       asm volatile("sha256rnds2 0x12(%rax), %xmm1");
+       asm volatile("sha256rnds2 0x12(%rbp), %xmm1");
+       asm volatile("sha256rnds2 0x12(%rcx,%rax,1), %xmm1");
+       asm volatile("sha256rnds2 0x12(%rbp,%rax,1), %xmm1");
+       asm volatile("sha256rnds2 0x12(%rax,%rcx,1), %xmm1");
+       asm volatile("sha256rnds2 0x12(%rax,%rcx,8), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%rax), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%rbp), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%rcx,%rax,1), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%rbp,%rax,1), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%rax,%rcx,1), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%rax,%rcx,8), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%rax,%rcx,8), %xmm15");
+
+       /* sha256msg1 xmm2/m128, xmm1 */
+
+       asm volatile("sha256msg1 %xmm1, %xmm0");
+       asm volatile("sha256msg1 %xmm7, %xmm2");
+       asm volatile("sha256msg1 %xmm8, %xmm0");
+       asm volatile("sha256msg1 %xmm7, %xmm8");
+       asm volatile("sha256msg1 %xmm15, %xmm8");
+       asm volatile("sha256msg1 (%rax), %xmm0");
+       asm volatile("sha256msg1 (%r8), %xmm0");
+       asm volatile("sha256msg1 (0x12345678), %xmm0");
+       asm volatile("sha256msg1 (%rax), %xmm3");
+       asm volatile("sha256msg1 (%rcx,%rax,1), %xmm0");
+       asm volatile("sha256msg1 0x12345678(,%rax,1), %xmm0");
+       asm volatile("sha256msg1 (%rax,%rcx,1), %xmm0");
+       asm volatile("sha256msg1 (%rax,%rcx,8), %xmm0");
+       asm volatile("sha256msg1 0x12(%rax), %xmm0");
+       asm volatile("sha256msg1 0x12(%rbp), %xmm0");
+       asm volatile("sha256msg1 0x12(%rcx,%rax,1), %xmm0");
+       asm volatile("sha256msg1 0x12(%rbp,%rax,1), %xmm0");
+       asm volatile("sha256msg1 0x12(%rax,%rcx,1), %xmm0");
+       asm volatile("sha256msg1 0x12(%rax,%rcx,8), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%rax), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%rbp), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%rcx,%rax,1), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%rbp,%rax,1), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%rax,%rcx,1), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%rax,%rcx,8), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%rax,%rcx,8), %xmm15");
+
+       /* sha256msg2 xmm2/m128, xmm1 */
+
+       asm volatile("sha256msg2 %xmm1, %xmm0");
+       asm volatile("sha256msg2 %xmm7, %xmm2");
+       asm volatile("sha256msg2 %xmm8, %xmm0");
+       asm volatile("sha256msg2 %xmm7, %xmm8");
+       asm volatile("sha256msg2 %xmm15, %xmm8");
+       asm volatile("sha256msg2 (%rax), %xmm0");
+       asm volatile("sha256msg2 (%r8), %xmm0");
+       asm volatile("sha256msg2 (0x12345678), %xmm0");
+       asm volatile("sha256msg2 (%rax), %xmm3");
+       asm volatile("sha256msg2 (%rcx,%rax,1), %xmm0");
+       asm volatile("sha256msg2 0x12345678(,%rax,1), %xmm0");
+       asm volatile("sha256msg2 (%rax,%rcx,1), %xmm0");
+       asm volatile("sha256msg2 (%rax,%rcx,8), %xmm0");
+       asm volatile("sha256msg2 0x12(%rax), %xmm0");
+       asm volatile("sha256msg2 0x12(%rbp), %xmm0");
+       asm volatile("sha256msg2 0x12(%rcx,%rax,1), %xmm0");
+       asm volatile("sha256msg2 0x12(%rbp,%rax,1), %xmm0");
+       asm volatile("sha256msg2 0x12(%rax,%rcx,1), %xmm0");
+       asm volatile("sha256msg2 0x12(%rax,%rcx,8), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%rax), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%rbp), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%rcx,%rax,1), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%rbp,%rax,1), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%rax,%rcx,1), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%rax,%rcx,8), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%rax,%rcx,8), %xmm15");
+
+       /* clflushopt m8 */
+
+       asm volatile("clflushopt (%rax)");
+       asm volatile("clflushopt (%r8)");
+       asm volatile("clflushopt (0x12345678)");
+       asm volatile("clflushopt 0x12345678(%rax,%rcx,8)");
+       asm volatile("clflushopt 0x12345678(%r8,%rcx,8)");
+       /* Also check instructions in the same group encoding as clflushopt */
+       asm volatile("clflush (%rax)");
+       asm volatile("clflush (%r8)");
+       asm volatile("sfence");
+
+       /* clwb m8 */
+
+       asm volatile("clwb (%rax)");
+       asm volatile("clwb (%r8)");
+       asm volatile("clwb (0x12345678)");
+       asm volatile("clwb 0x12345678(%rax,%rcx,8)");
+       asm volatile("clwb 0x12345678(%r8,%rcx,8)");
+       /* Also check instructions in the same group encoding as clwb */
+       asm volatile("xsaveopt (%rax)");
+       asm volatile("xsaveopt (%r8)");
+       asm volatile("mfence");
+
+       /* xsavec mem */
+
+       asm volatile("xsavec (%rax)");
+       asm volatile("xsavec (%r8)");
+       asm volatile("xsavec (0x12345678)");
+       asm volatile("xsavec 0x12345678(%rax,%rcx,8)");
+       asm volatile("xsavec 0x12345678(%r8,%rcx,8)");
+
+       /* xsaves mem */
+
+       asm volatile("xsaves (%rax)");
+       asm volatile("xsaves (%r8)");
+       asm volatile("xsaves (0x12345678)");
+       asm volatile("xsaves 0x12345678(%rax,%rcx,8)");
+       asm volatile("xsaves 0x12345678(%r8,%rcx,8)");
+
+       /* xrstors mem */
+
+       asm volatile("xrstors (%rax)");
+       asm volatile("xrstors (%r8)");
+       asm volatile("xrstors (0x12345678)");
+       asm volatile("xrstors 0x12345678(%rax,%rcx,8)");
+       asm volatile("xrstors 0x12345678(%r8,%rcx,8)");
+
+#else  /* #ifdef __x86_64__ */
+
+       /* bndmk m32, bnd */
+
+       asm volatile("bndmk (%eax), %bnd0");
+       asm volatile("bndmk (0x12345678), %bnd0");
+       asm volatile("bndmk (%eax), %bnd3");
+       asm volatile("bndmk (%ecx,%eax,1), %bnd0");
+       asm volatile("bndmk 0x12345678(,%eax,1), %bnd0");
+       asm volatile("bndmk (%eax,%ecx,1), %bnd0");
+       asm volatile("bndmk (%eax,%ecx,8), %bnd0");
+       asm volatile("bndmk 0x12(%eax), %bnd0");
+       asm volatile("bndmk 0x12(%ebp), %bnd0");
+       asm volatile("bndmk 0x12(%ecx,%eax,1), %bnd0");
+       asm volatile("bndmk 0x12(%ebp,%eax,1), %bnd0");
+       asm volatile("bndmk 0x12(%eax,%ecx,1), %bnd0");
+       asm volatile("bndmk 0x12(%eax,%ecx,8), %bnd0");
+       asm volatile("bndmk 0x12345678(%eax), %bnd0");
+       asm volatile("bndmk 0x12345678(%ebp), %bnd0");
+       asm volatile("bndmk 0x12345678(%ecx,%eax,1), %bnd0");
+       asm volatile("bndmk 0x12345678(%ebp,%eax,1), %bnd0");
+       asm volatile("bndmk 0x12345678(%eax,%ecx,1), %bnd0");
+       asm volatile("bndmk 0x12345678(%eax,%ecx,8), %bnd0");
+
+       /* bndcl r/m32, bnd */
+
+       asm volatile("bndcl (%eax), %bnd0");
+       asm volatile("bndcl (0x12345678), %bnd0");
+       asm volatile("bndcl (%eax), %bnd3");
+       asm volatile("bndcl (%ecx,%eax,1), %bnd0");
+       asm volatile("bndcl 0x12345678(,%eax,1), %bnd0");
+       asm volatile("bndcl (%eax,%ecx,1), %bnd0");
+       asm volatile("bndcl (%eax,%ecx,8), %bnd0");
+       asm volatile("bndcl 0x12(%eax), %bnd0");
+       asm volatile("bndcl 0x12(%ebp), %bnd0");
+       asm volatile("bndcl 0x12(%ecx,%eax,1), %bnd0");
+       asm volatile("bndcl 0x12(%ebp,%eax,1), %bnd0");
+       asm volatile("bndcl 0x12(%eax,%ecx,1), %bnd0");
+       asm volatile("bndcl 0x12(%eax,%ecx,8), %bnd0");
+       asm volatile("bndcl 0x12345678(%eax), %bnd0");
+       asm volatile("bndcl 0x12345678(%ebp), %bnd0");
+       asm volatile("bndcl 0x12345678(%ecx,%eax,1), %bnd0");
+       asm volatile("bndcl 0x12345678(%ebp,%eax,1), %bnd0");
+       asm volatile("bndcl 0x12345678(%eax,%ecx,1), %bnd0");
+       asm volatile("bndcl 0x12345678(%eax,%ecx,8), %bnd0");
+       asm volatile("bndcl %eax, %bnd0");
+
+       /* bndcu r/m32, bnd */
+
+       asm volatile("bndcu (%eax), %bnd0");
+       asm volatile("bndcu (0x12345678), %bnd0");
+       asm volatile("bndcu (%eax), %bnd3");
+       asm volatile("bndcu (%ecx,%eax,1), %bnd0");
+       asm volatile("bndcu 0x12345678(,%eax,1), %bnd0");
+       asm volatile("bndcu (%eax,%ecx,1), %bnd0");
+       asm volatile("bndcu (%eax,%ecx,8), %bnd0");
+       asm volatile("bndcu 0x12(%eax), %bnd0");
+       asm volatile("bndcu 0x12(%ebp), %bnd0");
+       asm volatile("bndcu 0x12(%ecx,%eax,1), %bnd0");
+       asm volatile("bndcu 0x12(%ebp,%eax,1), %bnd0");
+       asm volatile("bndcu 0x12(%eax,%ecx,1), %bnd0");
+       asm volatile("bndcu 0x12(%eax,%ecx,8), %bnd0");
+       asm volatile("bndcu 0x12345678(%eax), %bnd0");
+       asm volatile("bndcu 0x12345678(%ebp), %bnd0");
+       asm volatile("bndcu 0x12345678(%ecx,%eax,1), %bnd0");
+       asm volatile("bndcu 0x12345678(%ebp,%eax,1), %bnd0");
+       asm volatile("bndcu 0x12345678(%eax,%ecx,1), %bnd0");
+       asm volatile("bndcu 0x12345678(%eax,%ecx,8), %bnd0");
+       asm volatile("bndcu %eax, %bnd0");
+
+       /* bndcn r/m32, bnd */
+
+       asm volatile("bndcn (%eax), %bnd0");
+       asm volatile("bndcn (0x12345678), %bnd0");
+       asm volatile("bndcn (%eax), %bnd3");
+       asm volatile("bndcn (%ecx,%eax,1), %bnd0");
+       asm volatile("bndcn 0x12345678(,%eax,1), %bnd0");
+       asm volatile("bndcn (%eax,%ecx,1), %bnd0");
+       asm volatile("bndcn (%eax,%ecx,8), %bnd0");
+       asm volatile("bndcn 0x12(%eax), %bnd0");
+       asm volatile("bndcn 0x12(%ebp), %bnd0");
+       asm volatile("bndcn 0x12(%ecx,%eax,1), %bnd0");
+       asm volatile("bndcn 0x12(%ebp,%eax,1), %bnd0");
+       asm volatile("bndcn 0x12(%eax,%ecx,1), %bnd0");
+       asm volatile("bndcn 0x12(%eax,%ecx,8), %bnd0");
+       asm volatile("bndcn 0x12345678(%eax), %bnd0");
+       asm volatile("bndcn 0x12345678(%ebp), %bnd0");
+       asm volatile("bndcn 0x12345678(%ecx,%eax,1), %bnd0");
+       asm volatile("bndcn 0x12345678(%ebp,%eax,1), %bnd0");
+       asm volatile("bndcn 0x12345678(%eax,%ecx,1), %bnd0");
+       asm volatile("bndcn 0x12345678(%eax,%ecx,8), %bnd0");
+       asm volatile("bndcn %eax, %bnd0");
+
+       /* bndmov m64, bnd */
+
+       asm volatile("bndmov (%eax), %bnd0");
+       asm volatile("bndmov (0x12345678), %bnd0");
+       asm volatile("bndmov (%eax), %bnd3");
+       asm volatile("bndmov (%ecx,%eax,1), %bnd0");
+       asm volatile("bndmov 0x12345678(,%eax,1), %bnd0");
+       asm volatile("bndmov (%eax,%ecx,1), %bnd0");
+       asm volatile("bndmov (%eax,%ecx,8), %bnd0");
+       asm volatile("bndmov 0x12(%eax), %bnd0");
+       asm volatile("bndmov 0x12(%ebp), %bnd0");
+       asm volatile("bndmov 0x12(%ecx,%eax,1), %bnd0");
+       asm volatile("bndmov 0x12(%ebp,%eax,1), %bnd0");
+       asm volatile("bndmov 0x12(%eax,%ecx,1), %bnd0");
+       asm volatile("bndmov 0x12(%eax,%ecx,8), %bnd0");
+       asm volatile("bndmov 0x12345678(%eax), %bnd0");
+       asm volatile("bndmov 0x12345678(%ebp), %bnd0");
+       asm volatile("bndmov 0x12345678(%ecx,%eax,1), %bnd0");
+       asm volatile("bndmov 0x12345678(%ebp,%eax,1), %bnd0");
+       asm volatile("bndmov 0x12345678(%eax,%ecx,1), %bnd0");
+       asm volatile("bndmov 0x12345678(%eax,%ecx,8), %bnd0");
+
+       /* bndmov bnd, m64 */
+
+       asm volatile("bndmov %bnd0, (%eax)");
+       asm volatile("bndmov %bnd0, (0x12345678)");
+       asm volatile("bndmov %bnd3, (%eax)");
+       asm volatile("bndmov %bnd0, (%ecx,%eax,1)");
+       asm volatile("bndmov %bnd0, 0x12345678(,%eax,1)");
+       asm volatile("bndmov %bnd0, (%eax,%ecx,1)");
+       asm volatile("bndmov %bnd0, (%eax,%ecx,8)");
+       asm volatile("bndmov %bnd0, 0x12(%eax)");
+       asm volatile("bndmov %bnd0, 0x12(%ebp)");
+       asm volatile("bndmov %bnd0, 0x12(%ecx,%eax,1)");
+       asm volatile("bndmov %bnd0, 0x12(%ebp,%eax,1)");
+       asm volatile("bndmov %bnd0, 0x12(%eax,%ecx,1)");
+       asm volatile("bndmov %bnd0, 0x12(%eax,%ecx,8)");
+       asm volatile("bndmov %bnd0, 0x12345678(%eax)");
+       asm volatile("bndmov %bnd0, 0x12345678(%ebp)");
+       asm volatile("bndmov %bnd0, 0x12345678(%ecx,%eax,1)");
+       asm volatile("bndmov %bnd0, 0x12345678(%ebp,%eax,1)");
+       asm volatile("bndmov %bnd0, 0x12345678(%eax,%ecx,1)");
+       asm volatile("bndmov %bnd0, 0x12345678(%eax,%ecx,8)");
+
+       /* bndmov bnd2, bnd1 */
+
+       asm volatile("bndmov %bnd0, %bnd1");
+       asm volatile("bndmov %bnd1, %bnd0");
+
+       /* bndldx mib, bnd */
+
+       asm volatile("bndldx (%eax), %bnd0");
+       asm volatile("bndldx (0x12345678), %bnd0");
+       asm volatile("bndldx (%eax), %bnd3");
+       asm volatile("bndldx (%ecx,%eax,1), %bnd0");
+       asm volatile("bndldx 0x12345678(,%eax,1), %bnd0");
+       asm volatile("bndldx (%eax,%ecx,1), %bnd0");
+       asm volatile("bndldx 0x12(%eax), %bnd0");
+       asm volatile("bndldx 0x12(%ebp), %bnd0");
+       asm volatile("bndldx 0x12(%ecx,%eax,1), %bnd0");
+       asm volatile("bndldx 0x12(%ebp,%eax,1), %bnd0");
+       asm volatile("bndldx 0x12(%eax,%ecx,1), %bnd0");
+       asm volatile("bndldx 0x12345678(%eax), %bnd0");
+       asm volatile("bndldx 0x12345678(%ebp), %bnd0");
+       asm volatile("bndldx 0x12345678(%ecx,%eax,1), %bnd0");
+       asm volatile("bndldx 0x12345678(%ebp,%eax,1), %bnd0");
+       asm volatile("bndldx 0x12345678(%eax,%ecx,1), %bnd0");
+
+       /* bndstx bnd, mib */
+
+       asm volatile("bndstx %bnd0, (%eax)");
+       asm volatile("bndstx %bnd0, (0x12345678)");
+       asm volatile("bndstx %bnd3, (%eax)");
+       asm volatile("bndstx %bnd0, (%ecx,%eax,1)");
+       asm volatile("bndstx %bnd0, 0x12345678(,%eax,1)");
+       asm volatile("bndstx %bnd0, (%eax,%ecx,1)");
+       asm volatile("bndstx %bnd0, 0x12(%eax)");
+       asm volatile("bndstx %bnd0, 0x12(%ebp)");
+       asm volatile("bndstx %bnd0, 0x12(%ecx,%eax,1)");
+       asm volatile("bndstx %bnd0, 0x12(%ebp,%eax,1)");
+       asm volatile("bndstx %bnd0, 0x12(%eax,%ecx,1)");
+       asm volatile("bndstx %bnd0, 0x12345678(%eax)");
+       asm volatile("bndstx %bnd0, 0x12345678(%ebp)");
+       asm volatile("bndstx %bnd0, 0x12345678(%ecx,%eax,1)");
+       asm volatile("bndstx %bnd0, 0x12345678(%ebp,%eax,1)");
+       asm volatile("bndstx %bnd0, 0x12345678(%eax,%ecx,1)");
+
+       /* bnd prefix on call, ret, jmp and all jcc */
+
+       asm volatile("bnd call label1");  /* Expecting: call unconditional 0xfffffffc */
+       asm volatile("bnd call *(%eax)"); /* Expecting: call indirect      0 */
+       asm volatile("bnd ret");          /* Expecting: ret  indirect      0 */
+       asm volatile("bnd jmp label1");   /* Expecting: jmp  unconditional 0xfffffffc */
+       asm volatile("bnd jmp label1");   /* Expecting: jmp  unconditional 0xfffffffc */
+       asm volatile("bnd jmp *(%ecx)");  /* Expecting: jmp  indirect      0 */
+       asm volatile("bnd jne label1");   /* Expecting: jcc  conditional   0xfffffffc */
+
+       /* sha1rnds4 imm8, xmm2/m128, xmm1 */
+
+       asm volatile("sha1rnds4 $0x0, %xmm1, %xmm0");
+       asm volatile("sha1rnds4 $0x91, %xmm7, %xmm2");
+       asm volatile("sha1rnds4 $0x91, (%eax), %xmm0");
+       asm volatile("sha1rnds4 $0x91, (0x12345678), %xmm0");
+       asm volatile("sha1rnds4 $0x91, (%eax), %xmm3");
+       asm volatile("sha1rnds4 $0x91, (%ecx,%eax,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(,%eax,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, (%eax,%ecx,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, (%eax,%ecx,8), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12(%eax), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12(%ebp), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12(%ecx,%eax,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12(%ebp,%eax,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12(%eax,%ecx,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12(%eax,%ecx,8), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%eax), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%ebp), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%ecx,%eax,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%ebp,%eax,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%eax,%ecx,1), %xmm0");
+       asm volatile("sha1rnds4 $0x91, 0x12345678(%eax,%ecx,8), %xmm0");
+
+       /* sha1nexte xmm2/m128, xmm1 */
+
+       asm volatile("sha1nexte %xmm1, %xmm0");
+       asm volatile("sha1nexte %xmm7, %xmm2");
+       asm volatile("sha1nexte (%eax), %xmm0");
+       asm volatile("sha1nexte (0x12345678), %xmm0");
+       asm volatile("sha1nexte (%eax), %xmm3");
+       asm volatile("sha1nexte (%ecx,%eax,1), %xmm0");
+       asm volatile("sha1nexte 0x12345678(,%eax,1), %xmm0");
+       asm volatile("sha1nexte (%eax,%ecx,1), %xmm0");
+       asm volatile("sha1nexte (%eax,%ecx,8), %xmm0");
+       asm volatile("sha1nexte 0x12(%eax), %xmm0");
+       asm volatile("sha1nexte 0x12(%ebp), %xmm0");
+       asm volatile("sha1nexte 0x12(%ecx,%eax,1), %xmm0");
+       asm volatile("sha1nexte 0x12(%ebp,%eax,1), %xmm0");
+       asm volatile("sha1nexte 0x12(%eax,%ecx,1), %xmm0");
+       asm volatile("sha1nexte 0x12(%eax,%ecx,8), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%eax), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%ebp), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%ecx,%eax,1), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%ebp,%eax,1), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%eax,%ecx,1), %xmm0");
+       asm volatile("sha1nexte 0x12345678(%eax,%ecx,8), %xmm0");
+
+       /* sha1msg1 xmm2/m128, xmm1 */
+
+       asm volatile("sha1msg1 %xmm1, %xmm0");
+       asm volatile("sha1msg1 %xmm7, %xmm2");
+       asm volatile("sha1msg1 (%eax), %xmm0");
+       asm volatile("sha1msg1 (0x12345678), %xmm0");
+       asm volatile("sha1msg1 (%eax), %xmm3");
+       asm volatile("sha1msg1 (%ecx,%eax,1), %xmm0");
+       asm volatile("sha1msg1 0x12345678(,%eax,1), %xmm0");
+       asm volatile("sha1msg1 (%eax,%ecx,1), %xmm0");
+       asm volatile("sha1msg1 (%eax,%ecx,8), %xmm0");
+       asm volatile("sha1msg1 0x12(%eax), %xmm0");
+       asm volatile("sha1msg1 0x12(%ebp), %xmm0");
+       asm volatile("sha1msg1 0x12(%ecx,%eax,1), %xmm0");
+       asm volatile("sha1msg1 0x12(%ebp,%eax,1), %xmm0");
+       asm volatile("sha1msg1 0x12(%eax,%ecx,1), %xmm0");
+       asm volatile("sha1msg1 0x12(%eax,%ecx,8), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%eax), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%ebp), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%ecx,%eax,1), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%ebp,%eax,1), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%eax,%ecx,1), %xmm0");
+       asm volatile("sha1msg1 0x12345678(%eax,%ecx,8), %xmm0");
+
+       /* sha1msg2 xmm2/m128, xmm1 */
+
+       asm volatile("sha1msg2 %xmm1, %xmm0");
+       asm volatile("sha1msg2 %xmm7, %xmm2");
+       asm volatile("sha1msg2 (%eax), %xmm0");
+       asm volatile("sha1msg2 (0x12345678), %xmm0");
+       asm volatile("sha1msg2 (%eax), %xmm3");
+       asm volatile("sha1msg2 (%ecx,%eax,1), %xmm0");
+       asm volatile("sha1msg2 0x12345678(,%eax,1), %xmm0");
+       asm volatile("sha1msg2 (%eax,%ecx,1), %xmm0");
+       asm volatile("sha1msg2 (%eax,%ecx,8), %xmm0");
+       asm volatile("sha1msg2 0x12(%eax), %xmm0");
+       asm volatile("sha1msg2 0x12(%ebp), %xmm0");
+       asm volatile("sha1msg2 0x12(%ecx,%eax,1), %xmm0");
+       asm volatile("sha1msg2 0x12(%ebp,%eax,1), %xmm0");
+       asm volatile("sha1msg2 0x12(%eax,%ecx,1), %xmm0");
+       asm volatile("sha1msg2 0x12(%eax,%ecx,8), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%eax), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%ebp), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%ecx,%eax,1), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%ebp,%eax,1), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%eax,%ecx,1), %xmm0");
+       asm volatile("sha1msg2 0x12345678(%eax,%ecx,8), %xmm0");
+
+       /* sha256rnds2 <XMM0>, xmm2/m128, xmm1 */
+       /* Note sha256rnds2 has an implicit operand 'xmm0' */
+
+       asm volatile("sha256rnds2 %xmm4, %xmm1");
+       asm volatile("sha256rnds2 %xmm7, %xmm2");
+       asm volatile("sha256rnds2 (%eax), %xmm1");
+       asm volatile("sha256rnds2 (0x12345678), %xmm1");
+       asm volatile("sha256rnds2 (%eax), %xmm3");
+       asm volatile("sha256rnds2 (%ecx,%eax,1), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(,%eax,1), %xmm1");
+       asm volatile("sha256rnds2 (%eax,%ecx,1), %xmm1");
+       asm volatile("sha256rnds2 (%eax,%ecx,8), %xmm1");
+       asm volatile("sha256rnds2 0x12(%eax), %xmm1");
+       asm volatile("sha256rnds2 0x12(%ebp), %xmm1");
+       asm volatile("sha256rnds2 0x12(%ecx,%eax,1), %xmm1");
+       asm volatile("sha256rnds2 0x12(%ebp,%eax,1), %xmm1");
+       asm volatile("sha256rnds2 0x12(%eax,%ecx,1), %xmm1");
+       asm volatile("sha256rnds2 0x12(%eax,%ecx,8), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%eax), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%ebp), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%ecx,%eax,1), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%ebp,%eax,1), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%eax,%ecx,1), %xmm1");
+       asm volatile("sha256rnds2 0x12345678(%eax,%ecx,8), %xmm1");
+
+       /* sha256msg1 xmm2/m128, xmm1 */
+
+       asm volatile("sha256msg1 %xmm1, %xmm0");
+       asm volatile("sha256msg1 %xmm7, %xmm2");
+       asm volatile("sha256msg1 (%eax), %xmm0");
+       asm volatile("sha256msg1 (0x12345678), %xmm0");
+       asm volatile("sha256msg1 (%eax), %xmm3");
+       asm volatile("sha256msg1 (%ecx,%eax,1), %xmm0");
+       asm volatile("sha256msg1 0x12345678(,%eax,1), %xmm0");
+       asm volatile("sha256msg1 (%eax,%ecx,1), %xmm0");
+       asm volatile("sha256msg1 (%eax,%ecx,8), %xmm0");
+       asm volatile("sha256msg1 0x12(%eax), %xmm0");
+       asm volatile("sha256msg1 0x12(%ebp), %xmm0");
+       asm volatile("sha256msg1 0x12(%ecx,%eax,1), %xmm0");
+       asm volatile("sha256msg1 0x12(%ebp,%eax,1), %xmm0");
+       asm volatile("sha256msg1 0x12(%eax,%ecx,1), %xmm0");
+       asm volatile("sha256msg1 0x12(%eax,%ecx,8), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%eax), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%ebp), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%ecx,%eax,1), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%ebp,%eax,1), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%eax,%ecx,1), %xmm0");
+       asm volatile("sha256msg1 0x12345678(%eax,%ecx,8), %xmm0");
+
+       /* sha256msg2 xmm2/m128, xmm1 */
+
+       asm volatile("sha256msg2 %xmm1, %xmm0");
+       asm volatile("sha256msg2 %xmm7, %xmm2");
+       asm volatile("sha256msg2 (%eax), %xmm0");
+       asm volatile("sha256msg2 (0x12345678), %xmm0");
+       asm volatile("sha256msg2 (%eax), %xmm3");
+       asm volatile("sha256msg2 (%ecx,%eax,1), %xmm0");
+       asm volatile("sha256msg2 0x12345678(,%eax,1), %xmm0");
+       asm volatile("sha256msg2 (%eax,%ecx,1), %xmm0");
+       asm volatile("sha256msg2 (%eax,%ecx,8), %xmm0");
+       asm volatile("sha256msg2 0x12(%eax), %xmm0");
+       asm volatile("sha256msg2 0x12(%ebp), %xmm0");
+       asm volatile("sha256msg2 0x12(%ecx,%eax,1), %xmm0");
+       asm volatile("sha256msg2 0x12(%ebp,%eax,1), %xmm0");
+       asm volatile("sha256msg2 0x12(%eax,%ecx,1), %xmm0");
+       asm volatile("sha256msg2 0x12(%eax,%ecx,8), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%eax), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%ebp), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%ecx,%eax,1), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%ebp,%eax,1), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%eax,%ecx,1), %xmm0");
+       asm volatile("sha256msg2 0x12345678(%eax,%ecx,8), %xmm0");
+
+       /* clflushopt m8 */
+
+       asm volatile("clflushopt (%eax)");
+       asm volatile("clflushopt (0x12345678)");
+       asm volatile("clflushopt 0x12345678(%eax,%ecx,8)");
+       /* Also check instructions in the same group encoding as clflushopt */
+       asm volatile("clflush (%eax)");
+       asm volatile("sfence");
+
+       /* clwb m8 */
+
+       asm volatile("clwb (%eax)");
+       asm volatile("clwb (0x12345678)");
+       asm volatile("clwb 0x12345678(%eax,%ecx,8)");
+       /* Also check instructions in the same group encoding as clwb */
+       asm volatile("xsaveopt (%eax)");
+       asm volatile("mfence");
+
+       /* xsavec mem */
+
+       asm volatile("xsavec (%eax)");
+       asm volatile("xsavec (0x12345678)");
+       asm volatile("xsavec 0x12345678(%eax,%ecx,8)");
+
+       /* xsaves mem */
+
+       asm volatile("xsaves (%eax)");
+       asm volatile("xsaves (0x12345678)");
+       asm volatile("xsaves 0x12345678(%eax,%ecx,8)");
+
+       /* xrstors mem */
+
+       asm volatile("xrstors (%eax)");
+       asm volatile("xrstors (0x12345678)");
+       asm volatile("xrstors 0x12345678(%eax,%ecx,8)");
+
+#endif /* #ifndef __x86_64__ */
+
+       /* pcommit */
+
+       asm volatile("pcommit");
+
+       /* Following line is a marker for the awk script - do not change */
+       asm volatile("rdtsc"); /* Stop here */
+
+       return 0;
+}
diff --git a/tools/perf/arch/x86/tests/insn-x86.c b/tools/perf/arch/x86/tests/insn-x86.c
new file mode 100644 (file)
index 0000000..b6115df
--- /dev/null
@@ -0,0 +1,185 @@
+#include <linux/types.h>
+
+#include "debug.h"
+#include "tests/tests.h"
+#include "arch-tests.h"
+
+#include "intel-pt-decoder/insn.h"
+#include "intel-pt-decoder/intel-pt-insn-decoder.h"
+
+struct test_data {
+       u8 data[MAX_INSN_SIZE];
+       int expected_length;
+       int expected_rel;
+       const char *expected_op_str;
+       const char *expected_branch_str;
+       const char *asm_rep;
+};
+
+struct test_data test_data_32[] = {
+#include "insn-x86-dat-32.c"
+       {{0x0f, 0x01, 0xee}, 3, 0, NULL, NULL, "0f 01 ee             \trdpkru"},
+       {{0x0f, 0x01, 0xef}, 3, 0, NULL, NULL, "0f 01 ef             \twrpkru"},
+       {{0}, 0, 0, NULL, NULL, NULL},
+};
+
+struct test_data test_data_64[] = {
+#include "insn-x86-dat-64.c"
+       {{0x0f, 0x01, 0xee}, 3, 0, NULL, NULL, "0f 01 ee             \trdpkru"},
+       {{0x0f, 0x01, 0xef}, 3, 0, NULL, NULL, "0f 01 ef             \twrpkru"},
+       {{0}, 0, 0, NULL, NULL, NULL},
+};
+
+static int get_op(const char *op_str)
+{
+       struct val_data {
+               const char *name;
+               int val;
+       } vals[] = {
+               {"other",   INTEL_PT_OP_OTHER},
+               {"call",    INTEL_PT_OP_CALL},
+               {"ret",     INTEL_PT_OP_RET},
+               {"jcc",     INTEL_PT_OP_JCC},
+               {"jmp",     INTEL_PT_OP_JMP},
+               {"loop",    INTEL_PT_OP_LOOP},
+               {"iret",    INTEL_PT_OP_IRET},
+               {"int",     INTEL_PT_OP_INT},
+               {"syscall", INTEL_PT_OP_SYSCALL},
+               {"sysret",  INTEL_PT_OP_SYSRET},
+               {NULL, 0},
+       };
+       struct val_data *val;
+
+       if (!op_str || !strlen(op_str))
+               return 0;
+
+       for (val = vals; val->name; val++) {
+               if (!strcmp(val->name, op_str))
+                       return val->val;
+       }
+
+       pr_debug("Failed to get op\n");
+
+       return -1;
+}
+
+static int get_branch(const char *branch_str)
+{
+       struct val_data {
+               const char *name;
+               int val;
+       } vals[] = {
+               {"no_branch",     INTEL_PT_BR_NO_BRANCH},
+               {"indirect",      INTEL_PT_BR_INDIRECT},
+               {"conditional",   INTEL_PT_BR_CONDITIONAL},
+               {"unconditional", INTEL_PT_BR_UNCONDITIONAL},
+               {NULL, 0},
+       };
+       struct val_data *val;
+
+       if (!branch_str || !strlen(branch_str))
+               return 0;
+
+       for (val = vals; val->name; val++) {
+               if (!strcmp(val->name, branch_str))
+                       return val->val;
+       }
+
+       pr_debug("Failed to get branch\n");
+
+       return -1;
+}
+
+static int test_data_item(struct test_data *dat, int x86_64)
+{
+       struct intel_pt_insn intel_pt_insn;
+       struct insn insn;
+       int op, branch;
+
+       insn_init(&insn, dat->data, MAX_INSN_SIZE, x86_64);
+       insn_get_length(&insn);
+
+       if (!insn_complete(&insn)) {
+               pr_debug("Failed to decode: %s\n", dat->asm_rep);
+               return -1;
+       }
+
+       if (insn.length != dat->expected_length) {
+               pr_debug("Failed to decode length (%d vs expected %d): %s\n",
+                        insn.length, dat->expected_length, dat->asm_rep);
+               return -1;
+       }
+
+       op = get_op(dat->expected_op_str);
+       branch = get_branch(dat->expected_branch_str);
+
+       if (intel_pt_get_insn(dat->data, MAX_INSN_SIZE, x86_64, &intel_pt_insn)) {
+               pr_debug("Intel PT failed to decode: %s\n", dat->asm_rep);
+               return -1;
+       }
+
+       if ((int)intel_pt_insn.op != op) {
+               pr_debug("Failed to decode 'op' value (%d vs expected %d): %s\n",
+                        intel_pt_insn.op, op, dat->asm_rep);
+               return -1;
+       }
+
+       if ((int)intel_pt_insn.branch != branch) {
+               pr_debug("Failed to decode 'branch' value (%d vs expected %d): %s\n",
+                        intel_pt_insn.branch, branch, dat->asm_rep);
+               return -1;
+       }
+
+       if (intel_pt_insn.rel != dat->expected_rel) {
+               pr_debug("Failed to decode 'rel' value (%#x vs expected %#x): %s\n",
+                        intel_pt_insn.rel, dat->expected_rel, dat->asm_rep);
+               return -1;
+       }
+
+       pr_debug("Decoded ok: %s\n", dat->asm_rep);
+
+       return 0;
+}
+
+static int test_data_set(struct test_data *dat_set, int x86_64)
+{
+       struct test_data *dat;
+       int ret = 0;
+
+       for (dat = dat_set; dat->expected_length; dat++) {
+               if (test_data_item(dat, x86_64))
+                       ret = -1;
+       }
+
+       return ret;
+}
+
+/**
+ * test__insn_x86 - test x86 instruction decoder - new instructions.
+ *
+ * This function implements a test that decodes a selection of instructions and
+ * checks the results.  The Intel PT function that further categorizes
+ * instructions (i.e. intel_pt_get_insn()) is also checked.
+ *
+ * The instructions originally live in insn-x86-dat-src.c, which is processed
+ * by the gen-insn-x86-dat.sh and gen-insn-x86-dat.awk scripts to produce
+ * insn-x86-dat-32.c and insn-x86-dat-64.c, which are included in this program.
+ * That is, to add new instructions to the test, edit insn-x86-dat-src.c, run
+ * the gen-insn-x86-dat.sh script, rebuild perf, and then run the test.
+ *
+ * If the test passes %0 is returned, otherwise %-1 is returned.  Use the
+ * verbose (-v) option to see all the instructions and whether or not they
+ * decoded successfully.
+ */
+int test__insn_x86(void)
+{
+       int ret = 0;
+
+       if (test_data_set(test_data_32, 0))
+               ret = -1;
+
+       if (test_data_set(test_data_64, 1))
+               ret = -1;
+
+       return ret;
+}
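Each entry in the generated insn-x86-dat-32.c and insn-x86-dat-64.c files is simply a struct test_data initializer: the raw instruction bytes, the expected decode length, the expected branch displacement ('rel'), the expected Intel PT op and branch strings (left NULL for ordinary instructions), and the objdump text used in the debug output. As an illustrative sketch only (the exact spacing and strings are produced by gen-insn-x86-dat.awk, so this is an assumption rather than a copy of the generated files), an entry for a plain single-byte NOP would look roughly like:

        {{0x90, }, 1, 0, NULL, NULL, "90                   \tnop"},

Branch instructions additionally carry the op/branch strings and the rel value given in the "Expecting:" comments of insn-x86-dat-src.c, e.g. "jmp"/"unconditional" with rel 0xfffffffc for the "bnd jmp label1" cases.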
diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c
new file mode 100644 (file)
index 0000000..d28c1b6
--- /dev/null
@@ -0,0 +1,124 @@
+#include "tests/tests.h"
+#include "perf.h"
+#include "cloexec.h"
+#include "debug.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "arch-tests.h"
+
+#include <sys/mman.h>
+#include <string.h>
+
+static pid_t spawn(void)
+{
+       pid_t pid;
+
+       pid = fork();
+       if (pid)
+               return pid;
+
+       while(1);
+               sleep(5);
+       return 0;
+}
+
+/*
+ * Create an event group that contains both a sampled hardware
+ * (cpu-cycles) and software (intel_cqm/llc_occupancy/) event. We then
+ * wait for the hardware perf counter to overflow and generate a PMI,
+ * which triggers an event read for both of the events in the group.
+ *
+ * Since reading Intel CQM event counters requires sending SMP IPIs, the
+ * CQM pmu needs to handle the above situation gracefully, and return
+ * the last read counter value to avoid triggering a WARN_ON_ONCE() in
+ * smp_call_function_many() caused by sending IPIs from NMI context.
+ */
+int test__intel_cqm_count_nmi_context(void)
+{
+       struct perf_evlist *evlist = NULL;
+       struct perf_evsel *evsel = NULL;
+       struct perf_event_attr pe;
+       int i, fd[2], flag, ret;
+       size_t mmap_len;
+       void *event;
+       pid_t pid;
+       int err = TEST_FAIL;
+
+       flag = perf_event_open_cloexec_flag();
+
+       evlist = perf_evlist__new();
+       if (!evlist) {
+               pr_debug("perf_evlist__new failed\n");
+               return TEST_FAIL;
+       }
+
+       ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
+       if (ret) {
+               pr_debug("parse_events failed\n");
+               err = TEST_SKIP;
+               goto out;
+       }
+
+       evsel = perf_evlist__first(evlist);
+       if (!evsel) {
+               pr_debug("perf_evlist__first failed\n");
+               goto out;
+       }
+
+       memset(&pe, 0, sizeof(pe));
+       pe.size = sizeof(pe);
+
+       pe.type = PERF_TYPE_HARDWARE;
+       pe.config = PERF_COUNT_HW_CPU_CYCLES;
+       pe.read_format = PERF_FORMAT_GROUP;
+
+       pe.sample_period = 128;
+       pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;
+
+       pid = spawn();
+
+       fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag);
+       if (fd[0] < 0) {
+               pr_debug("failed to open event\n");
+               goto out;
+       }
+
+       memset(&pe, 0, sizeof(pe));
+       pe.size = sizeof(pe);
+
+       pe.type = evsel->attr.type;
+       pe.config = evsel->attr.config;
+
+       fd[1] = sys_perf_event_open(&pe, pid, -1, fd[0], flag);
+       if (fd[1] < 0) {
+               pr_debug("failed to open event\n");
+               goto out;
+       }
+
+       /*
+        * Pick a power-of-two number of pages + 1 for the meta-data
+        * page (struct perf_event_mmap_page). See tools/perf/design.txt.
+        */
+       mmap_len = page_size * 65;
+
+       event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0);
+       if (event == (void *)(-1)) {
+               pr_debug("failed to mmap %d\n", errno);
+               goto out;
+       }
+
+       sleep(1);
+
+       err = TEST_OK;
+
+       munmap(event, mmap_len);
+
+       for (i = 0; i < 2; i++)
+               close(fd[i]);
+
+       kill(pid, SIGKILL);
+       wait(NULL);
+out:
+       perf_evlist__delete(evlist);
+       return err;
+}
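For context on the group read described in the comment above: with read_format = PERF_FORMAT_GROUP and no other read-format bits set, each read of the group leader (including the read triggered when the PMI fires) returns the counts of every group member in one buffer. A minimal sketch of that layout, as documented for perf_event_open(2) and not something added by this patch:

        struct read_format {
                u64 nr;         /* number of events in the group (2 here)            */
                u64 values[2];  /* [0] = cpu-cycles count, [1] = llc_occupancy count */
        };

It is the read of the llc_occupancy member that would normally require an SMP IPI, which is why the CQM PMU has to cope with being read from NMI context.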
similarity index 98%
rename from tools/perf/tests/perf-time-to-tsc.c
rename to tools/perf/arch/x86/tests/perf-time-to-tsc.c
index 5f49484f1abc03bed80d035c508501914a577caf..658cd200af74dca1a1f5165626b0cc51ab517087 100644 (file)
@@ -9,7 +9,9 @@
 #include "thread_map.h"
 #include "cpumap.h"
 #include "tsc.h"
-#include "tests.h"
+#include "tests/tests.h"
+
+#include "arch-tests.h"
 
 #define CHECK__(x) {                           \
        while ((x) < 0) {                       \
similarity index 97%
rename from tools/perf/tests/rdpmc.c
rename to tools/perf/arch/x86/tests/rdpmc.c
index d31f2c4d9f6491c62893f00cfbf46cdeff12ec90..e7688214c7cf19fc040170b2590d19d8e011b6e8 100644 (file)
@@ -5,10 +5,9 @@
 #include <linux/types.h>
 #include "perf.h"
 #include "debug.h"
-#include "tests.h"
+#include "tests/tests.h"
 #include "cloexec.h"
-
-#if defined(__x86_64__) || defined(__i386__)
+#include "arch-tests.h"
 
 static u64 rdpmc(unsigned int counter)
 {
@@ -173,5 +172,3 @@ int test__rdpmc(void)
 
        return 0;
 }
-
-#endif
index a08de0a35b8384ffd4cad3d6c8e08522c0276f59..9223c164e545d869267b9b7a17d409b774dd7904 100644 (file)
  */
 
 #include <stddef.h>
+#include <errno.h> /* for EINVAL */
+#include <string.h> /* for strcmp */
+#include <linux/ptrace.h> /* for struct pt_regs */
+#include <linux/kernel.h> /* for offsetof */
 #include <dwarf-regs.h>
 
 /*
- * Generic dwarf analysis helpers
+ * See arch/x86/kernel/ptrace.c.
+ * Differences from it:
+ *
+ *  - Since struct pt_regs is defined differently for user space and the
+ *    kernel, but we want to use 'ax, bx' instead of 'rax, rbx' (which are
+ *    the field names of the user-space pt_regs), REG_OFFSET_NAME accepts
+ *    both a string name and a register field name.
+ *
+ *  - Since accessing x86_32's pt_regs from an x86_64 build is difficult,
+ *    and vice versa, we simply fill the offset with -1, so
+ *    get_arch_regstr() still works but regs_query_register_offset()
+ *    returns an error.
+ *    The only inconvenience this causes is that we cannot generate a BPF
+ *    prologue for an x86_64 kernel when perf is built for x86_32. This is
+ *    a rare use case.
+ *
+ *  - The order differs from the kernel's ptrace.c for get_arch_regstr();
+ *    use the order defined by DWARF.
  */
 
-#define X86_32_MAX_REGS 8
-const char *x86_32_regs_table[X86_32_MAX_REGS] = {
-       "%ax",
-       "%cx",
-       "%dx",
-       "%bx",
-       "$stack",       /* Stack address instead of %sp */
-       "%bp",
-       "%si",
-       "%di",
+struct pt_regs_offset {
+       const char *name;
+       int offset;
+};
+
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+#ifdef __x86_64__
+# define REG_OFFSET_NAME_64(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)}
+# define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = -1}
+#else
+# define REG_OFFSET_NAME_64(n, r) {.name = n, .offset = -1}
+# define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)}
+#endif
+
+static const struct pt_regs_offset x86_32_regoffset_table[] = {
+       REG_OFFSET_NAME_32("%ax",       eax),
+       REG_OFFSET_NAME_32("%cx",       ecx),
+       REG_OFFSET_NAME_32("%dx",       edx),
+       REG_OFFSET_NAME_32("%bx",       ebx),
+       REG_OFFSET_NAME_32("$stack",    esp),   /* Stack address instead of %sp */
+       REG_OFFSET_NAME_32("%bp",       ebp),
+       REG_OFFSET_NAME_32("%si",       esi),
+       REG_OFFSET_NAME_32("%di",       edi),
+       REG_OFFSET_END,
 };
 
-#define X86_64_MAX_REGS 16
-const char *x86_64_regs_table[X86_64_MAX_REGS] = {
-       "%ax",
-       "%dx",
-       "%cx",
-       "%bx",
-       "%si",
-       "%di",
-       "%bp",
-       "%sp",
-       "%r8",
-       "%r9",
-       "%r10",
-       "%r11",
-       "%r12",
-       "%r13",
-       "%r14",
-       "%r15",
+static const struct pt_regs_offset x86_64_regoffset_table[] = {
+       REG_OFFSET_NAME_64("%ax",       rax),
+       REG_OFFSET_NAME_64("%dx",       rdx),
+       REG_OFFSET_NAME_64("%cx",       rcx),
+       REG_OFFSET_NAME_64("%bx",       rbx),
+       REG_OFFSET_NAME_64("%si",       rsi),
+       REG_OFFSET_NAME_64("%di",       rdi),
+       REG_OFFSET_NAME_64("%bp",       rbp),
+       REG_OFFSET_NAME_64("%sp",       rsp),
+       REG_OFFSET_NAME_64("%r8",       r8),
+       REG_OFFSET_NAME_64("%r9",       r9),
+       REG_OFFSET_NAME_64("%r10",      r10),
+       REG_OFFSET_NAME_64("%r11",      r11),
+       REG_OFFSET_NAME_64("%r12",      r12),
+       REG_OFFSET_NAME_64("%r13",      r13),
+       REG_OFFSET_NAME_64("%r14",      r14),
+       REG_OFFSET_NAME_64("%r15",      r15),
+       REG_OFFSET_END,
 };
 
 /* TODO: switching by dwarf address size */
 #ifdef __x86_64__
-#define ARCH_MAX_REGS X86_64_MAX_REGS
-#define arch_regs_table x86_64_regs_table
+#define regoffset_table x86_64_regoffset_table
 #else
-#define ARCH_MAX_REGS X86_32_MAX_REGS
-#define arch_regs_table x86_32_regs_table
+#define regoffset_table x86_32_regoffset_table
 #endif
 
+/* Minus 1 for the ending REG_OFFSET_END */
+#define ARCH_MAX_REGS ((sizeof(regoffset_table) / sizeof(regoffset_table[0])) - 1)
+
 /* Return architecture dependent register string (for kprobe-tracer) */
 const char *get_arch_regstr(unsigned int n)
 {
-       return (n < ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
+       return (n < ARCH_MAX_REGS) ? regoffset_table[n].name : NULL;
+}
+
+/* Reuse code from arch/x86/kernel/ptrace.c */
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name:      the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+       const struct pt_regs_offset *roff;
+       for (roff = regoffset_table; roff->name != NULL; roff++)
+               if (!strcmp(roff->name, name))
+                       return roff->offset;
+       return -EINVAL;
 }
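A minimal usage sketch (illustrative only, not part of the patch): on an x86_64 build of perf, the two lookups behave as follows.

        const char *name = get_arch_regstr(0);               /* "%ax" (DWARF register 0)      */
        int off  = regs_query_register_offset("%ax");        /* offsetof(struct pt_regs, rax) */
        int miss = regs_query_register_offset("%nosuchreg"); /* -EINVAL                       */

On an x86_32 build the 32-bit table is used instead, so the same names resolve to the e*x field offsets of the 32-bit struct pt_regs.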
index 2ca10d796c0bb5bf0b591a070a5835dfd76fa700..b02af064f0f98333b6f90a5d6dd8778e6a7a6bb4 100644 (file)
@@ -624,13 +624,49 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
         * threads.
         */
        if (have_timing_info && !cpu_map__empty(cpus)) {
-               err = intel_pt_track_switches(evlist);
-               if (err == -EPERM)
-                       pr_debug2("Unable to select sched:sched_switch\n");
-               else if (err)
-                       return err;
-               else
-                       ptr->have_sched_switch = 1;
+               if (perf_can_record_switch_events()) {
+                       bool cpu_wide = !target__none(&opts->target) &&
+                                       !target__has_task(&opts->target);
+
+                       if (!cpu_wide && perf_can_record_cpu_wide()) {
+                               struct perf_evsel *switch_evsel;
+
+                               err = parse_events(evlist, "dummy:u", NULL);
+                               if (err)
+                                       return err;
+
+                               switch_evsel = perf_evlist__last(evlist);
+
+                               switch_evsel->attr.freq = 0;
+                               switch_evsel->attr.sample_period = 1;
+                               switch_evsel->attr.context_switch = 1;
+
+                               switch_evsel->system_wide = true;
+                               switch_evsel->no_aux_samples = true;
+                               switch_evsel->immediate = true;
+
+                               perf_evsel__set_sample_bit(switch_evsel, TID);
+                               perf_evsel__set_sample_bit(switch_evsel, TIME);
+                               perf_evsel__set_sample_bit(switch_evsel, CPU);
+
+                               opts->record_switch_events = false;
+                               ptr->have_sched_switch = 3;
+                       } else {
+                               opts->record_switch_events = true;
+                               if (cpu_wide)
+                                       ptr->have_sched_switch = 3;
+                               else
+                                       ptr->have_sched_switch = 2;
+                       }
+               } else {
+                       err = intel_pt_track_switches(evlist);
+                       if (err == -EPERM)
+                               pr_debug2("Unable to select sched:sched_switch\n");
+                       else if (err)
+                               return err;
+                       else
+                               ptr->have_sched_switch = 1;
+               }
        }
 
        if (intel_pt_evsel) {
@@ -663,8 +699,11 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                tracking_evsel->attr.sample_period = 1;
 
                /* In per-cpu case, always need the time of mmap events etc */
-               if (!cpu_map__empty(cpus))
+               if (!cpu_map__empty(cpus)) {
                        perf_evsel__set_sample_bit(tracking_evsel, TIME);
+                       /* And the CPU for switch events */
+                       perf_evsel__set_sample_bit(tracking_evsel, CPU);
+               }
        }
 
        /*
index 573e28896038d2f9045eb1eb700e77cbe5c66cbe..60bf119430479272a80c17e4231de4633bad2b6e 100644 (file)
@@ -1,6 +1,6 @@
 perf-y += sched-messaging.o
 perf-y += sched-pipe.o
-perf-y += mem-memcpy.o
+perf-y += mem-functions.o
 perf-y += futex-hash.o
 perf-y += futex-wake.o
 perf-y += futex-wake-parallel.o
diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
new file mode 100644 (file)
index 0000000..9419b94
--- /dev/null
@@ -0,0 +1,379 @@
+/*
+ * mem-functions.c
+ *
+ * Simple memcpy() and memset() benchmarks
+ *
+ * Written by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
+ */
+
+#include "../perf.h"
+#include "../util/util.h"
+#include "../util/parse-options.h"
+#include "../util/header.h"
+#include "../util/cloexec.h"
+#include "bench.h"
+#include "mem-memcpy-arch.h"
+#include "mem-memset-arch.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+#include <errno.h>
+
+#define K 1024
+
+static const char      *size_str       = "1MB";
+static const char      *function_str   = "all";
+static int             nr_loops        = 1;
+static bool            use_cycles;
+static int             cycles_fd;
+
+static const struct option options[] = {
+       OPT_STRING('s', "size", &size_str, "1MB",
+                   "Specify the size of the memory buffers. "
+                   "Available units: B, KB, MB, GB and TB (case insensitive)"),
+
+       OPT_STRING('f', "function", &function_str, "all",
+                   "Specify the function to run, \"all\" runs all available functions, \"help\" lists them"),
+
+       OPT_INTEGER('l', "nr_loops", &nr_loops,
+                   "Specify the number of loops to run. (default: 1)"),
+
+       OPT_BOOLEAN('c', "cycles", &use_cycles,
+                   "Use a cycles event instead of gettimeofday() to measure performance"),
+
+       OPT_END()
+};
+
+typedef void *(*memcpy_t)(void *, const void *, size_t);
+typedef void *(*memset_t)(void *, int, size_t);
+
+struct function {
+       const char *name;
+       const char *desc;
+       union {
+               memcpy_t memcpy;
+               memset_t memset;
+       } fn;
+};
+
+static struct perf_event_attr cycle_attr = {
+       .type           = PERF_TYPE_HARDWARE,
+       .config         = PERF_COUNT_HW_CPU_CYCLES
+};
+
+static void init_cycles(void)
+{
+       cycles_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, perf_event_open_cloexec_flag());
+
+       if (cycles_fd < 0 && errno == ENOSYS)
+               die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+       else
+               BUG_ON(cycles_fd < 0);
+}
+
+static u64 get_cycles(void)
+{
+       int ret;
+       u64 clk;
+
+       ret = read(cycles_fd, &clk, sizeof(u64));
+       BUG_ON(ret != sizeof(u64));
+
+       return clk;
+}
+
+static double timeval2double(struct timeval *ts)
+{
+       return (double)ts->tv_sec + (double)ts->tv_usec / (double)1000000;
+}
+
+#define print_bps(x) do {                                              \
+               if (x < K)                                              \
+                       printf(" %14lf bytes/sec\n", x);                \
+               else if (x < K * K)                                     \
+                       printf(" %14lf KB/sec\n", x / K);               \
+               else if (x < K * K * K)                                 \
+                       printf(" %14lf MB/sec\n", x / K / K);           \
+               else                                                    \
+                       printf(" %14lf GB/sec\n", x / K / K / K);       \
+       } while (0)
+
+struct bench_mem_info {
+       const struct function *functions;
+       u64 (*do_cycles)(const struct function *r, size_t size);
+       double (*do_gettimeofday)(const struct function *r, size_t size);
+       const char *const *usage;
+};
+
+static void __bench_mem_function(struct bench_mem_info *info, int r_idx, size_t size, double size_total)
+{
+       const struct function *r = &info->functions[r_idx];
+       double result_bps = 0.0;
+       u64 result_cycles = 0;
+
+       printf("# function '%s' (%s)\n", r->name, r->desc);
+
+       if (bench_format == BENCH_FORMAT_DEFAULT)
+               printf("# Copying %s bytes ...\n\n", size_str);
+
+       if (use_cycles) {
+               result_cycles = info->do_cycles(r, size);
+       } else {
+               result_bps = info->do_gettimeofday(r, size);
+       }
+
+       switch (bench_format) {
+       case BENCH_FORMAT_DEFAULT:
+               if (use_cycles) {
+                       printf(" %14lf cycles/byte\n", (double)result_cycles/size_total);
+               } else {
+                       print_bps(result_bps);
+               }
+               break;
+
+       case BENCH_FORMAT_SIMPLE:
+               if (use_cycles) {
+                       printf("%lf\n", (double)result_cycles/size_total);
+               } else {
+                       printf("%lf\n", result_bps);
+               }
+               break;
+
+       default:
+               BUG_ON(1);
+               break;
+       }
+}
+
+static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *info)
+{
+       int i;
+       size_t size;
+       double size_total;
+
+       argc = parse_options(argc, argv, options, info->usage, 0);
+
+       if (use_cycles)
+               init_cycles();
+
+       size = (size_t)perf_atoll((char *)size_str);
+       size_total = (double)size * nr_loops;
+
+       if ((s64)size <= 0) {
+               fprintf(stderr, "Invalid size:%s\n", size_str);
+               return 1;
+       }
+
+       if (!strncmp(function_str, "all", 3)) {
+               for (i = 0; info->functions[i].name; i++)
+                       __bench_mem_function(info, i, size, size_total);
+               return 0;
+       }
+
+       for (i = 0; info->functions[i].name; i++) {
+               if (!strcmp(info->functions[i].name, function_str))
+                       break;
+       }
+       if (!info->functions[i].name) {
+               if (strcmp(function_str, "help") && strcmp(function_str, "h"))
+                       printf("Unknown function: %s\n", function_str);
+               printf("Available functions:\n");
+               for (i = 0; info->functions[i].name; i++) {
+                       printf("\t%s ... %s\n",
+                              info->functions[i].name, info->functions[i].desc);
+               }
+               return 1;
+       }
+
+       __bench_mem_function(info, i, size, size_total);
+
+       return 0;
+}
+
+static void memcpy_alloc_mem(void **dst, void **src, size_t size)
+{
+       *dst = zalloc(size);
+       if (!*dst)
+               die("memory allocation failed - maybe size is too large?\n");
+
+       *src = zalloc(size);
+       if (!*src)
+               die("memory allocation failed - maybe size is too large?\n");
+
+       /* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */
+       memset(*src, 0, size);
+}
+
+static u64 do_memcpy_cycles(const struct function *r, size_t size)
+{
+       u64 cycle_start = 0ULL, cycle_end = 0ULL;
+       void *src = NULL, *dst = NULL;
+       memcpy_t fn = r->fn.memcpy;
+       int i;
+
+       memcpy_alloc_mem(&dst, &src, size);
+
+       /*
+        * We prefault the freshly allocated memory range here,
+        * to not measure page fault overhead:
+        */
+       fn(dst, src, size);
+
+       cycle_start = get_cycles();
+       for (i = 0; i < nr_loops; ++i)
+               fn(dst, src, size);
+       cycle_end = get_cycles();
+
+       free(src);
+       free(dst);
+       return cycle_end - cycle_start;
+}
+
+static double do_memcpy_gettimeofday(const struct function *r, size_t size)
+{
+       struct timeval tv_start, tv_end, tv_diff;
+       memcpy_t fn = r->fn.memcpy;
+       void *src = NULL, *dst = NULL;
+       int i;
+
+       memcpy_alloc_mem(&dst, &src, size);
+
+       /*
+        * We prefault the freshly allocated memory range here,
+        * to not measure page fault overhead:
+        */
+       fn(dst, src, size);
+
+       BUG_ON(gettimeofday(&tv_start, NULL));
+       for (i = 0; i < nr_loops; ++i)
+               fn(dst, src, size);
+       BUG_ON(gettimeofday(&tv_end, NULL));
+
+       timersub(&tv_end, &tv_start, &tv_diff);
+
+       free(src);
+       free(dst);
+
+       return (double)(((double)size * nr_loops) / timeval2double(&tv_diff));
+}
+
+struct function memcpy_functions[] = {
+       { .name         = "default",
+         .desc         = "Default memcpy() provided by glibc",
+         .fn.memcpy    = memcpy },
+
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+# define MEMCPY_FN(_fn, _name, _desc) {.name = _name, .desc = _desc, .fn.memcpy = _fn},
+# include "mem-memcpy-x86-64-asm-def.h"
+# undef MEMCPY_FN
+#endif
+
+       { .name = NULL, }
+};
+
+static const char * const bench_mem_memcpy_usage[] = {
+       "perf bench mem memcpy <options>",
+       NULL
+};
+
+int bench_mem_memcpy(int argc, const char **argv, const char *prefix __maybe_unused)
+{
+       struct bench_mem_info info = {
+               .functions              = memcpy_functions,
+               .do_cycles              = do_memcpy_cycles,
+               .do_gettimeofday        = do_memcpy_gettimeofday,
+               .usage                  = bench_mem_memcpy_usage,
+       };
+
+       return bench_mem_common(argc, argv, &info);
+}
+
+static void memset_alloc_mem(void **dst, size_t size)
+{
+       *dst = zalloc(size);
+       if (!*dst)
+               die("memory allocation failed - maybe size is too large?\n");
+}
+
+static u64 do_memset_cycles(const struct function *r, size_t size)
+{
+       u64 cycle_start = 0ULL, cycle_end = 0ULL;
+       memset_t fn = r->fn.memset;
+       void *dst = NULL;
+       int i;
+
+       memset_alloc_mem(&dst, size);
+
+       /*
+        * We prefault the freshly allocated memory range here,
+        * to not measure page fault overhead:
+        */
+       fn(dst, -1, size);
+
+       cycle_start = get_cycles();
+       for (i = 0; i < nr_loops; ++i)
+               fn(dst, i, size);
+       cycle_end = get_cycles();
+
+       free(dst);
+       return cycle_end - cycle_start;
+}
+
+static double do_memset_gettimeofday(const struct function *r, size_t size)
+{
+       struct timeval tv_start, tv_end, tv_diff;
+       memset_t fn = r->fn.memset;
+       void *dst = NULL;
+       int i;
+
+       memset_alloc_mem(&dst, size);
+
+       /*
+        * We prefault the freshly allocated memory range here,
+        * to not measure page fault overhead:
+        */
+       fn(dst, -1, size);
+
+       BUG_ON(gettimeofday(&tv_start, NULL));
+       for (i = 0; i < nr_loops; ++i)
+               fn(dst, i, size);
+       BUG_ON(gettimeofday(&tv_end, NULL));
+
+       timersub(&tv_end, &tv_start, &tv_diff);
+
+       free(dst);
+       return (double)(((double)size * nr_loops) / timeval2double(&tv_diff));
+}
+
+static const char * const bench_mem_memset_usage[] = {
+       "perf bench mem memset <options>",
+       NULL
+};
+
+static const struct function memset_functions[] = {
+       { .name         = "default",
+         .desc         = "Default memset() provided by glibc",
+         .fn.memset    = memset },
+
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+# define MEMSET_FN(_fn, _name, _desc) { .name = _name, .desc = _desc, .fn.memset = _fn },
+# include "mem-memset-x86-64-asm-def.h"
+# undef MEMSET_FN
+#endif
+
+       { .name = NULL, }
+};
+
+int bench_mem_memset(int argc, const char **argv, const char *prefix __maybe_unused)
+{
+       struct bench_mem_info info = {
+               .functions              = memset_functions,
+               .do_cycles              = do_memset_cycles,
+               .do_gettimeofday        = do_memset_gettimeofday,
+               .usage                  = bench_mem_memset_usage,
+       };
+
+       return bench_mem_common(argc, argv, &info);
+}
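Typical invocations of the renamed benchmark, given the options defined above (the sizes and loop counts are arbitrary illustrative values):

        perf bench mem memcpy -s 4MB -l 10 -f default
        perf bench mem memset -s 1GB -c
        perf bench mem memcpy -f help

The first copies a 4MB buffer ten times using the glibc memcpy(), the second measures memset() in cycles via a perf cycles event rather than gettimeofday(), and the third lists the available functions.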
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
deleted file mode 100644 (file)
index d3dfb79..0000000
+++ /dev/null
@@ -1,434 +0,0 @@
-/*
- * mem-memcpy.c
- *
- * memcpy: Simple memory copy in various ways
- *
- * Written by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
- */
-
-#include "../perf.h"
-#include "../util/util.h"
-#include "../util/parse-options.h"
-#include "../util/header.h"
-#include "../util/cloexec.h"
-#include "bench.h"
-#include "mem-memcpy-arch.h"
-#include "mem-memset-arch.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/time.h>
-#include <errno.h>
-
-#define K 1024
-
-static const char      *length_str     = "1MB";
-static const char      *routine        = "default";
-static int             iterations      = 1;
-static bool            use_cycle;
-static int             cycle_fd;
-static bool            only_prefault;
-static bool            no_prefault;
-
-static const struct option options[] = {
-       OPT_STRING('l', "length", &length_str, "1MB",
-                   "Specify length of memory to copy. "
-                   "Available units: B, KB, MB, GB and TB (upper and lower)"),
-       OPT_STRING('r', "routine", &routine, "default",
-                   "Specify routine to copy, \"all\" runs all available routines"),
-       OPT_INTEGER('i', "iterations", &iterations,
-                   "repeat memcpy() invocation this number of times"),
-       OPT_BOOLEAN('c', "cycle", &use_cycle,
-                   "Use cycles event instead of gettimeofday() for measuring"),
-       OPT_BOOLEAN('o', "only-prefault", &only_prefault,
-                   "Show only the result with page faults before memcpy()"),
-       OPT_BOOLEAN('n', "no-prefault", &no_prefault,
-                   "Show only the result without page faults before memcpy()"),
-       OPT_END()
-};
-
-typedef void *(*memcpy_t)(void *, const void *, size_t);
-typedef void *(*memset_t)(void *, int, size_t);
-
-struct routine {
-       const char *name;
-       const char *desc;
-       union {
-               memcpy_t memcpy;
-               memset_t memset;
-       } fn;
-};
-
-struct routine memcpy_routines[] = {
-       { .name = "default",
-         .desc = "Default memcpy() provided by glibc",
-         .fn.memcpy = memcpy },
-#ifdef HAVE_ARCH_X86_64_SUPPORT
-
-#define MEMCPY_FN(_fn, _name, _desc) {.name = _name, .desc = _desc, .fn.memcpy = _fn},
-#include "mem-memcpy-x86-64-asm-def.h"
-#undef MEMCPY_FN
-
-#endif
-
-       { NULL,
-         NULL,
-         {NULL}   }
-};
-
-static const char * const bench_mem_memcpy_usage[] = {
-       "perf bench mem memcpy <options>",
-       NULL
-};
-
-static struct perf_event_attr cycle_attr = {
-       .type           = PERF_TYPE_HARDWARE,
-       .config         = PERF_COUNT_HW_CPU_CYCLES
-};
-
-static void init_cycle(void)
-{
-       cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1,
-                                      perf_event_open_cloexec_flag());
-
-       if (cycle_fd < 0 && errno == ENOSYS)
-               die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
-       else
-               BUG_ON(cycle_fd < 0);
-}
-
-static u64 get_cycle(void)
-{
-       int ret;
-       u64 clk;
-
-       ret = read(cycle_fd, &clk, sizeof(u64));
-       BUG_ON(ret != sizeof(u64));
-
-       return clk;
-}
-
-static double timeval2double(struct timeval *ts)
-{
-       return (double)ts->tv_sec +
-               (double)ts->tv_usec / (double)1000000;
-}
-
-#define pf (no_prefault ? 0 : 1)
-
-#define print_bps(x) do {                                      \
-               if (x < K)                                      \
-                       printf(" %14lf B/Sec", x);              \
-               else if (x < K * K)                             \
-                       printf(" %14lfd KB/Sec", x / K);        \
-               else if (x < K * K * K)                         \
-                       printf(" %14lf MB/Sec", x / K / K);     \
-               else                                            \
-                       printf(" %14lf GB/Sec", x / K / K / K); \
-       } while (0)
-
-struct bench_mem_info {
-       const struct routine *routines;
-       u64 (*do_cycle)(const struct routine *r, size_t len, bool prefault);
-       double (*do_gettimeofday)(const struct routine *r, size_t len, bool prefault);
-       const char *const *usage;
-};
-
-static void __bench_mem_routine(struct bench_mem_info *info, int r_idx, size_t len, double totallen)
-{
-       const struct routine *r = &info->routines[r_idx];
-       double result_bps[2];
-       u64 result_cycle[2];
-
-       result_cycle[0] = result_cycle[1] = 0ULL;
-       result_bps[0] = result_bps[1] = 0.0;
-
-       printf("Routine %s (%s)\n", r->name, r->desc);
-
-       if (bench_format == BENCH_FORMAT_DEFAULT)
-               printf("# Copying %s Bytes ...\n\n", length_str);
-
-       if (!only_prefault && !no_prefault) {
-               /* show both of results */
-               if (use_cycle) {
-                       result_cycle[0] = info->do_cycle(r, len, false);
-                       result_cycle[1] = info->do_cycle(r, len, true);
-               } else {
-                       result_bps[0]   = info->do_gettimeofday(r, len, false);
-                       result_bps[1]   = info->do_gettimeofday(r, len, true);
-               }
-       } else {
-               if (use_cycle)
-                       result_cycle[pf] = info->do_cycle(r, len, only_prefault);
-               else
-                       result_bps[pf] = info->do_gettimeofday(r, len, only_prefault);
-       }
-
-       switch (bench_format) {
-       case BENCH_FORMAT_DEFAULT:
-               if (!only_prefault && !no_prefault) {
-                       if (use_cycle) {
-                               printf(" %14lf Cycle/Byte\n",
-                                       (double)result_cycle[0]
-                                       / totallen);
-                               printf(" %14lf Cycle/Byte (with prefault)\n",
-                                       (double)result_cycle[1]
-                                       / totallen);
-                       } else {
-                               print_bps(result_bps[0]);
-                               printf("\n");
-                               print_bps(result_bps[1]);
-                               printf(" (with prefault)\n");
-                       }
-               } else {
-                       if (use_cycle) {
-                               printf(" %14lf Cycle/Byte",
-                                       (double)result_cycle[pf]
-                                       / totallen);
-                       } else
-                               print_bps(result_bps[pf]);
-
-                       printf("%s\n", only_prefault ? " (with prefault)" : "");
-               }
-               break;
-       case BENCH_FORMAT_SIMPLE:
-               if (!only_prefault && !no_prefault) {
-                       if (use_cycle) {
-                               printf("%lf %lf\n",
-                                       (double)result_cycle[0] / totallen,
-                                       (double)result_cycle[1] / totallen);
-                       } else {
-                               printf("%lf %lf\n",
-                                       result_bps[0], result_bps[1]);
-                       }
-               } else {
-                       if (use_cycle) {
-                               printf("%lf\n", (double)result_cycle[pf]
-                                       / totallen);
-                       } else
-                               printf("%lf\n", result_bps[pf]);
-               }
-               break;
-       default:
-               /* reaching this means there's some disaster: */
-               die("unknown format: %d\n", bench_format);
-               break;
-       }
-}
-
-static int bench_mem_common(int argc, const char **argv,
-                    const char *prefix __maybe_unused,
-                    struct bench_mem_info *info)
-{
-       int i;
-       size_t len;
-       double totallen;
-
-       argc = parse_options(argc, argv, options,
-                            info->usage, 0);
-
-       if (no_prefault && only_prefault) {
-               fprintf(stderr, "Invalid options: -o and -n are mutually exclusive\n");
-               return 1;
-       }
-
-       if (use_cycle)
-               init_cycle();
-
-       len = (size_t)perf_atoll((char *)length_str);
-       totallen = (double)len * iterations;
-
-       if ((s64)len <= 0) {
-               fprintf(stderr, "Invalid length:%s\n", length_str);
-               return 1;
-       }
-
-       /* same to without specifying either of prefault and no-prefault */
-       if (only_prefault && no_prefault)
-               only_prefault = no_prefault = false;
-
-       if (!strncmp(routine, "all", 3)) {
-               for (i = 0; info->routines[i].name; i++)
-                       __bench_mem_routine(info, i, len, totallen);
-               return 0;
-       }
-
-       for (i = 0; info->routines[i].name; i++) {
-               if (!strcmp(info->routines[i].name, routine))
-                       break;
-       }
-       if (!info->routines[i].name) {
-               printf("Unknown routine:%s\n", routine);
-               printf("Available routines...\n");
-               for (i = 0; info->routines[i].name; i++) {
-                       printf("\t%s ... %s\n",
-                              info->routines[i].name, info->routines[i].desc);
-               }
-               return 1;
-       }
-
-       __bench_mem_routine(info, i, len, totallen);
-
-       return 0;
-}
-
-static void memcpy_alloc_mem(void **dst, void **src, size_t length)
-{
-       *dst = zalloc(length);
-       if (!*dst)
-               die("memory allocation failed - maybe length is too large?\n");
-
-       *src = zalloc(length);
-       if (!*src)
-               die("memory allocation failed - maybe length is too large?\n");
-       /* Make sure to always replace the zero pages even if MMAP_THRESH is crossed */
-       memset(*src, 0, length);
-}
-
-static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault)
-{
-       u64 cycle_start = 0ULL, cycle_end = 0ULL;
-       void *src = NULL, *dst = NULL;
-       memcpy_t fn = r->fn.memcpy;
-       int i;
-
-       memcpy_alloc_mem(&dst, &src, len);
-
-       if (prefault)
-               fn(dst, src, len);
-
-       cycle_start = get_cycle();
-       for (i = 0; i < iterations; ++i)
-               fn(dst, src, len);
-       cycle_end = get_cycle();
-
-       free(src);
-       free(dst);
-       return cycle_end - cycle_start;
-}
-
-static double do_memcpy_gettimeofday(const struct routine *r, size_t len,
-                                    bool prefault)
-{
-       struct timeval tv_start, tv_end, tv_diff;
-       memcpy_t fn = r->fn.memcpy;
-       void *src = NULL, *dst = NULL;
-       int i;
-
-       memcpy_alloc_mem(&dst, &src, len);
-
-       if (prefault)
-               fn(dst, src, len);
-
-       BUG_ON(gettimeofday(&tv_start, NULL));
-       for (i = 0; i < iterations; ++i)
-               fn(dst, src, len);
-       BUG_ON(gettimeofday(&tv_end, NULL));
-
-       timersub(&tv_end, &tv_start, &tv_diff);
-
-       free(src);
-       free(dst);
-       return (double)(((double)len * iterations) / timeval2double(&tv_diff));
-}
-
-int bench_mem_memcpy(int argc, const char **argv,
-                    const char *prefix __maybe_unused)
-{
-       struct bench_mem_info info = {
-               .routines = memcpy_routines,
-               .do_cycle = do_memcpy_cycle,
-               .do_gettimeofday = do_memcpy_gettimeofday,
-               .usage = bench_mem_memcpy_usage,
-       };
-
-       return bench_mem_common(argc, argv, prefix, &info);
-}
-
-static void memset_alloc_mem(void **dst, size_t length)
-{
-       *dst = zalloc(length);
-       if (!*dst)
-               die("memory allocation failed - maybe length is too large?\n");
-}
-
-static u64 do_memset_cycle(const struct routine *r, size_t len, bool prefault)
-{
-       u64 cycle_start = 0ULL, cycle_end = 0ULL;
-       memset_t fn = r->fn.memset;
-       void *dst = NULL;
-       int i;
-
-       memset_alloc_mem(&dst, len);
-
-       if (prefault)
-               fn(dst, -1, len);
-
-       cycle_start = get_cycle();
-       for (i = 0; i < iterations; ++i)
-               fn(dst, i, len);
-       cycle_end = get_cycle();
-
-       free(dst);
-       return cycle_end - cycle_start;
-}
-
-static double do_memset_gettimeofday(const struct routine *r, size_t len,
-                                    bool prefault)
-{
-       struct timeval tv_start, tv_end, tv_diff;
-       memset_t fn = r->fn.memset;
-       void *dst = NULL;
-       int i;
-
-       memset_alloc_mem(&dst, len);
-
-       if (prefault)
-               fn(dst, -1, len);
-
-       BUG_ON(gettimeofday(&tv_start, NULL));
-       for (i = 0; i < iterations; ++i)
-               fn(dst, i, len);
-       BUG_ON(gettimeofday(&tv_end, NULL));
-
-       timersub(&tv_end, &tv_start, &tv_diff);
-
-       free(dst);
-       return (double)(((double)len * iterations) / timeval2double(&tv_diff));
-}
-
-static const char * const bench_mem_memset_usage[] = {
-       "perf bench mem memset <options>",
-       NULL
-};
-
-static const struct routine memset_routines[] = {
-       { .name ="default",
-         .desc = "Default memset() provided by glibc",
-         .fn.memset = memset },
-#ifdef HAVE_ARCH_X86_64_SUPPORT
-
-#define MEMSET_FN(_fn, _name, _desc) { .name = _name, .desc = _desc, .fn.memset = _fn },
-#include "mem-memset-x86-64-asm-def.h"
-#undef MEMSET_FN
-
-#endif
-
-       { .name = NULL,
-         .desc = NULL,
-         .fn.memset = NULL   }
-};
-
-int bench_mem_memset(int argc, const char **argv,
-                    const char *prefix __maybe_unused)
-{
-       struct bench_mem_info info = {
-               .routines = memset_routines,
-               .do_cycle = do_memset_cycle,
-               .do_gettimeofday = do_memset_gettimeofday,
-               .usage = bench_mem_memset_usage,
-       };
-
-       return bench_mem_common(argc, argv, prefix, &info);
-}
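
The removed bench_mem code above follows a simple measure-throughput pattern: allocate buffers, optionally run one untimed "prefault" pass to fault the pages in, then time a fixed number of copies and report bytes per second. A rough, self-contained sketch of that gettimeofday path (plain C, with made-up BUF_LEN/NR_ITERATIONS constants instead of perf's option handling) looks like:

/*
 * Minimal standalone sketch, not perf code: the measurement idiom used by
 * the removed do_memcpy_gettimeofday().  BUF_LEN and NR_ITERATIONS are
 * illustrative constants, not perf defaults.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

#define BUF_LEN       (1 << 20)        /* 1 MB per copy */
#define NR_ITERATIONS 100

int main(void)
{
	struct timeval tv_start, tv_end;
	void *dst = calloc(1, BUF_LEN);
	void *src = calloc(1, BUF_LEN);
	double secs;
	int i;

	if (!dst || !src)
		return 1;

	memcpy(dst, src, BUF_LEN);      /* prefault pass, not timed */

	gettimeofday(&tv_start, NULL);
	for (i = 0; i < NR_ITERATIONS; i++)
		memcpy(dst, src, BUF_LEN);
	gettimeofday(&tv_end, NULL);

	secs = (tv_end.tv_sec - tv_start.tv_sec) +
	       (tv_end.tv_usec - tv_start.tv_usec) / 1e6;
	printf("%f GB/sec\n", (double)BUF_LEN * NR_ITERATIONS / secs / 1e9);

	free(src);
	free(dst);
	return 0;
}
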
index 870b7e665a203264c1b7b27684a860cbe147c450..492df2752a2d1057bbde642538f84da93aa9df8b 100644 (file)
@@ -164,8 +164,8 @@ static const struct option options[] = {
        OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"),
        OPT_STRING('T', "mb_thread"     , &p0.mb_thread_str,    "MB", "thread  memory (MBs)"),
 
-       OPT_UINTEGER('l', "nr_loops"    , &p0.nr_loops,         "max number of loops to run"),
-       OPT_UINTEGER('s', "nr_secs"     , &p0.nr_secs,          "max number of seconds to run"),
+       OPT_UINTEGER('l', "nr_loops"    , &p0.nr_loops,         "max number of loops to run (default: unlimited)"),
+       OPT_UINTEGER('s', "nr_secs"     , &p0.nr_secs,          "max number of seconds to run (default: 5 secs)"),
        OPT_UINTEGER('u', "usleep"      , &p0.sleep_usecs,      "usecs to sleep per loop iteration"),
 
        OPT_BOOLEAN('R', "data_reads"   , &p0.data_reads,       "access the data via writes (can be mixed with -W)"),
index d7f281c2828d97e35bb1f007f2f792666e89b78b..d4ff1b539cfd2727e0488858cf241aa83fe6c037 100644 (file)
@@ -33,7 +33,7 @@
 #define DATASIZE 100
 
 static bool use_pipes = false;
-static unsigned int loops = 100;
+static unsigned int nr_loops = 100;
 static bool thread_mode = false;
 static unsigned int num_groups = 10;
 
@@ -79,7 +79,7 @@ static void ready(int ready_out, int wakefd)
                err(EXIT_FAILURE, "poll");
 }
 
-/* Sender sprays loops messages down each file descriptor */
+/* Sender sprays nr_loops messages down each file descriptor */
 static void *sender(struct sender_context *ctx)
 {
        char data[DATASIZE];
@@ -88,7 +88,7 @@ static void *sender(struct sender_context *ctx)
        ready(ctx->ready_out, ctx->wakefd);
 
        /* Now pump to every receiver. */
-       for (i = 0; i < loops; i++) {
+       for (i = 0; i < nr_loops; i++) {
                for (j = 0; j < ctx->num_fds; j++) {
                        int ret, done = 0;
 
@@ -213,7 +213,7 @@ static unsigned int group(pthread_t *pth,
                /* Create the pipe between client and server */
                fdpair(fds);
 
-               ctx->num_packets = num_fds * loops;
+               ctx->num_packets = num_fds * nr_loops;
                ctx->in_fds[0] = fds[0];
                ctx->in_fds[1] = fds[1];
                ctx->ready_out = ready_out;
@@ -250,7 +250,7 @@ static const struct option options[] = {
        OPT_BOOLEAN('t', "thread", &thread_mode,
                    "Be multi thread instead of multi process"),
        OPT_UINTEGER('g', "group", &num_groups, "Specify number of groups"),
-       OPT_UINTEGER('l', "loop", &loops, "Specify number of loops"),
+       OPT_UINTEGER('l', "nr_loops", &nr_loops, "Specify the number of loops to run (default: 100)"),
        OPT_END()
 };
 
index 8edc205ff9a7fe41097f8ff72338079516f286e2..2bf9b3fd9e61546fb8fd58ff9a43f7c5106e2778 100644 (file)
@@ -211,7 +211,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
        }
 
        if (!objdump_path) {
-               ret = perf_session_env__lookup_objdump(&session->header.env);
+               ret = perf_env__lookup_objdump(&session->header.env);
                if (ret)
                        goto out;
        }
index f67934d46d4098773a746f0039f1c3d1f1097097..b17aed36ca16207915ab935ecceea426e49a9ec6 100644 (file)
@@ -36,7 +36,7 @@ struct bench {
 #ifdef HAVE_LIBNUMA_SUPPORT
 static struct bench numa_benchmarks[] = {
        { "mem",        "Benchmark for NUMA workloads",                 bench_numa              },
-       { "all",        "Test all NUMA benchmarks",                     NULL                    },
+       { "all",        "Run all NUMA benchmarks",                      NULL                    },
        { NULL,         NULL,                                           NULL                    }
 };
 #endif
@@ -44,14 +44,14 @@ static struct bench numa_benchmarks[] = {
 static struct bench sched_benchmarks[] = {
        { "messaging",  "Benchmark for scheduling and IPC",             bench_sched_messaging   },
        { "pipe",       "Benchmark for pipe() between two processes",   bench_sched_pipe        },
-       { "all",        "Test all scheduler benchmarks",                NULL                    },
+       { "all",        "Run all scheduler benchmarks",         NULL                    },
        { NULL,         NULL,                                           NULL                    }
 };
 
 static struct bench mem_benchmarks[] = {
-       { "memcpy",     "Benchmark for memcpy()",                       bench_mem_memcpy        },
-       { "memset",     "Benchmark for memset() tests",                 bench_mem_memset        },
-       { "all",        "Test all memory benchmarks",                   NULL                    },
+       { "memcpy",     "Benchmark for memcpy() functions",             bench_mem_memcpy        },
+       { "memset",     "Benchmark for memset() functions",             bench_mem_memset        },
+       { "all",        "Run all memory access benchmarks",             NULL                    },
        { NULL,         NULL,                                           NULL                    }
 };
 
@@ -62,7 +62,7 @@ static struct bench futex_benchmarks[] = {
        { "requeue",    "Benchmark for futex requeue calls",            bench_futex_requeue     },
        /* pi-futexes */
        { "lock-pi",    "Benchmark for futex lock_pi calls",            bench_futex_lock_pi     },
-       { "all",        "Test all futex benchmarks",                    NULL                    },
+       { "all",        "Run all futex benchmarks",                     NULL                    },
        { NULL,         NULL,                                           NULL                    }
 };
 
@@ -110,7 +110,7 @@ int bench_format = BENCH_FORMAT_DEFAULT;
 unsigned int bench_repeat = 10; /* default number of times to repeat the run */
 
 static const struct option bench_options[] = {
-       OPT_STRING('f', "format", &bench_format_str, "default", "Specify format style"),
+       OPT_STRING('f', "format", &bench_format_str, "default|simple", "Specify the output formatting style"),
        OPT_UINTEGER('r', "repeat",  &bench_repeat,   "Specify amount of times to repeat the run"),
        OPT_END()
 };
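
The benchmark tables touched above all share one shape: a NULL-terminated array of { name, summary, callback } rows, with an "all" row that only steers the loop. As a hypothetical standalone sketch of that table walk (bench_foo/bench_bar are stand-ins, not real perf benchmarks):

#include <stdio.h>
#include <string.h>

struct bench {
	const char *name;
	const char *summary;
	int (*fn)(void);
};

static int bench_foo(void) { puts("running foo"); return 0; }
static int bench_bar(void) { puts("running bar"); return 0; }

static const struct bench benchmarks[] = {
	{ "foo",  "First stand-in benchmark",   bench_foo },
	{ "bar",  "Second stand-in benchmark",  bench_bar },
	{ "all",  "Run all benchmarks",         NULL      },
	{ NULL,   NULL,                         NULL      },
};

int main(int argc, char **argv)
{
	const char *wanted = argc > 1 ? argv[1] : "all";
	const struct bench *b;
	int found = 0;

	for (b = benchmarks; b->name; b++) {
		if (!b->fn)
			continue;       /* skip the "all" marker row */
		if (!strcmp(wanted, "all") || !strcmp(wanted, b->name)) {
			found = 1;
			b->fn();
		}
	}
	if (!found)
		fprintf(stderr, "unknown benchmark: %s\n", wanted);
	return !found;
}
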
index 695ec5a50cf22c56e72961ae98ae9580a98f2512..f4d62510acbbb5603ad246bea89a7dfb95c07f43 100644 (file)
@@ -61,8 +61,8 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
                usage_with_options(evlist_usage, options);
 
        if (details.event_group && (details.verbose || details.freq)) {
-               pr_err("--group option is not compatible with other options\n");
-               usage_with_options(evlist_usage, options);
+               usage_with_options_msg(evlist_usage, options,
+                       "--group option is not compatible with other options\n");
        }
 
        return __cmd_evlist(input_name, &details);
index 36486eade1ef4f2d30736c99d7471b86d7dd0ada..a7d588bf3cdd345233042131373666af2f9d7598 100644 (file)
@@ -463,7 +463,7 @@ int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
                        builtin_help_subcommands, builtin_help_usage, 0);
 
        if (show_all) {
-               printf("\n usage: %s\n\n", perf_usage_string);
+               printf("\n Usage: %s\n\n", perf_usage_string);
                list_commands("perf commands", &main_cmds, &other_cmds);
                printf(" %s\n\n", perf_more_info_string);
                return 0;
index f62c49b35be04ec698fc51e2b73ded6bb9dfbccf..0a945d2e8ca5b1bd319fa18537e7a4c57e3b7291 100644 (file)
@@ -28,9 +28,11 @@ struct perf_inject {
        bool                    build_ids;
        bool                    sched_stat;
        bool                    have_auxtrace;
+       bool                    strip;
        const char              *input_name;
        struct perf_data_file   output;
        u64                     bytes_written;
+       u64                     aux_id;
        struct list_head        samples;
        struct itrace_synth_opts itrace_synth_opts;
 };
@@ -176,6 +178,27 @@ static int perf_event__repipe(struct perf_tool *tool,
        return perf_event__repipe_synth(tool, event);
 }
 
+static int perf_event__drop(struct perf_tool *tool __maybe_unused,
+                           union perf_event *event __maybe_unused,
+                           struct perf_sample *sample __maybe_unused,
+                           struct machine *machine __maybe_unused)
+{
+       return 0;
+}
+
+static int perf_event__drop_aux(struct perf_tool *tool,
+                               union perf_event *event __maybe_unused,
+                               struct perf_sample *sample,
+                               struct machine *machine __maybe_unused)
+{
+       struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+
+       if (!inject->aux_id)
+               inject->aux_id = sample->id;
+
+       return 0;
+}
+
 typedef int (*inject_handler)(struct perf_tool *tool,
                              union perf_event *event,
                              struct perf_sample *sample,
@@ -466,6 +489,78 @@ static int perf_evsel__check_stype(struct perf_evsel *evsel,
        return 0;
 }
 
+static int drop_sample(struct perf_tool *tool __maybe_unused,
+                      union perf_event *event __maybe_unused,
+                      struct perf_sample *sample __maybe_unused,
+                      struct perf_evsel *evsel __maybe_unused,
+                      struct machine *machine __maybe_unused)
+{
+       return 0;
+}
+
+static void strip_init(struct perf_inject *inject)
+{
+       struct perf_evlist *evlist = inject->session->evlist;
+       struct perf_evsel *evsel;
+
+       inject->tool.context_switch = perf_event__drop;
+
+       evlist__for_each(evlist, evsel)
+               evsel->handler = drop_sample;
+}
+
+static bool has_tracking(struct perf_evsel *evsel)
+{
+       return evsel->attr.mmap || evsel->attr.mmap2 || evsel->attr.comm ||
+              evsel->attr.task;
+}
+
+#define COMPAT_MASK (PERF_SAMPLE_ID | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
+                    PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER)
+
+/*
+ * For the perf.data file to be parsable, tracking events like MMAP need their
+ * selected event to exist, unless only one selected event is left and it has
+ * a compatible sample type.
+ */
+static bool ok_to_remove(struct perf_evlist *evlist,
+                        struct perf_evsel *evsel_to_remove)
+{
+       struct perf_evsel *evsel;
+       int cnt = 0;
+       bool ok = false;
+
+       if (!has_tracking(evsel_to_remove))
+               return true;
+
+       evlist__for_each(evlist, evsel) {
+               if (evsel->handler != drop_sample) {
+                       cnt += 1;
+                       if ((evsel->attr.sample_type & COMPAT_MASK) ==
+                           (evsel_to_remove->attr.sample_type & COMPAT_MASK))
+                               ok = true;
+               }
+       }
+
+       return ok && cnt == 1;
+}
+
+static void strip_fini(struct perf_inject *inject)
+{
+       struct perf_evlist *evlist = inject->session->evlist;
+       struct perf_evsel *evsel, *tmp;
+
+       /* Remove non-synthesized evsels if possible */
+       evlist__for_each_safe(evlist, tmp, evsel) {
+               if (evsel->handler == drop_sample &&
+                   ok_to_remove(evlist, evsel)) {
+                       pr_debug("Deleting %s\n", perf_evsel__name(evsel));
+                       perf_evlist__remove(evlist, evsel);
+                       perf_evsel__delete(evsel);
+               }
+       }
+}
+
 static int __cmd_inject(struct perf_inject *inject)
 {
        int ret = -EINVAL;
@@ -512,10 +607,14 @@ static int __cmd_inject(struct perf_inject *inject)
                inject->tool.id_index       = perf_event__repipe_id_index;
                inject->tool.auxtrace_info  = perf_event__process_auxtrace_info;
                inject->tool.auxtrace       = perf_event__process_auxtrace;
+               inject->tool.aux            = perf_event__drop_aux;
+               inject->tool.itrace_start   = perf_event__drop_aux,
                inject->tool.ordered_events = true;
                inject->tool.ordering_requires_timestamps = true;
                /* Allow space in the header for new attributes */
                output_data_offset = 4096;
+               if (inject->strip)
+                       strip_init(inject);
        }
 
        if (!inject->itrace_synth_opts.set)
@@ -535,11 +634,28 @@ static int __cmd_inject(struct perf_inject *inject)
                }
                /*
                 * The AUX areas have been removed and replaced with
-                * synthesized hardware events, so clear the feature flag.
+                * synthesized hardware events, so clear the feature flag and
+                * remove the evsel.
                 */
-               if (inject->itrace_synth_opts.set)
+               if (inject->itrace_synth_opts.set) {
+                       struct perf_evsel *evsel;
+
                        perf_header__clear_feat(&session->header,
                                                HEADER_AUXTRACE);
+                       if (inject->itrace_synth_opts.last_branch)
+                               perf_header__set_feat(&session->header,
+                                                     HEADER_BRANCH_STACK);
+                       evsel = perf_evlist__id2evsel_strict(session->evlist,
+                                                            inject->aux_id);
+                       if (evsel) {
+                               pr_debug("Deleting %s\n",
+                                        perf_evsel__name(evsel));
+                               perf_evlist__remove(session->evlist, evsel);
+                               perf_evsel__delete(evsel);
+                       }
+                       if (inject->strip)
+                               strip_fini(inject);
+               }
                session->header.data_offset = output_data_offset;
                session->header.data_size = inject->bytes_written;
                perf_session__write_header(session, session->evlist, fd, true);
@@ -604,6 +720,8 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
                OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
                                    NULL, "opts", "Instruction Tracing options",
                                    itrace_parse_synth_opts),
+               OPT_BOOLEAN(0, "strip", &inject.strip,
+                           "strip non-synthesized events (use with --itrace)"),
                OPT_END()
        };
        const char * const inject_usage[] = {
@@ -619,6 +737,11 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
        if (argc)
                usage_with_options(inject_usage, options);
 
+       if (inject.strip && !inject.itrace_synth_opts.set) {
+               pr_err("--strip option requires --itrace option\n");
+               return -1;
+       }
+
        if (perf_data_file__open(&inject.output)) {
                perror("failed to create output file");
                return -1;
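
The new perf_event__drop_aux() above recovers its struct perf_inject from the embedded struct perf_tool via container_of(). A simplified standalone sketch of that step (the macro below is a reduced version of the kernel one without type checking, and the struct names are placeholders):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool {
	int unused;
};

struct inject {
	int aux_id;
	struct tool tool;       /* embedded; this is what callbacks see */
};

static void drop_aux(struct tool *t, int sample_id)
{
	struct inject *inj = container_of(t, struct inject, tool);

	if (!inj->aux_id)
		inj->aux_id = sample_id;        /* remember the first id seen */
}

int main(void)
{
	struct inject inj = { .aux_id = 0 };

	drop_aux(&inj.tool, 7);
	drop_aux(&inj.tool, 9);                 /* ignored: already set */
	printf("aux_id = %d\n", inj.aux_id);    /* prints 7 */
	return 0;
}
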
index 23b1faaaa4cc5f83c263ffbc9a0f14733c0dcae2..93ce665f976f65a4a1889ba01441bb1f1cce2786 100644 (file)
@@ -329,7 +329,7 @@ static int build_alloc_func_list(void)
                return -EINVAL;
        }
 
-       kernel_map = machine->vmlinux_maps[MAP__FUNCTION];
+       kernel_map = machine__kernel_map(machine);
        if (map__load(kernel_map, NULL) < 0) {
                pr_err("cannot load kernel map\n");
                return -ENOENT;
index fc1cffb1b7a28c9b0d9856770530a0e8b3bf9606..dd94b4ca22131a211729082925bbbf082d19c73e 100644 (file)
@@ -13,7 +13,6 @@
 #include "util/parse-options.h"
 #include "util/trace-event.h"
 #include "util/debug.h"
-#include <api/fs/debugfs.h>
 #include "util/tool.h"
 #include "util/stat.h"
 #include "util/top.h"
index af5bd05141088469d29d2fc7112927be96f2f610..bf679e2c978bdef9b7943271987e4b7e4e465981 100644 (file)
@@ -36,7 +36,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
 
        setup_pager();
 
-       if (!raw_dump)
+       if (!raw_dump && pager_in_use())
                printf("\nList of pre-defined events (to be used in -e):\n\n");
 
        if (argc == 0) {
@@ -45,6 +45,8 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
        }
 
        for (i = 0; i < argc; ++i) {
+               char *sep, *s;
+
                if (strcmp(argv[i], "tracepoint") == 0)
                        print_tracepoint_events(NULL, NULL, raw_dump);
                else if (strcmp(argv[i], "hw") == 0 ||
@@ -60,8 +62,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
                        print_hwcache_events(NULL, raw_dump);
                else if (strcmp(argv[i], "pmu") == 0)
                        print_pmu_events(NULL, raw_dump);
-               else {
-                       char *sep = strchr(argv[i], ':'), *s;
+               else if ((sep = strchr(argv[i], ':')) != NULL) {
                        int sep_idx;
 
                        if (sep == NULL) {
@@ -76,6 +77,19 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
                        s[sep_idx] = '\0';
                        print_tracepoint_events(s, s + sep_idx + 1, raw_dump);
                        free(s);
+               } else {
+                       if (asprintf(&s, "*%s*", argv[i]) < 0) {
+                               printf("Critical: Not enough memory! Trying to continue...\n");
+                               continue;
+                       }
+                       print_symbol_events(s, PERF_TYPE_HARDWARE,
+                                           event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump);
+                       print_symbol_events(s, PERF_TYPE_SOFTWARE,
+                                           event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump);
+                       print_hwcache_events(s, raw_dump);
+                       print_pmu_events(s, raw_dump);
+                       print_tracepoint_events(NULL, s, raw_dump);
+                       free(s);
                }
        }
        return 0;
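
The new fallback branch above wraps a bare argument in '*' wildcards and lets the print_* helpers glob-match it against the event tables. A rough standalone illustration, with fnmatch() standing in for perf's internal glob matcher and a made-up event list:

#define _GNU_SOURCE             /* for asprintf() */
#include <fnmatch.h>
#include <stdio.h>
#include <stdlib.h>

static const char *events[] = {
	"cycles", "instructions", "cache-misses", "branch-misses", NULL
};

int main(int argc, char **argv)
{
	const char *arg = argc > 1 ? argv[1] : "miss";
	char *pattern;
	int i;

	if (asprintf(&pattern, "*%s*", arg) < 0)
		return 1;

	for (i = 0; events[i]; i++)
		if (fnmatch(pattern, events[i], 0) == 0)
			printf("%s\n", events[i]);

	free(pattern);
	return 0;
}
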
index b81cec33b4b2e4e07cac866a514e6369499b0c16..132afc97676c1861d7be03bbbd4f5cea99eb9e57 100644 (file)
 #include "util/strfilter.h"
 #include "util/symbol.h"
 #include "util/debug.h"
-#include <api/fs/debugfs.h>
 #include "util/parse-options.h"
 #include "util/probe-finder.h"
 #include "util/probe-event.h"
+#include "util/probe-file.h"
 
 #define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
 #define DEFAULT_FUNC_FILTER "!_*"
@@ -182,10 +182,8 @@ static int opt_set_target(const struct option *opt, const char *str,
        if  (str) {
                if (!strcmp(opt->long_name, "exec"))
                        params.uprobes = true;
-#ifdef HAVE_DWARF_SUPPORT
                else if (!strcmp(opt->long_name, "module"))
                        params.uprobes = false;
-#endif
                else
                        return ret;
 
@@ -311,6 +309,119 @@ static void pr_err_with_code(const char *msg, int err)
        pr_err("\n");
 }
 
+static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs)
+{
+       int ret;
+       int i, k;
+       const char *event = NULL, *group = NULL;
+
+       ret = init_probe_symbol_maps(pevs->uprobes);
+       if (ret < 0)
+               return ret;
+
+       ret = convert_perf_probe_events(pevs, npevs);
+       if (ret < 0)
+               goto out_cleanup;
+
+       ret = apply_perf_probe_events(pevs, npevs);
+       if (ret < 0)
+               goto out_cleanup;
+
+       for (i = k = 0; i < npevs; i++)
+               k += pevs[i].ntevs;
+
+       pr_info("Added new event%s\n", (k > 1) ? "s:" : ":");
+       for (i = 0; i < npevs; i++) {
+               struct perf_probe_event *pev = &pevs[i];
+
+               for (k = 0; k < pev->ntevs; k++) {
+                       struct probe_trace_event *tev = &pev->tevs[k];
+
+                       /* We use tev's name for showing new events */
+                       show_perf_probe_event(tev->group, tev->event, pev,
+                                             tev->point.module, false);
+
+                       /* Save the last valid name */
+                       event = tev->event;
+                       group = tev->group;
+               }
+       }
+
+       /* Note that it is possible to skip all events because of blacklist */
+       if (event) {
+               /* Show how to use the event. */
+               pr_info("\nYou can now use it in all perf tools, such as:\n\n");
+               pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", group, event);
+       }
+
+out_cleanup:
+       cleanup_perf_probe_events(pevs, npevs);
+       exit_probe_symbol_maps();
+       return ret;
+}
+
+static int perf_del_probe_events(struct strfilter *filter)
+{
+       int ret, ret2, ufd = -1, kfd = -1;
+       char *str = strfilter__string(filter);
+       struct strlist *klist = NULL, *ulist = NULL;
+       struct str_node *ent;
+
+       if (!str)
+               return -EINVAL;
+
+       pr_debug("Delete filter: \'%s\'\n", str);
+
+       /* Get current event names */
+       ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);
+       if (ret < 0)
+               goto out;
+
+       klist = strlist__new(NULL, NULL);
+       ulist = strlist__new(NULL, NULL);
+       if (!klist || !ulist) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = probe_file__get_events(kfd, filter, klist);
+       if (ret == 0) {
+               strlist__for_each(ent, klist)
+                       pr_info("Removed event: %s\n", ent->s);
+
+               ret = probe_file__del_strlist(kfd, klist);
+               if (ret < 0)
+                       goto error;
+       }
+
+       ret2 = probe_file__get_events(ufd, filter, ulist);
+       if (ret2 == 0) {
+               strlist__for_each(ent, ulist)
+                       pr_info("Removed event: %s\n", ent->s);
+
+               ret2 = probe_file__del_strlist(ufd, ulist);
+               if (ret2 < 0)
+                       goto error;
+       }
+
+       if (ret == -ENOENT && ret2 == -ENOENT)
+               pr_debug("\"%s\" does not hit any event.\n", str);
+       /* Note that this is silently ignored */
+       ret = 0;
+
+error:
+       if (kfd >= 0)
+               close(kfd);
+       if (ufd >= 0)
+               close(ufd);
+out:
+       strlist__delete(klist);
+       strlist__delete(ulist);
+       free(str);
+
+       return ret;
+}
+
 static int
 __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
 {
@@ -377,9 +488,6 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                   "file", "vmlinux pathname"),
        OPT_STRING('s', "source", &symbol_conf.source_prefix,
                   "directory", "path to kernel source"),
-       OPT_CALLBACK('m', "module", NULL, "modname|path",
-               "target module name (for online) or path (for offline)",
-               opt_set_target),
        OPT_BOOLEAN('\0', "no-inlines", &probe_conf.no_inlines,
                "Don't search inlined functions"),
 #endif
@@ -396,6 +504,9 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                     opt_set_filter),
        OPT_CALLBACK('x', "exec", NULL, "executable|path",
                        "target executable name or path", opt_set_target),
+       OPT_CALLBACK('m', "module", NULL, "modname|path",
+               "target module name (for online) or path (for offline)",
+               opt_set_target),
        OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
                    "Enable symbol demangling"),
        OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
@@ -417,12 +528,12 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                             PARSE_OPT_STOP_AT_NON_OPTION);
        if (argc > 0) {
                if (strcmp(argv[0], "-") == 0) {
-                       pr_warning("  Error: '-' is not supported.\n");
-                       usage_with_options(probe_usage, options);
+                       usage_with_options_msg(probe_usage, options,
+                               "'-' is not supported.\n");
                }
                if (params.command && params.command != 'a') {
-                       pr_warning("  Error: another command except --add is set.\n");
-                       usage_with_options(probe_usage, options);
+                       usage_with_options_msg(probe_usage, options,
+                               "another command except --add is set.\n");
                }
                ret = parse_probe_event_argv(argc, argv);
                if (ret < 0) {
@@ -451,8 +562,10 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
        switch (params.command) {
        case 'l':
                if (params.uprobes) {
-                       pr_warning("  Error: Don't use --list with --exec.\n");
-                       usage_with_options(probe_usage, options);
+                       pr_err("  Error: Don't use --list with --exec.\n");
+                       parse_options_usage(probe_usage, options, "l", true);
+                       parse_options_usage(NULL, options, "x", true);
+                       return -EINVAL;
                }
                ret = show_perf_probe_events(params.filter);
                if (ret < 0)
@@ -483,7 +596,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                return ret;
 #endif
        case 'd':
-               ret = del_perf_probe_events(params.filter);
+               ret = perf_del_probe_events(params.filter);
                if (ret < 0) {
                        pr_err_with_code("  Error: Failed to delete events.", ret);
                        return ret;
@@ -492,11 +605,13 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
        case 'a':
                /* Ensure the last given target is used */
                if (params.target && !params.target_used) {
-                       pr_warning("  Error: -x/-m must follow the probe definitions.\n");
-                       usage_with_options(probe_usage, options);
+                       pr_err("  Error: -x/-m must follow the probe definitions.\n");
+                       parse_options_usage(probe_usage, options, "m", true);
+                       parse_options_usage(NULL, options, "x", true);
+                       return -EINVAL;
                }
 
-               ret = add_perf_probe_events(params.events, params.nevents);
+               ret = perf_add_probe_events(params.events, params.nevents);
                if (ret < 0) {
                        pr_err_with_code("  Error: Failed to add events.", ret);
                        return ret;
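
The new perf_add_probe_events() above uses the usual kernel-style staged flow: each step can fail, a single out_cleanup label releases whatever was set up, and the cleanup also runs on success. A sketch of that shape with placeholder stage functions (not perf APIs):

#include <stdio.h>

static int stage_init(void)    { puts("init symbol maps"); return 0; }
static int stage_convert(void) { puts("convert events");   return 0; }
static int stage_apply(void)   { puts("apply events");     return 0; }
static void cleanup(void)      { puts("cleanup");          }

static int add_events(void)
{
	int ret;

	ret = stage_init();
	if (ret < 0)
		return ret;             /* nothing to undo yet */

	ret = stage_convert();
	if (ret < 0)
		goto out_cleanup;

	ret = stage_apply();
	if (ret < 0)
		goto out_cleanup;

	puts("added new events");

out_cleanup:
	cleanup();                      /* runs for both success and failure */
	return ret;
}

int main(void)
{
	return add_events() < 0;
}
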
index 142eeb341b295a722aec214d3346790221031ff1..199fc31e3919c5743ca305f9eef5ef8c8fc86a70 100644 (file)
@@ -31,6 +31,7 @@
 #include "util/auxtrace.h"
 #include "util/parse-branch-options.h"
 #include "util/parse-regs-options.h"
+#include "util/llvm-utils.h"
 
 #include <unistd.h>
 #include <sched.h>
@@ -49,7 +50,7 @@ struct record {
        int                     realtime_prio;
        bool                    no_buildid;
        bool                    no_buildid_cache;
-       long                    samples;
+       unsigned long long      samples;
 };
 
 static int record__write(struct record *rec, void *bf, size_t size)
@@ -636,8 +637,29 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        /*
         * Let the child rip
         */
-       if (forks)
+       if (forks) {
+               union perf_event *event;
+
+               event = malloc(sizeof(event->comm) + machine->id_hdr_size);
+               if (event == NULL) {
+                       err = -ENOMEM;
+                       goto out_child;
+               }
+
+               /*
+                * Some H/W events are generated before COMM event
+                * which is emitted during exec(), so perf script
+                * cannot see a correct process name for those events.
+                * Synthesize COMM event to prevent it.
+                */
+               perf_event__synthesize_comm(tool, event,
+                                           rec->evlist->workload.pid,
+                                           process_synthesized_event,
+                                           machine);
+               free(event);
+
                perf_evlist__start_workload(rec->evlist);
+       }
 
        if (opts->initial_delay) {
                usleep(opts->initial_delay * 1000);
@@ -646,7 +668,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 
        auxtrace_snapshot_enabled = 1;
        for (;;) {
-               int hits = rec->samples;
+               unsigned long long hits = rec->samples;
 
                if (record__mmap_read_all(rec) < 0) {
                        auxtrace_snapshot_enabled = 0;
@@ -989,13 +1011,8 @@ static struct record record = {
        },
 };
 
-#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
-
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf lbr";
-#else
-const char record_callchain_help[] = CALLCHAIN_HELP "fp lbr";
-#endif
+const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
+       "\n\t\t\t\tDefault: fp";
 
 /*
  * XXX Will stay a global variable till we fix builtin-script.c to stop messing
@@ -1043,7 +1060,7 @@ struct option __record_options[] = {
                           NULL, "enables call-graph recording" ,
                           &record_callchain_opt),
        OPT_CALLBACK(0, "call-graph", &record.opts,
-                    "mode[,dump_size]", record_callchain_help,
+                    "record_mode[,record_size]", record_callchain_help,
                     &record_parse_callchain_opt),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
@@ -1096,6 +1113,12 @@ struct option __record_options[] = {
                        "per thread proc mmap processing timeout in ms"),
        OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
                    "Record context switch events"),
+#ifdef HAVE_LIBBPF_SUPPORT
+       OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
+                  "clang binary to use for compiling BPF scriptlets"),
+       OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
+                  "options passed to clang when compiling BPF scriptlets"),
+#endif
        OPT_END()
 };
 
@@ -1119,14 +1142,15 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
                usage_with_options(record_usage, record_options);
 
        if (nr_cgroups && !rec->opts.target.system_wide) {
-               ui__error("cgroup monitoring only available in"
-                         " system-wide mode\n");
-               usage_with_options(record_usage, record_options);
+               usage_with_options_msg(record_usage, record_options,
+                       "cgroup monitoring only available in system-wide mode");
+
        }
        if (rec->opts.record_switch_events &&
            !perf_can_record_switch_events()) {
-               ui__error("kernel does not support recording context switch events (--switch-events option)\n");
-               usage_with_options(record_usage, record_options);
+               ui__error("kernel does not support recording context switch events\n");
+               parse_options_usage(record_usage, record_options, "switch-events", 0);
+               return -EINVAL;
        }
 
        if (!rec->itr) {
index 62b285e32aa551093d9869798827a5be2cf3e1e5..2853ad2bd43541b1291292f94a5c6f8a6804dd03 100644 (file)
@@ -62,6 +62,7 @@ struct report {
        float                   min_percent;
        u64                     nr_entries;
        u64                     queue_size;
+       int                     socket_filter;
        DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 };
 
@@ -162,14 +163,21 @@ static int process_sample_event(struct perf_tool *tool,
        if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
                goto out_put;
 
-       if (sort__mode == SORT_MODE__BRANCH)
+       if (sort__mode == SORT_MODE__BRANCH) {
+               /*
+                * A non-synthesized event might not have a branch stack if
+                * branch stacks have been synthesized (using itrace options).
+                */
+               if (!sample->branch_stack)
+                       goto out_put;
                iter.ops = &hist_iter_branch;
-       else if (rep->mem_mode)
+       } else if (rep->mem_mode) {
                iter.ops = &hist_iter_mem;
-       else if (symbol_conf.cumulate_callchain)
+       } else if (symbol_conf.cumulate_callchain) {
                iter.ops = &hist_iter_cumulative;
-       else
+       } else {
                iter.ops = &hist_iter_normal;
+       }
 
        if (al.map != NULL)
                al.map->dso->hit = 1;
@@ -213,6 +221,15 @@ static int report__setup_sample_type(struct report *rep)
        u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
        bool is_pipe = perf_data_file__is_pipe(session->file);
 
+       if (session->itrace_synth_opts->callchain ||
+           (!is_pipe &&
+            perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
+            !session->itrace_synth_opts->set))
+               sample_type |= PERF_SAMPLE_CALLCHAIN;
+
+       if (session->itrace_synth_opts->last_branch)
+               sample_type |= PERF_SAMPLE_BRANCH_STACK;
+
        if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
                if (sort__has_parent) {
                        ui__error("Selected --sort parent, but no "
@@ -286,6 +303,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
        struct perf_evsel *evsel = hists_to_evsel(hists);
        char buf[512];
        size_t size = sizeof(buf);
+       int socket_id = hists->socket_filter;
 
        if (symbol_conf.filter_relative) {
                nr_samples = hists->stats.nr_non_filtered_samples;
@@ -326,6 +344,10 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
                ret += fprintf(fp, "\n# Sort order   : %s", sort_order ? : default_mem_sort_order);
        } else
                ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
+
+       if (socket_id > -1)
+               ret += fprintf(fp, "\n# Processor Socket: %d", socket_id);
+
        return ret + fprintf(fp, "\n#\n");
 }
 
@@ -365,7 +387,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
 
 static void report__warn_kptr_restrict(const struct report *rep)
 {
-       struct map *kernel_map = rep->session->machines.host.vmlinux_maps[MAP__FUNCTION];
+       struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
        struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
 
        if (kernel_map == NULL ||
@@ -450,6 +472,8 @@ static void report__collapse_hists(struct report *rep)
                if (pos->idx == 0)
                        hists->symbol_filter_str = rep->symbol_filter_str;
 
+               hists->socket_filter = rep->socket_filter;
+
                hists__collapse_resort(hists, &prog);
 
                /* Non-group events are considered as leader */
@@ -601,6 +625,12 @@ parse_percent_limit(const struct option *opt, const char *str,
        return 0;
 }
 
+#define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function"
+
+const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
+                                    CALLCHAIN_REPORT_HELP
+                                    "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
+
 int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
 {
        struct perf_session *session;
@@ -609,7 +639,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
        bool has_br_stack = false;
        int branch_mode = -1;
        bool branch_call_mode = false;
-       char callchain_default_opt[] = "fractal,0.5,callee";
+       char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
        const char * const report_usage[] = {
                "perf report [<options>]",
                NULL
@@ -635,6 +665,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
                },
                .max_stack               = PERF_MAX_STACK_DEPTH,
                .pretty_printing_style   = "normal",
+               .socket_filter           = -1,
        };
        const struct option options[] = {
        OPT_STRING('i', "input", &input_name, "file",
@@ -668,15 +699,18 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
                   " Please refer the man page for the complete list."),
        OPT_STRING('F', "fields", &field_order, "key[,keys...]",
                   "output field(s): overhead, period, sample plus all of sort keys"),
-       OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
+       OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
                    "Show sample percentage for different cpu modes"),
+       OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
+                   "Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN),
        OPT_STRING('p', "parent", &parent_pattern, "regex",
                   "regex filter to identify parent, see: '--sort parent'"),
        OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
                    "Only display entries with parent-match"),
-       OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order[,branch]",
-                    "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit, callchain order, key (function or address), add branches. "
-                    "Default: fractal,0.5,callee,function", &report_parse_callchain_opt, callchain_default_opt),
+       OPT_CALLBACK_DEFAULT('g', "call-graph", &report,
+                            "print_type,threshold[,print_limit],order,sort_key[,branch]",
+                            report_callchain_help, &report_parse_callchain_opt,
+                            callchain_default_opt),
        OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
                    "Accumulate callchains of children and show total overhead as well"),
        OPT_INTEGER(0, "max-stack", &report.max_stack,
@@ -747,6 +781,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
                        "Show full source file name path for source lines"),
        OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
                    "Show callgraph from reference event"),
+       OPT_INTEGER(0, "socket-filter", &report.socket_filter,
+                   "only show processor sockets that match this filter"),
        OPT_END()
        };
        struct perf_data_file file = {
@@ -781,6 +817,12 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
 
        if (report.inverted_callchain)
                callchain_param.order = ORDER_CALLER;
+       if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
+               callchain_param.order = ORDER_CALLER;
+
+       if (itrace_synth_opts.callchain &&
+           (int)itrace_synth_opts.callchain_sz > report.max_stack)
+               report.max_stack = itrace_synth_opts.callchain_sz;
 
        if (!input_name || !strlen(input_name)) {
                if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
@@ -809,6 +851,9 @@ repeat:
        has_br_stack = perf_header__has_feat(&session->header,
                                             HEADER_BRANCH_STACK);
 
+       if (itrace_synth_opts.last_branch)
+               has_br_stack = true;
+
        /*
         * Branch mode is a tristate:
         * -1 means default, so decide based on the file having branch data.
index 33962612a5e9035ae42c83e15497a5713922556b..0ee6d900e100a29ceeb3cacf7e97d81dce8f8d17 100644 (file)
@@ -1728,8 +1728,8 @@ static void setup_sorting(struct perf_sched *sched, const struct option *options
        for (tok = strtok_r(str, ", ", &tmp);
                        tok; tok = strtok_r(NULL, ", ", &tmp)) {
                if (sort_dimension__add(tok, &sched->sort_list) < 0) {
-                       error("Unknown --sort key: `%s'", tok);
-                       usage_with_options(usage_msg, options);
+                       usage_with_options_msg(usage_msg, options,
+                                       "Unknown --sort key: `%s'", tok);
                }
        }
 
index 284a76e046284983cd769da89cb45604b114a41a..72b5deb4bd7961bc4fdb86c50689253b23660b0e 100644 (file)
@@ -29,9 +29,12 @@ static bool                  no_callchain;
 static bool                    latency_format;
 static bool                    system_wide;
 static bool                    print_flags;
+static bool                    nanosecs;
 static const char              *cpu_list;
 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 
+unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
+
 enum perf_output_field {
        PERF_OUTPUT_COMM            = 1U << 0,
        PERF_OUTPUT_TID             = 1U << 1,
@@ -48,6 +51,8 @@ enum perf_output_field {
        PERF_OUTPUT_SRCLINE         = 1U << 12,
        PERF_OUTPUT_PERIOD          = 1U << 13,
        PERF_OUTPUT_IREGS           = 1U << 14,
+       PERF_OUTPUT_BRSTACK         = 1U << 15,
+       PERF_OUTPUT_BRSTACKSYM      = 1U << 16,
 };
 
 struct output_option {
@@ -69,6 +74,8 @@ struct output_option {
        {.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
        {.str = "period", .field = PERF_OUTPUT_PERIOD},
        {.str = "iregs", .field = PERF_OUTPUT_IREGS},
+       {.str = "brstack", .field = PERF_OUTPUT_BRSTACK},
+       {.str = "brstacksym", .field = PERF_OUTPUT_BRSTACKSYM},
 };
 
 /* default set to maintain compatibility with current format */
@@ -415,10 +422,84 @@ static void print_sample_start(struct perf_sample *sample,
                secs = nsecs / NSECS_PER_SEC;
                nsecs -= secs * NSECS_PER_SEC;
                usecs = nsecs / NSECS_PER_USEC;
-               printf("%5lu.%06lu: ", secs, usecs);
+               if (nanosecs)
+                       printf("%5lu.%09llu: ", secs, nsecs);
+               else
+                       printf("%5lu.%06lu: ", secs, usecs);
        }
 }
 
+static inline char
+mispred_str(struct branch_entry *br)
+{
+       if (!(br->flags.mispred  || br->flags.predicted))
+               return '-';
+
+       return br->flags.predicted ? 'P' : 'M';
+}
+
+static void print_sample_brstack(union perf_event *event __maybe_unused,
+                         struct perf_sample *sample,
+                         struct thread *thread __maybe_unused,
+                         struct perf_event_attr *attr __maybe_unused)
+{
+       struct branch_stack *br = sample->branch_stack;
+       u64 i;
+
+       if (!(br && br->nr))
+               return;
+
+       for (i = 0; i < br->nr; i++) {
+               printf(" 0x%"PRIx64"/0x%"PRIx64"/%c/%c/%c/%d ",
+                       br->entries[i].from,
+                       br->entries[i].to,
+                       mispred_str( br->entries + i),
+                       br->entries[i].flags.in_tx? 'X' : '-',
+                       br->entries[i].flags.abort? 'A' : '-',
+                       br->entries[i].flags.cycles);
+       }
+}
+
+static void print_sample_brstacksym(union perf_event *event __maybe_unused,
+                         struct perf_sample *sample,
+                         struct thread *thread __maybe_unused,
+                         struct perf_event_attr *attr __maybe_unused)
+{
+       struct branch_stack *br = sample->branch_stack;
+       struct addr_location alf, alt;
+       u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+       u64 i, from, to;
+
+       if (!(br && br->nr))
+               return;
+
+       for (i = 0; i < br->nr; i++) {
+
+               memset(&alf, 0, sizeof(alf));
+               memset(&alt, 0, sizeof(alt));
+               from = br->entries[i].from;
+               to   = br->entries[i].to;
+
+               thread__find_addr_map(thread, cpumode, MAP__FUNCTION, from, &alf);
+               if (alf.map)
+                       alf.sym = map__find_symbol(alf.map, alf.addr, NULL);
+
+               thread__find_addr_map(thread, cpumode, MAP__FUNCTION, to, &alt);
+               if (alt.map)
+                       alt.sym = map__find_symbol(alt.map, alt.addr, NULL);
+
+               symbol__fprintf_symname_offs(alf.sym, &alf, stdout);
+               putchar('/');
+               symbol__fprintf_symname_offs(alt.sym, &alt, stdout);
+               printf("/%c/%c/%c/%d ",
+                       mispred_str( br->entries + i),
+                       br->entries[i].flags.in_tx? 'X' : '-',
+                       br->entries[i].flags.abort? 'A' : '-',
+                       br->entries[i].flags.cycles);
+       }
+}
+
+
 static void print_sample_addr(union perf_event *event,
                          struct perf_sample *sample,
                          struct thread *thread,
@@ -471,7 +552,7 @@ static void print_sample_bts(union perf_event *event,
                        }
                }
                perf_evsel__print_ip(evsel, sample, al, print_opts,
-                                    PERF_MAX_STACK_DEPTH);
+                                    scripting_max_stack);
        }
 
        /* print branch_to information */
@@ -548,12 +629,17 @@ static void process_event(union perf_event *event, struct perf_sample *sample,
 
                perf_evsel__print_ip(evsel, sample, al,
                                     output[attr->type].print_ip_opts,
-                                    PERF_MAX_STACK_DEPTH);
+                                    scripting_max_stack);
        }
 
        if (PRINT_FIELD(IREGS))
                print_sample_iregs(event, sample, thread, attr);
 
+       if (PRINT_FIELD(BRSTACK))
+               print_sample_brstack(event, sample, thread, attr);
+       else if (PRINT_FIELD(BRSTACKSYM))
+               print_sample_brstacksym(event, sample, thread, attr);
+
        printf("\n");
 }
 
@@ -680,7 +766,10 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
 
        set_print_ip_opts(&evsel->attr);
 
-       return perf_evsel__check_attr(evsel, scr->session);
+       if (evsel->attr.sample_type)
+               err = perf_evsel__check_attr(evsel, scr->session);
+
+       return err;
 }
 
 static int process_comm_event(struct perf_tool *tool,
@@ -1672,7 +1761,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
                     "comma separated output fields prepend with 'type:'. "
                     "Valid types: hw,sw,trace,raw. "
                     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
-                    "addr,symoff,period,iregs,flags", parse_output_fields),
+                    "addr,symoff,period,iregs,brstack,brstacksym,flags", parse_output_fields),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
        OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
@@ -1695,6 +1784,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_BOOLEAN('\0', "show-switch-events", &script.show_switch_events,
                    "Show context switch events (if recorded)"),
        OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
+       OPT_BOOLEAN(0, "ns", &nanosecs,
+                   "Use 9 decimal places when displaying time"),
        OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
                            "Instruction Tracing options",
                            itrace_parse_synth_opts),
@@ -1740,6 +1831,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
                }
        }
 
+       if (itrace_synth_opts.callchain &&
+           itrace_synth_opts.callchain_sz > scripting_max_stack)
+               scripting_max_stack = itrace_synth_opts.callchain_sz;
+
        /* make sure PERF_EXEC_PATH is set for scripts */
        perf_set_argv_exec_path(perf_exec_path());
 
@@ -1752,9 +1847,9 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
                rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
 
                if (!rec_script_path && !rep_script_path) {
-                       fprintf(stderr, " Couldn't find script %s\n\n See perf"
+                       usage_with_options_msg(script_usage, options,
+                               "Couldn't find script `%s'\n\n See perf"
                                " script -l for available scripts.\n", argv[0]);
-                       usage_with_options(script_usage, options);
                }
 
                if (is_top_script(argv[0])) {
@@ -1765,10 +1860,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
                        rep_args = has_required_arg(rep_script_path);
                        rec_args = (argc - 1) - rep_args;
                        if (rec_args < 0) {
-                               fprintf(stderr, " %s script requires options."
+                               usage_with_options_msg(script_usage, options,
+                                       "`%s' script requires options."
                                        "\n\n See perf script -l for available "
                                        "scripts and options.\n", argv[0]);
-                               usage_with_options(script_usage, options);
                        }
                }
 
index d46dbb1bc65d95980b13ec7f473f793c8262a05b..2f438f76cceb2cb1681c30d62324e6a03c4b2108 100644 (file)
@@ -100,6 +100,8 @@ static struct target target = {
        .uid    = UINT_MAX,
 };
 
+typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
+
 static int                     run_count                       =  1;
 static bool                    no_inherit                      = false;
 static volatile pid_t          child_pid                       = -1;
@@ -119,7 +121,7 @@ static unsigned int         unit_width                      = 4; /* strlen("unit") */
 static bool                    forever                         = false;
 static struct timespec         ref_time;
 static struct cpu_map          *aggr_map;
-static int                     (*aggr_get_id)(struct cpu_map *m, int cpu);
+static aggr_get_id_t           aggr_get_id;
 
 static volatile int done = 0;
 
@@ -215,7 +217,7 @@ static void read_counters(bool close_counters)
 
        evlist__for_each(evsel_list, counter) {
                if (read_counter(counter))
-                       pr_warning("failed to read counter %s\n", counter->name);
+                       pr_debug("failed to read counter %s\n", counter->name);
 
                if (perf_stat_process_counter(&stat_config, counter))
                        pr_warning("failed to process counter %s\n", counter->name);
@@ -434,7 +436,7 @@ static void print_noise_pct(double total, double avg)
 
 static void print_noise(struct perf_evsel *evsel, double avg)
 {
-       struct perf_stat *ps;
+       struct perf_stat_evsel *ps;
 
        if (run_count == 1)
                return;
@@ -479,6 +481,7 @@ static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
                        csv_sep);
                break;
        case AGGR_GLOBAL:
+       case AGGR_UNSET:
        default:
                break;
        }
@@ -671,7 +674,7 @@ static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
 static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
 {
        FILE *output = stat_config.output;
-       struct perf_stat *ps = counter->priv;
+       struct perf_stat_evsel *ps = counter->priv;
        double avg = avg_stats(&ps->res_stats[0]);
        int scaled = counter->counts->scaled;
        double uval;
@@ -799,6 +802,8 @@ static void print_interval(char *prefix, struct timespec *ts)
                case AGGR_GLOBAL:
                default:
                        fprintf(output, "#           time             counts %*s events\n", unit_width, "unit");
+               case AGGR_UNSET:
+                       break;
                }
        }
 
@@ -880,6 +885,7 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
                evlist__for_each(evsel_list, counter)
                        print_counter(counter, prefix);
                break;
+       case AGGR_UNSET:
        default:
                break;
        }
@@ -940,30 +946,90 @@ static int stat__set_big_num(const struct option *opt __maybe_unused,
        return 0;
 }
 
+static int perf_stat__get_socket(struct cpu_map *map, int cpu)
+{
+       return cpu_map__get_socket(map, cpu, NULL);
+}
+
+static int perf_stat__get_core(struct cpu_map *map, int cpu)
+{
+       return cpu_map__get_core(map, cpu, NULL);
+}
+
+static int cpu_map__get_max(struct cpu_map *map)
+{
+       int i, max = -1;
+
+       for (i = 0; i < map->nr; i++) {
+               if (map->map[i] > max)
+                       max = map->map[i];
+       }
+
+       return max;
+}
+
+static struct cpu_map *cpus_aggr_map;
+
+static int perf_stat__get_aggr(aggr_get_id_t get_id, struct cpu_map *map, int idx)
+{
+       int cpu;
+
+       if (idx >= map->nr)
+               return -1;
+
+       cpu = map->map[idx];
+
+       if (cpus_aggr_map->map[cpu] == -1)
+               cpus_aggr_map->map[cpu] = get_id(map, idx);
+
+       return cpus_aggr_map->map[cpu];
+}
+
+static int perf_stat__get_socket_cached(struct cpu_map *map, int idx)
+{
+       return perf_stat__get_aggr(perf_stat__get_socket, map, idx);
+}
+
+static int perf_stat__get_core_cached(struct cpu_map *map, int idx)
+{
+       return perf_stat__get_aggr(perf_stat__get_core, map, idx);
+}
+
 static int perf_stat_init_aggr_mode(void)
 {
+       int nr;
+
        switch (stat_config.aggr_mode) {
        case AGGR_SOCKET:
                if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
                        perror("cannot build socket map");
                        return -1;
                }
-               aggr_get_id = cpu_map__get_socket;
+               aggr_get_id = perf_stat__get_socket_cached;
                break;
        case AGGR_CORE:
                if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) {
                        perror("cannot build core map");
                        return -1;
                }
-               aggr_get_id = cpu_map__get_core;
+               aggr_get_id = perf_stat__get_core_cached;
                break;
        case AGGR_NONE:
        case AGGR_GLOBAL:
        case AGGR_THREAD:
+       case AGGR_UNSET:
        default:
                break;
        }
-       return 0;
+
+       /*
+        * The evsel_list->cpus is the base we operate on,
+        * taking the highest cpu number to be the size of
+        * the aggregation translate cpumap.
+        */
+       nr = cpu_map__get_max(evsel_list->cpus);
+       cpus_aggr_map = cpu_map__empty_new(nr + 1);
+       return cpus_aggr_map ? 0 : -ENOMEM;
 }
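
A short, illustrative sketch of the caching pattern the hunk above introduces (these names are mine, not perf's): the table is sized by the highest CPU number, every slot starts at -1, and the socket/core id for a CPU is resolved only on first use.

	/* illustrative only -- not the perf API */
	struct id_cache {
		int *ids;	/* indexed by CPU number; -1 = not resolved yet */
		int  nr;	/* highest CPU number + 1 */
	};

	static int cached_id(struct id_cache *c, int cpu, int (*get_id)(int cpu))
	{
		if (cpu < 0 || cpu >= c->nr)
			return -1;
		if (c->ids[cpu] == -1)
			c->ids[cpu] = get_id(cpu);
		return c->ids[cpu];
	}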
 
 /*
@@ -1179,7 +1245,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_STRING(0, "post", &post_cmd, "command",
                        "command to run after to the measured command"),
        OPT_UINTEGER('I', "interval-print", &stat_config.interval,
-                   "print counts at regular interval in ms (>= 100)"),
+                   "print counts at regular interval in ms (>= 10)"),
        OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
                     "aggregate counts per processor socket", AGGR_SOCKET),
        OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
@@ -1332,9 +1398,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
                thread_map__read_comms(evsel_list->threads);
 
        if (interval && interval < 100) {
-               pr_err("print interval must be >= 100ms\n");
-               parse_options_usage(stat_usage, options, "I", 1);
-               goto out;
+               if (interval < 10) {
+                       pr_err("print interval must be >= 10ms\n");
+                       parse_options_usage(stat_usage, options, "I", 1);
+                       goto out;
+               } else
+                       pr_warning("print interval < 100ms. "
+                                  "The overhead percentage could be high in some cases. "
+                                  "Please proceed with caution.\n");
        }
 
        if (perf_evlist__alloc_stats(evsel_list, interval))
index 8c465c83aabf31bfd4ce5e600c4084b6d2a62746..7e2e72e6d9d16323c3c448986372fa13234d368a 100644 (file)
@@ -655,7 +655,7 @@ static int symbol_filter(struct map *map, struct symbol *sym)
 {
        const char *name = sym->name;
 
-       if (!map->dso->kernel)
+       if (!__map__is_kernel(map))
                return 0;
        /*
         * ppc64 uses function descriptors and appends a '.' to the
@@ -857,9 +857,12 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
                         * TODO: we don't process guest user from host side
                         * except simple counting.
                         */
-                       /* Fall thru */
-               default:
                        goto next_event;
+               default:
+                       if (event->header.type == PERF_RECORD_SAMPLE)
+                               goto next_event;
+                       machine = &session->machines.host;
+                       break;
                }
 
 
@@ -952,7 +955,7 @@ static int __cmd_top(struct perf_top *top)
        machines__set_symbol_filter(&top->session->machines, symbol_filter);
 
        if (!objdump_path) {
-               ret = perf_session_env__lookup_objdump(&top->session->header.env);
+               ret = perf_env__lookup_objdump(&top->session->header.env);
                if (ret)
                        goto out_delete;
        }
@@ -961,8 +964,18 @@ static int __cmd_top(struct perf_top *top)
        if (ret)
                goto out_delete;
 
+       if (perf_session__register_idle_thread(top->session) == NULL)
+               goto out_delete;
+
        machine__synthesize_threads(&top->session->machines.host, &opts->target,
                                    top->evlist->threads, false, opts->proc_map_timeout);
+
+       if (sort__has_socket) {
+               ret = perf_env__read_cpu_topology_map(&perf_env);
+               if (ret < 0)
+                       goto out_err_cpu_topo;
+       }
+
        ret = perf_top__start_counters(top);
        if (ret)
                goto out_delete;
@@ -1020,6 +1033,14 @@ out_delete:
        top->session = NULL;
 
        return ret;
+
+out_err_cpu_topo: {
+       char errbuf[BUFSIZ];
+       const char *err = strerror_r(-ret, errbuf, sizeof(errbuf));
+
+       ui__error("Could not read the CPU topology map: %s\n", err);
+       goto out_delete;
+}
 }
 
 static int
@@ -1032,8 +1053,22 @@ callchain_opt(const struct option *opt, const char *arg, int unset)
 static int
 parse_callchain_opt(const struct option *opt, const char *arg, int unset)
 {
-       symbol_conf.use_callchain = true;
-       return record_parse_callchain_opt(opt, arg, unset);
+       struct record_opts *record = (struct record_opts *)opt->value;
+
+       record->callgraph_set = true;
+       callchain_param.enabled = !unset;
+       callchain_param.record_mode = CALLCHAIN_FP;
+
+       /*
+        * --no-call-graph
+        */
+       if (unset) {
+               symbol_conf.use_callchain = false;
+               callchain_param.record_mode = CALLCHAIN_NONE;
+               return 0;
+       }
+
+       return parse_callchain_top_opt(arg);
 }
 
 static int perf_top_config(const char *var, const char *value, void *cb)
@@ -1058,6 +1093,9 @@ parse_percent_limit(const struct option *opt, const char *arg,
        return 0;
 }
 
+const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
+       "\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
+
 int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
 {
        char errbuf[BUFSIZ];
@@ -1133,11 +1171,11 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
                    "Show a column with the number of samples"),
        OPT_CALLBACK_NOOPT('g', NULL, &top.record_opts,
-                          NULL, "enables call-graph recording",
+                          NULL, "enables call-graph recording and display",
                           &callchain_opt),
        OPT_CALLBACK(0, "call-graph", &top.record_opts,
-                    "mode[,dump_size]", record_callchain_help,
-                    &parse_callchain_opt),
+                    "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
+                    top_callchain_help, &parse_callchain_opt),
        OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
                    "Accumulate callchains of children and show total overhead as well"),
        OPT_INTEGER(0, "max-stack", &top.max_stack,
@@ -1267,6 +1305,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
                perf_hpp__cancel_cumulate();
        }
 
+       if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
+               callchain_param.order = ORDER_CALLER;
+
        symbol_conf.priv_size = sizeof(struct annotation);
 
        symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
index 4e3abba03062f3e84cfbdea5ca2c07adc641b35b..c783d8fd3a80fb4ae29ab2cb81d3a7d8b54a0123 100644 (file)
@@ -17,6 +17,7 @@
  */
 
 #include <traceevent/event-parse.h>
+#include <api/fs/tracing_path.h>
 #include "builtin.h"
 #include "util/color.h"
 #include "util/debug.h"
@@ -37,6 +38,7 @@
 #include <stdlib.h>
 #include <sys/mman.h>
 #include <linux/futex.h>
+#include <linux/err.h>
 
 /* For older distros: */
 #ifndef MAP_STACK
@@ -244,13 +246,14 @@ static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void
        struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
 
        /* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
-       if (evsel == NULL)
+       if (IS_ERR(evsel))
                evsel = perf_evsel__newtp("syscalls", direction);
 
-       if (evsel) {
-               if (perf_evsel__init_syscall_tp(evsel, handler))
-                       goto out_delete;
-       }
+       if (IS_ERR(evsel))
+               return NULL;
+
+       if (perf_evsel__init_syscall_tp(evsel, handler))
+               goto out_delete;
 
        return evsel;
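
perf_evsel__newtp() now returns an error-encoded pointer instead of NULL on failure, which is why the NULL checks in this and the following hunks become IS_ERR()/PTR_ERR(). A hedged caller-side sketch of that convention (the tracepoint names are only examples; the evsel type comes from perf's util/evsel.h):

	#include <linux/err.h>

	static int open_sched_switch(struct perf_evsel **out)
	{
		struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");

		if (IS_ERR(evsel))		/* e.g. -ENOENT if the tracepoint is absent */
			return PTR_ERR(evsel);
		*out = evsel;
		return 0;
	}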
 
@@ -582,6 +585,12 @@ static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct sysc
 
 #define SCA_FUTEX_OP  syscall_arg__scnprintf_futex_op
 
+static const char *bpf_cmd[] = {
+       "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
+       "MAP_GET_NEXT_KEY", "PROG_LOAD",
+};
+static DEFINE_STRARRAY(bpf_cmd);
+
 static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);
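
DEFINE_STRARRAY(bpf_cmd) above lets 'perf trace' print the first argument of the bpf(2) syscall by name rather than as a raw integer. A hypothetical stand-alone version of that lookup (not perf's actual macro expansion):

	#include <stddef.h>

	static const char *bpf_cmd_name(size_t cmd)
	{
		static const char * const names[] = {
			"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM",
			"MAP_DELETE_ELEM", "MAP_GET_NEXT_KEY", "PROG_LOAD",
		};

		return cmd < sizeof(names) / sizeof(names[0]) ? names[cmd] : NULL;
	}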
 
@@ -1008,6 +1017,7 @@ static struct syscall_fmt {
          .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
                             [1] = SCA_ACCMODE,  /* mode */ }, },
        { .name     = "arch_prctl", .errmsg = true, .alias = "prctl", },
+       { .name     = "bpf",        .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
        { .name     = "brk",        .hexret = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
        { .name     = "chdir",      .errmsg = true,
@@ -1704,12 +1714,12 @@ static int trace__read_syscall_info(struct trace *trace, int id)
        snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
        sc->tp_format = trace_event__tp_format("syscalls", tp_name);
 
-       if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
+       if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
                snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
                sc->tp_format = trace_event__tp_format("syscalls", tp_name);
        }
 
-       if (sc->tp_format == NULL)
+       if (IS_ERR(sc->tp_format))
                return -1;
 
        sc->args = sc->tp_format->format.fields;
@@ -2389,7 +2399,8 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
 static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
-       if (evsel == NULL)
+
+       if (IS_ERR(evsel))
                return false;
 
        if (perf_evsel__field(evsel, "pathname") == NULL) {
@@ -2686,11 +2697,11 @@ out_delete_evlist:
        char errbuf[BUFSIZ];
 
 out_error_sched_stat_runtime:
-       debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
+       tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
        goto out_error;
 
 out_error_raw_syscalls:
-       debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
+       tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
        goto out_error;
 
 out_error_mmap:
index 827557fc751123bf030363d355b5b801911d6cb1..de89ec57436171ef74e5aa48135db8c2e9b4c0a0 100644 (file)
@@ -106,9 +106,14 @@ ifdef LIBBABELTRACE
   FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
 endif
 
+FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi
 # include ARCH specific config
 -include $(src-perf)/arch/$(ARCH)/Makefile
 
+ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
+  CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
+endif
+
 include $(src-perf)/config/utilities.mak
 
 ifeq ($(call get-executable,$(FLEX)),)
@@ -233,6 +238,7 @@ ifdef NO_LIBELF
   NO_DEMANGLE := 1
   NO_LIBUNWIND := 1
   NO_LIBDW_DWARF_UNWIND := 1
+  NO_LIBBPF := 1
 else
   ifeq ($(feature-libelf), 0)
     ifeq ($(feature-glibc), 1)
@@ -242,13 +248,14 @@ else
       LIBC_SUPPORT := 1
     endif
     ifeq ($(LIBC_SUPPORT),1)
-      msg := $(warning No libelf found, disables 'probe' tool, please install elfutils-libelf-devel/libelf-dev);
+      msg := $(warning No libelf found, disables 'probe' tool and BPF support in 'perf record', please install elfutils-libelf-devel/libelf-dev);
 
       NO_LIBELF := 1
       NO_DWARF := 1
       NO_DEMANGLE := 1
       NO_LIBUNWIND := 1
       NO_LIBDW_DWARF_UNWIND := 1
+      NO_LIBBPF := 1
     else
       ifneq ($(filter s% -static%,$(LDFLAGS),),)
         msg := $(error No static glibc found, please install glibc-static);
@@ -305,6 +312,13 @@ ifndef NO_LIBELF
       $(call detected,CONFIG_DWARF)
     endif # PERF_HAVE_DWARF_REGS
   endif # NO_DWARF
+
+  ifndef NO_LIBBPF
+    ifeq ($(feature-bpf), 1)
+      CFLAGS += -DHAVE_LIBBPF_SUPPORT
+      $(call detected,CONFIG_LIBBPF)
+    endif
+  endif # NO_LIBBPF
 endif # NO_LIBELF
 
 ifeq ($(ARCH),powerpc)
@@ -320,6 +334,13 @@ ifndef NO_LIBUNWIND
   endif
 endif
 
+ifndef NO_LIBBPF
+  ifneq ($(feature-bpf), 1)
+    msg := $(warning BPF API too old. Please install recent kernel headers. BPF support in 'perf record' is disabled.)
+    NO_LIBBPF := 1
+  endif
+endif
+
 dwarf-post-unwind := 1
 dwarf-post-unwind-text := BUG
 
@@ -573,9 +594,14 @@ ifndef NO_LIBNUMA
     msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev);
     NO_LIBNUMA := 1
   else
-    CFLAGS += -DHAVE_LIBNUMA_SUPPORT
-    EXTLIBS += -lnuma
-    $(call detected,CONFIG_NUMA)
+    ifeq ($(feature-numa_num_possible_cpus), 0)
+      msg := $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8);
+      NO_LIBNUMA := 1
+    else
+      CFLAGS += -DHAVE_LIBNUMA_SUPPORT
+      EXTLIBS += -lnuma
+      $(call detected,CONFIG_NUMA)
+    endif
   endif
 endif
 
@@ -621,8 +647,13 @@ ifdef LIBBABELTRACE
 endif
 
 ifndef NO_AUXTRACE
-  $(call detected,CONFIG_AUXTRACE)
-  CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+  ifeq ($(feature-get_cpuid), 0)
+    msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
+    NO_AUXTRACE := 1
+  else
+    $(call detected,CONFIG_AUXTRACE)
+    CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+  endif
 endif
 
 # Among the variables below, these:
index 07dbff5c0e60ea10b0c330b194e234666b84f6c5..3d4c7c09adeae9afeeccf69161882b2fc65f9e41 100644 (file)
@@ -8,14 +8,16 @@
  */
 #include "builtin.h"
 
+#include "util/env.h"
 #include "util/exec_cmd.h"
 #include "util/cache.h"
 #include "util/quote.h"
 #include "util/run-command.h"
 #include "util/parse-events.h"
 #include "util/parse-options.h"
+#include "util/bpf-loader.h"
 #include "util/debug.h"
-#include <api/fs/debugfs.h>
+#include <api/fs/tracing_path.h>
 #include <pthread.h>
 
 const char perf_usage_string[] =
@@ -160,6 +162,20 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version"))
                        break;
 
+               /*
+                * Shortcut for '-h' and '-v' options to invoke help
+                * and version command.
+                */
+               if (!strcmp(cmd, "-h")) {
+                       (*argv)[0] = "--help";
+                       break;
+               }
+
+               if (!strcmp(cmd, "-v")) {
+                       (*argv)[0] = "--version";
+                       break;
+               }
+
                /*
                 * Check remaining flags.
                 */
@@ -214,7 +230,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                fprintf(stderr, "No directory given for --debugfs-dir.\n");
                                usage(perf_usage_string);
                        }
-                       perf_debugfs_set_path((*argv)[1]);
+                       tracing_path_set((*argv)[1]);
                        if (envchanged)
                                *envchanged = 1;
                        (*argv)++;
@@ -230,7 +246,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                        (*argv)++;
                        (*argc)--;
                } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
-                       perf_debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR));
+                       tracing_path_set(cmd + strlen(CMD_DEBUGFS_DIR));
                        fprintf(stderr, "dir: %s\n", tracing_path);
                        if (envchanged)
                                *envchanged = 1;
@@ -369,6 +385,8 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
 
        status = p->fn(argc, argv, prefix);
        exit_browser(status);
+       perf_env__exit(&perf_env);
+       bpf__clear();
 
        if (status)
                return status & 0xff;
@@ -517,8 +535,10 @@ int main(int argc, const char **argv)
        cmd = perf_extract_argv0_path(argv[0]);
        if (!cmd)
                cmd = "perf-help";
-       /* get debugfs mount point from /proc/mounts */
-       perf_debugfs_mount(NULL);
+
+       /* get debugfs/tracefs mount point from /proc/mounts */
+       tracing_path_mount();
+
        /*
         * "perf-xxxx" is the same as "perf xxxx", but we obviously:
         *
index b9d508336ae6ff0f39de4bee8a98083503ab4dca..c235c22b107ab5531d3f03de1f328f5fc2758a4c 100755 (executable)
 
 import perf
 
-def main():
+def main(context_switch = 0, thread = -1):
        cpus = perf.cpu_map()
-       threads = perf.thread_map()
+       threads = perf.thread_map(thread)
        evsel = perf.evsel(type   = perf.TYPE_SOFTWARE,
                           config = perf.COUNT_SW_DUMMY,
                           task = 1, comm = 1, mmap = 0, freq = 0,
                           wakeup_events = 1, watermark = 1,
-                          sample_id_all = 1,
+                          sample_id_all = 1, context_switch = context_switch,
                           sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
 
        """What we want are just the PERF_RECORD_ lifetime events for threads,
@@ -48,4 +48,21 @@ def main():
                        print event
 
 if __name__ == '__main__':
+    """
+       To test the PERF_RECORD_SWITCH record, pick a pid and fill it in
+       on the following line.

+
+       Example output:
+
+cpu: 3, pid: 31463, tid: 31593 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31593, switch_out: 1 }
+cpu: 1, pid: 31463, tid: 31489 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31489, switch_out: 1 }
+cpu: 2, pid: 31463, tid: 31496 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31496, switch_out: 1 }
+cpu: 3, pid: 31463, tid: 31491 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31491, switch_out: 0 }
+
+       It is possible as well to use event.misc & perf.PERF_RECORD_MISC_SWITCH_OUT
+       to figure out if this is a context switch in or out of the monitored threads.
+
+       If bored, please add command line option parsing support for these options :-)
+    """
+    # main(context_switch = 1, thread = 31463)
     main()
index 84a32037a80fff854108b21cc66eb5ebadab2d21..1b02cdc0cab69b53f0aa97ed544d6383fba56d3d 100644 (file)
@@ -61,6 +61,142 @@ import datetime
 #
 # An example of using the database is provided by the script
 # call-graph-from-postgresql.py.  Refer to that script for details.
+#
+# Tables:
+#
+#      The tables largely correspond to perf tools' data structures.  They are largely self-explanatory.
+#
+#      samples
+#
+#              'samples' is the main table. It represents what instruction was executing at a point in time
+#              when something (a selected event) happened.  The memory address is the instruction pointer or 'ip'.
+#
+#      calls
+#
+#              'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'.
+#              'calls' is only created when the 'calls' option to this script is specified.
+#
+#      call_paths
+#
+#              'call_paths' represents all the call stacks.  Each 'call' has an associated record in 'call_paths'.
+#              'call_paths' is only created when the 'calls' option to this script is specified.
+#
+#      branch_types
+#
+#              'branch_types' provides descriptions for each type of branch.
+#
+#      comm_threads
+#
+#              'comm_threads' shows how 'comms' relates to 'threads'.
+#
+#      comms
+#
+#              'comms' contains a record for each 'comm' - the name given to the executable that is running.
+#
+#      dsos
+#
+#              'dsos' contains a record for each executable file or library.
+#
+#      machines
+#
+#              'machines' can be used to distinguish virtual machines if virtualization is supported.
+#
+#      selected_events
+#
+#              'selected_events' contains a record for each kind of event that has been sampled.
+#
+#      symbols
+#
+#              'symbols' contains a record for each symbol.  Only symbols that have samples are present.
+#
+#      threads
+#
+#              'threads' contains a record for each thread.
+#
+# Views:
+#
+#      Most of the tables have views for more friendly display.  The views are:
+#
+#              calls_view
+#              call_paths_view
+#              comm_threads_view
+#              dsos_view
+#              machines_view
+#              samples_view
+#              symbols_view
+#              threads_view
+#
+# More examples of browsing the database with psql:
+#   Note that some of the examples are not the most optimal SQL queries.
+#   Note that call information is only available if the script's 'calls' option has been used.
+#
+#      Top 10 function calls (not aggregated by symbol):
+#
+#              SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10;
+#
+#      Top 10 function calls (aggregated by symbol):
+#
+#              SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,
+#                      SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count
+#                      FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10;
+#
+#              Note that the branch count gives a rough estimation of cpu usage, so functions
+#              that took a long time but have a relatively low branch count must have spent time
+#              waiting.
+#
+#      Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'):
+#
+#              SELECT * FROM symbols_view WHERE name LIKE '%alloc%';
+#
+#      Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187):
+#
+#              SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10;
+#
+#      Show function calls made by function in the same context (i.e. same call path) (e.g. one with call_path_id 254):
+#
+#              SELECT * FROM calls_view WHERE parent_call_path_id = 254;
+#
+#      Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670)
+#
+#              SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%';
+#
+#      Show transactions:
+#
+#              SELECT * FROM samples_view WHERE event = 'transactions';
+#
+#              Note that transaction start has 'in_tx' true, whereas transaction end has 'in_tx' false.
+#              Transaction aborts have branch_type_name 'transaction abort'.
+#
+#      Show transaction aborts:
+#
+#              SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort';
+#
+# To print a call stack requires walking the call_paths table.  For example this python script:
+#   #!/usr/bin/python2
+#
+#   import sys
+#   from PySide.QtSql import *
+#
+#   if __name__ == '__main__':
+#           if (len(sys.argv) < 3):
+#                   print >> sys.stderr, "Usage is: printcallstack.py <database name> <call_path_id>"
+#                   raise Exception("Too few arguments")
+#           dbname = sys.argv[1]
+#           call_path_id = sys.argv[2]
+#           db = QSqlDatabase.addDatabase('QPSQL')
+#           db.setDatabaseName(dbname)
+#           if not db.open():
+#                   raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
+#           query = QSqlQuery(db)
+#           print "    id          ip  symbol_id  symbol                          dso_id  dso_short_name"
+#           while call_path_id != 0 and call_path_id != 1:
+#                   ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id))
+#                   if not ret:
+#                           raise Exception("Query failed: " + query.lastError().text())
+#                   if not query.next():
+#                           raise Exception("Query failed")
+#                   print "{0:>6}  {1:>10}  {2:>9}  {3:<30}  {4:>6}  {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5))
+#                   call_path_id = query.value(6)
 
 from PySide.QtSql import *
 
@@ -244,6 +380,91 @@ if perf_db_export_calls:
                'parent_call_path_id    bigint,'
                'flags          integer)')
 
+do_query(query, 'CREATE VIEW machines_view AS '
+       'SELECT '
+               'id,'
+               'pid,'
+               'root_dir,'
+               'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
+       ' FROM machines')
+
+do_query(query, 'CREATE VIEW dsos_view AS '
+       'SELECT '
+               'id,'
+               'machine_id,'
+               '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+               'short_name,'
+               'long_name,'
+               'build_id'
+       ' FROM dsos')
+
+do_query(query, 'CREATE VIEW symbols_view AS '
+       'SELECT '
+               'id,'
+               'name,'
+               '(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
+               'dso_id,'
+               'sym_start,'
+               'sym_end,'
+               'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
+       ' FROM symbols')
+
+do_query(query, 'CREATE VIEW threads_view AS '
+       'SELECT '
+               'id,'
+               'machine_id,'
+               '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+               'process_id,'
+               'pid,'
+               'tid'
+       ' FROM threads')
+
+do_query(query, 'CREATE VIEW comm_threads_view AS '
+       'SELECT '
+               'comm_id,'
+               '(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+               'thread_id,'
+               '(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+               '(SELECT tid FROM threads WHERE id = thread_id) AS tid'
+       ' FROM comm_threads')
+
+if perf_db_export_calls:
+       do_query(query, 'CREATE VIEW call_paths_view AS '
+               'SELECT '
+                       'c.id,'
+                       'to_hex(c.ip) AS ip,'
+                       'c.symbol_id,'
+                       '(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
+                       '(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
+                       '(SELECT dso FROM symbols_view  WHERE id = c.symbol_id) AS dso_short_name,'
+                       'c.parent_id,'
+                       'to_hex(p.ip) AS parent_ip,'
+                       'p.symbol_id AS parent_symbol_id,'
+                       '(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
+                       '(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
+                       '(SELECT dso FROM symbols_view  WHERE id = p.symbol_id) AS parent_dso_short_name'
+               ' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
+       do_query(query, 'CREATE VIEW calls_view AS '
+               'SELECT '
+                       'calls.id,'
+                       'thread_id,'
+                       '(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+                       '(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+                       '(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+                       'call_path_id,'
+                       'to_hex(ip) AS ip,'
+                       'symbol_id,'
+                       '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+                       'call_time,'
+                       'return_time,'
+                       'return_time - call_time AS elapsed_time,'
+                       'branch_count,'
+                       'call_id,'
+                       'return_id,'
+                       'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
+                       'parent_call_path_id'
+               ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
+
 do_query(query, 'CREATE VIEW samples_view AS '
        'SELECT '
                'id,'
index c1518bdd0f1b626242dd101698b13304106c200e..50de2253cff6c64450a23d0332d22c43c802dd8f 100644 (file)
@@ -8,7 +8,6 @@ perf-y += openat-syscall-all-cpus.o
 perf-y += openat-syscall-tp-fields.o
 perf-y += mmap-basic.o
 perf-y += perf-record.o
-perf-y += rdpmc.o
 perf-y += evsel-roundtrip-name.o
 perf-y += evsel-tp-sched.o
 perf-y += fdarray.o
@@ -33,8 +32,7 @@ perf-y += parse-no-sample-id-all.o
 perf-y += kmod-path.o
 perf-y += thread-map.o
 perf-y += llvm.o
-
-perf-$(CONFIG_X86) += perf-time-to-tsc.o
+perf-y += topology.o
 
 ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64))
 perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c
new file mode 100644 (file)
index 0000000..410a70b
--- /dev/null
@@ -0,0 +1,44 @@
+#ifndef LINUX_VERSION_CODE
+# error Need LINUX_VERSION_CODE
+# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
+#endif
+#define BPF_ANY 0
+#define BPF_MAP_TYPE_ARRAY 2
+#define BPF_FUNC_map_lookup_elem 1
+#define BPF_FUNC_map_update_elem 2
+
+static void *(*bpf_map_lookup_elem)(void *map, void *key) =
+       (void *) BPF_FUNC_map_lookup_elem;
+static void *(*bpf_map_update_elem)(void *map, void *key, void *value, int flags) =
+       (void *) BPF_FUNC_map_update_elem;
+
+struct bpf_map_def {
+       unsigned int type;
+       unsigned int key_size;
+       unsigned int value_size;
+       unsigned int max_entries;
+};
+
+#define SEC(NAME) __attribute__((section(NAME), used))
+struct bpf_map_def SEC("maps") flip_table = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .max_entries = 1,
+};
+
+SEC("func=sys_epoll_pwait")
+int bpf_func__sys_epoll_pwait(void *ctx)
+{
+       int ind =0;
+       int *flag = bpf_map_lookup_elem(&flip_table, &ind);
+       int new_flag;
+       if (!flag)
+               return 0;
+       /* flip flag and store back */
+       new_flag = !*flag;
+       bpf_map_update_elem(&flip_table, &ind, &new_flag, BPF_ANY);
+       return new_flag;
+}
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = LINUX_VERSION_CODE;
index 136cd934be66a862baa5300397d6b7254155d069..66f72d3d66771ab4c9ad1cec7a53b92d8bcb8f1b 100644 (file)
 #include "parse-options.h"
 #include "symbol.h"
 
-static struct test {
-       const char *desc;
-       int (*func)(void);
-} tests[] = {
+struct test __weak arch_tests[] = {
+       {
+               .func = NULL,
+       },
+};
+
+static struct test generic_tests[] = {
        {
                .desc = "vmlinux symtab matches kallsyms",
                .func = test__vmlinux_matches_kallsyms,
@@ -38,12 +41,6 @@ static struct test {
                .desc = "parse events tests",
                .func = test__parse_events,
        },
-#if defined(__x86_64__) || defined(__i386__)
-       {
-               .desc = "x86 rdpmc test",
-               .func = test__rdpmc,
-       },
-#endif
        {
                .desc = "Validate PERF_RECORD_* events & perf_sample fields",
                .func = test__PERF_RECORD,
@@ -104,12 +101,6 @@ static struct test {
                .desc = "Test software clock events have valid period values",
                .func = test__sw_clock_freq,
        },
-#if defined(__x86_64__) || defined(__i386__)
-       {
-               .desc = "Test converting perf time to TSC",
-               .func = test__perf_time_to_tsc,
-       },
-#endif
        {
                .desc = "Test object code reading",
                .func = test__code_reading,
@@ -126,14 +117,6 @@ static struct test {
                .desc = "Test parsing with no sample_id_all bit set",
                .func = test__parse_no_sample_id_all,
        },
-#if defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__)
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-       {
-               .desc = "Test dwarf unwind",
-               .func = test__dwarf_unwind,
-       },
-#endif
-#endif
        {
                .desc = "Test filtering hist entries",
                .func = test__hists_filter,
@@ -178,12 +161,21 @@ static struct test {
                .desc = "Test LLVM searching and compiling",
                .func = test__llvm,
        },
+       {
+               .desc = "Test topology in session",
+               .func = test_session_topology,
+       },
        {
                .func = NULL,
        },
 };
 
-static bool perf_test__matches(int curr, int argc, const char *argv[])
+static struct test *tests[] = {
+       generic_tests,
+       arch_tests,
+};
+
+static bool perf_test__matches(struct test *test, int curr, int argc, const char *argv[])
 {
        int i;
 
@@ -200,7 +192,7 @@ static bool perf_test__matches(int curr, int argc, const char *argv[])
                        continue;
                }
 
-               if (strstr(tests[curr].desc, argv[i]))
+               if (strstr(test->desc, argv[i]))
                        return true;
        }
 
@@ -237,27 +229,31 @@ static int run_test(struct test *test)
        return err;
 }
 
+#define for_each_test(j, t)                                    \
+       for (j = 0; j < ARRAY_SIZE(tests); j++) \
+               for (t = &tests[j][0]; t->func; t++)
+
 static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
 {
+       struct test *t;
+       unsigned int j;
        int i = 0;
        int width = 0;
 
-       while (tests[i].func) {
-               int len = strlen(tests[i].desc);
+       for_each_test(j, t) {
+               int len = strlen(t->desc);
 
                if (width < len)
                        width = len;
-               ++i;
        }
 
-       i = 0;
-       while (tests[i].func) {
+       for_each_test(j, t) {
                int curr = i++, err;
 
-               if (!perf_test__matches(curr, argc, argv))
+               if (!perf_test__matches(t, curr, argc, argv))
                        continue;
 
-               pr_info("%2d: %-*s:", i, width, tests[curr].desc);
+               pr_info("%2d: %-*s:", i, width, t->desc);
 
                if (intlist__find(skiplist, i)) {
                        color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
@@ -265,8 +261,8 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
                }
 
                pr_debug("\n--- start ---\n");
-               err = run_test(&tests[curr]);
-               pr_debug("---- end ----\n%s:", tests[curr].desc);
+               err = run_test(t);
+               pr_debug("---- end ----\n%s:", t->desc);
 
                switch (err) {
                case TEST_OK:
@@ -287,15 +283,15 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
 
 static int perf_test__list(int argc, const char **argv)
 {
+       unsigned int j;
+       struct test *t;
        int i = 0;
 
-       while (tests[i].func) {
-               int curr = i++;
-
-               if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
+       for_each_test(j, t) {
+               if (argc > 1 && !strstr(t->desc, argv[1]))
                        continue;
 
-               pr_info("%2d: %s\n", i, tests[curr].desc);
+               pr_info("%2d: %s\n", ++i, t->desc);
        }
 
        return 0;
index 39c784a100a955143e401fc46cbcb93a2a3d8136..49b1959dda41f0ceb5bc248f3c3e90b558ce9169 100644 (file)
@@ -33,20 +33,20 @@ static unsigned int hex(char c)
        return c - 'A' + 10;
 }
 
-static void read_objdump_line(const char *line, size_t line_len, void **buf,
-                             size_t *len)
+static size_t read_objdump_line(const char *line, size_t line_len, void *buf,
+                             size_t len)
 {
        const char *p;
-       size_t i;
+       size_t i, j = 0;
 
        /* Skip to a colon */
        p = strchr(line, ':');
        if (!p)
-               return;
+               return 0;
        i = p + 1 - line;
 
        /* Read bytes */
-       while (*len) {
+       while (j < len) {
                char c1, c2;
 
                /* Skip spaces */
@@ -65,20 +65,26 @@ static void read_objdump_line(const char *line, size_t line_len, void **buf,
                if (i < line_len && line[i] && !isspace(line[i]))
                        break;
                /* Store byte */
-               *(unsigned char *)*buf = (hex(c1) << 4) | hex(c2);
-               *buf += 1;
-               *len -= 1;
+               *(unsigned char *)buf = (hex(c1) << 4) | hex(c2);
+               buf += 1;
+               j++;
        }
+       /* return number of successfully read bytes */
+       return j;
 }
 
-static int read_objdump_output(FILE *f, void **buf, size_t *len)
+static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
 {
        char *line = NULL;
-       size_t line_len;
+       size_t line_len, off_last = 0;
        ssize_t ret;
        int err = 0;
+       u64 addr, last_addr = start_addr;
+
+       while (off_last < *len) {
+               size_t off, read_bytes, written_bytes;
+               unsigned char tmp[BUFSZ];
 
-       while (1) {
                ret = getline(&line, &line_len, f);
                if (feof(f))
                        break;
@@ -87,9 +93,33 @@ static int read_objdump_output(FILE *f, void **buf, size_t *len)
                        err = -1;
                        break;
                }
-               read_objdump_line(line, ret, buf, len);
+
+               /* read objdump data into temporary buffer */
+               read_bytes = read_objdump_line(line, ret, tmp, sizeof(tmp));
+               if (!read_bytes)
+                       continue;
+
+               if (sscanf(line, "%"PRIx64, &addr) != 1)
+                       continue;
+               if (addr < last_addr) {
+                       pr_debug("addr going backwards, read beyond section?\n");
+                       break;
+               }
+               last_addr = addr;
+
+               /* copy it from temporary buffer to 'buf' according
+                * to address on current objdump line */
+               off = addr - start_addr;
+               if (off >= *len)
+                       break;
+               written_bytes = MIN(read_bytes, *len - off);
+               memcpy(buf + off, tmp, written_bytes);
+               off_last = off + written_bytes;
        }
 
+       /* len returns number of bytes that could not be read */
+       *len -= off_last;
+
        free(line);
 
        return err;
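
The rewritten read_objdump_output() no longer appends bytes blindly: the bytes parsed from each objdump line are copied to offset (addr - start_addr) in the destination buffer, so lines that objdump skips can no longer shift later bytes (and the added -z asks objdump not to skip all-zero blocks in the first place). A minimal sketch of that placement step, with illustrative names:

	#include <stdint.h>
	#include <string.h>

	static size_t place_line(unsigned char *buf, size_t buf_len, uint64_t start_addr,
				 uint64_t line_addr, const unsigned char *bytes, size_t n)
	{
		size_t off;

		if (line_addr < start_addr || line_addr - start_addr >= buf_len)
			return 0;			/* line lies outside the requested window */
		off = line_addr - start_addr;
		if (n > buf_len - off)
			n = buf_len - off;		/* clamp to the end of the buffer */
		memcpy(buf + off, bytes, n);
		return off + n;				/* bytes filled so far */
	}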
@@ -103,7 +133,7 @@ static int read_via_objdump(const char *filename, u64 addr, void *buf,
        FILE *f;
        int ret;
 
-       fmt = "%s -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
+       fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
        ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
                       filename);
        if (ret <= 0 || (size_t)ret >= sizeof(cmd))
@@ -120,7 +150,7 @@ static int read_via_objdump(const char *filename, u64 addr, void *buf,
                return -1;
        }
 
-       ret = read_objdump_output(f, &buf, &len);
+       ret = read_objdump_output(f, buf, &len, addr);
        if (len) {
                pr_debug("objdump read too few bytes\n");
                if (!ret)
@@ -132,6 +162,18 @@ static int read_via_objdump(const char *filename, u64 addr, void *buf,
        return ret;
 }
 
+static void dump_buf(unsigned char *buf, size_t len)
+{
+       size_t i;
+
+       for (i = 0; i < len; i++) {
+               pr_debug("0x%02x ", buf[i]);
+               if (i % 16 == 15)
+                       pr_debug("\n");
+       }
+       pr_debug("\n");
+}
+
 static int read_object_code(u64 addr, size_t len, u8 cpumode,
                            struct thread *thread, struct state *state)
 {
@@ -234,6 +276,10 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
        /* The results should be identical */
        if (memcmp(buf1, buf2, len)) {
                pr_debug("Bytes read differ from those read by objdump\n");
+               pr_debug("buf1 (dso):\n");
+               dump_buf(buf1, len);
+               pr_debug("buf2 (objdump):\n");
+               dump_buf(buf2, len);
                return -1;
        }
        pr_debug("Bytes read match those read by objdump\n");
@@ -427,7 +473,7 @@ static int do_test_code_reading(bool try_kcore)
                symbol_conf.kallsyms_name = "/proc/kallsyms";
 
        /* Load kernel map */
-       map = machine->vmlinux_maps[MAP__FUNCTION];
+       map = machine__kernel_map(machine);
        ret = map__load(map, NULL);
        if (ret < 0) {
                pr_debug("map__load failed\n");
index 40b36c4624275a360d4a0f3226eaf850d6d0e825..07221793a3acec65d083f68548d4bf5e287b4628 100644 (file)
 #include "thread.h"
 #include "callchain.h"
 
+#if defined (__x86_64__) || defined (__i386__)
+#include "arch-tests.h"
+#endif
+
 /* For bsearch. We try to unwind functions in shared object. */
 #include <stdlib.h>
 
index 52162425c969ef2b29596658931ec0c85e822182..790e413d9a1f39c1945bb4519a4391ded30560e9 100644 (file)
@@ -1,3 +1,4 @@
+#include <linux/err.h>
 #include <traceevent/event-parse.h>
 #include "evsel.h"
 #include "tests.h"
@@ -36,8 +37,8 @@ int test__perf_evsel__tp_sched_test(void)
        struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
        int ret = 0;
 
-       if (evsel == NULL) {
-               pr_debug("perf_evsel__new\n");
+       if (IS_ERR(evsel)) {
+               pr_debug("perf_evsel__newtp failed with %ld\n", PTR_ERR(evsel));
                return -1;
        }
 
@@ -66,6 +67,11 @@ int test__perf_evsel__tp_sched_test(void)
 
        evsel = perf_evsel__newtp("sched", "sched_wakeup");
 
+       if (IS_ERR(evsel)) {
+               pr_debug("perf_evsel__newtp failed with %ld\n", PTR_ERR(evsel));
+               return -1;
+       }
+
        if (perf_evsel__test_field(evsel, "comm", 16, true))
                ret = -1;
 
index ce48775e6ada13886000013183908f3f6b63f26d..818acf875dd0bb4afc7e4e60873125c4b4448a5c 100644 (file)
@@ -16,30 +16,31 @@ struct sample {
        struct thread *thread;
        struct map *map;
        struct symbol *sym;
+       int socket;
 };
 
 /* For the numbers, see hists_common.c */
 static struct sample fake_samples[] = {
        /* perf [kernel] schedule() */
-       { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
+       { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, .socket = 0 },
        /* perf [perf]   main() */
-       { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
+       { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, .socket = 0 },
        /* perf [libc]   malloc() */
-       { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
+       { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, .socket = 0 },
        /* perf [perf]   main() */
-       { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, }, /* will be merged */
+       { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, .socket = 0 }, /* will be merged */
        /* perf [perf]   cmd_record() */
-       { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
+       { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, .socket = 1 },
        /* perf [kernel] page_fault() */
-       { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
+       { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, .socket = 1 },
        /* bash [bash]   main() */
-       { .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_MAIN, },
+       { .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_MAIN, .socket = 2 },
        /* bash [bash]   xmalloc() */
-       { .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
+       { .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, .socket = 2 },
        /* bash [libc]   malloc() */
-       { .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, },
+       { .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, .socket = 3 },
        /* bash [kernel] page_fault() */
-       { .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
+       { .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, .socket = 3 },
 };
 
 static int add_hist_entries(struct perf_evlist *evlist,
@@ -83,6 +84,7 @@ static int add_hist_entries(struct perf_evlist *evlist,
                                                          &sample) < 0)
                                goto out;
 
+                       al.socket = fake_samples[i].socket;
                        if (hist_entry_iter__add(&iter, &al,
                                                 PERF_MAX_STACK_DEPTH, NULL) < 0) {
                                addr_location__put(&al);
@@ -253,6 +255,39 @@ int test__hists_filter(void)
                TEST_ASSERT_VAL("Unmatched total period for symbol filter",
                                hists->stats.total_non_filtered_period == 300);
 
+               /* remove symbol filter first */
+               hists->symbol_filter_str = NULL;
+               hists__filter_by_symbol(hists);
+
+               /* now applying socket filters */
+               hists->socket_filter = 2;
+               hists__filter_by_socket(hists);
+
+               if (verbose > 2) {
+                       pr_info("Histogram for socket filters\n");
+                       print_hists_out(hists);
+               }
+
+               /* normal stats should be invariant */
+               TEST_ASSERT_VAL("Invalid nr samples",
+                               hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
+               TEST_ASSERT_VAL("Invalid nr hist entries",
+                               hists->nr_entries == 9);
+               TEST_ASSERT_VAL("Invalid total period",
+                               hists->stats.total_period == 1000);
+
+               /* but filter stats are changed */
+               TEST_ASSERT_VAL("Unmatched nr samples for socket filter",
+                               hists->stats.nr_non_filtered_samples == 2);
+               TEST_ASSERT_VAL("Unmatched nr hist entries for socket filter",
+                               hists->nr_non_filtered_entries == 2);
+               TEST_ASSERT_VAL("Unmatched total period for socket filter",
+                               hists->stats.total_non_filtered_period == 200);
+
+               /* remove socket filter first */
+               hists->socket_filter = -1;
+               hists__filter_by_socket(hists);
+
                /* now applying all filters at once. */
                hists->thread_filter = fake_samples[1].thread;
                hists->dso_filter = fake_samples[1].map->dso;
index ba31c4bd441d72195fbc48fa72673ba7dad0c337..2cbd0c6901e35ff3954bf06892bb02b3c9f92cef 100644 (file)
@@ -44,6 +44,7 @@ make_no_libnuma     := NO_LIBNUMA=1
 make_no_libaudit    := NO_LIBAUDIT=1
 make_no_libbionic   := NO_LIBBIONIC=1
 make_no_auxtrace    := NO_AUXTRACE=1
+make_no_libbpf     := NO_LIBBPF=1
 make_tags           := tags
 make_cscope         := cscope
 make_help           := help
@@ -66,7 +67,7 @@ make_static         := LDFLAGS=-static
 make_minimal        := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
 make_minimal        += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
 make_minimal        += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
-make_minimal        += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1
+make_minimal        += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
 
 # $(run) contains all available tests
 run := make_pure
@@ -94,6 +95,7 @@ run += make_no_libnuma
 run += make_no_libaudit
 run += make_no_libbionic
 run += make_no_auxtrace
+run += make_no_libbpf
 run += make_help
 run += make_doc
 run += make_perf_o
index 666b67a4df9dd0d8ea8d41736af4355008c11464..4495493c943111aa6c76fd492d7ca6ee88b74e37 100644 (file)
@@ -3,6 +3,7 @@
 #include "thread_map.h"
 #include "cpumap.h"
 #include "tests.h"
+#include <linux/err.h>
 
 /*
  * This test will generate random numbers of calls to some getpid syscalls,
@@ -65,7 +66,7 @@ int test__basic_mmap(void)
 
                snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
                evsels[i] = perf_evsel__newtp("syscalls", name);
-               if (evsels[i] == NULL) {
+               if (IS_ERR(evsels[i])) {
                        pr_debug("perf_evsel__new\n");
                        goto out_delete_evlist;
                }
index a572f87e9c8d8e25665bf75e1c45b1f86ff8a6e9..2006485a2859ba450ddbe6da6c005ff58b5b47d8 100644 (file)
@@ -1,3 +1,5 @@
+#include <api/fs/fs.h>
+#include <linux/err.h>
 #include "evsel.h"
 #include "tests.h"
 #include "thread_map.h"
@@ -14,6 +16,7 @@ int test__openat_syscall_event_on_all_cpus(void)
        cpu_set_t cpu_set;
        struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
        char sbuf[STRERR_BUFSIZE];
+       char errbuf[BUFSIZ];
 
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
@@ -29,13 +32,9 @@ int test__openat_syscall_event_on_all_cpus(void)
        CPU_ZERO(&cpu_set);
 
        evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
-       if (evsel == NULL) {
-               if (tracefs_configured())
-                       pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
-               else if (debugfs_configured())
-                       pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
-               else
-                       pr_debug("Neither tracefs or debugfs is enabled in this kernel\n");
+       if (IS_ERR(evsel)) {
+               tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
+               pr_debug("%s\n", errbuf);
                goto out_thread_map_delete;
        }
 
index 01a19626c84624136d909e1c3d3e58ef877d1b39..5e811cd8f1c3e14567ef38e9da9595f502946c5d 100644 (file)
@@ -1,3 +1,4 @@
+#include <linux/err.h>
 #include "perf.h"
 #include "evlist.h"
 #include "evsel.h"
@@ -30,7 +31,7 @@ int test__syscall_openat_tp_fields(void)
        }
 
        evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
-       if (evsel == NULL) {
+       if (IS_ERR(evsel)) {
                pr_debug("%s: perf_evsel__newtp\n", __func__);
                goto out_delete_evlist;
        }
@@ -88,7 +89,7 @@ int test__syscall_openat_tp_fields(void)
 
                                err = perf_evsel__parse_sample(evsel, event, &sample);
                                if (err) {
-                                       pr_err("Can't parse sample, err = %d\n", err);
+                                       pr_debug("Can't parse sample, err = %d\n", err);
                                        goto out_delete_evlist;
                                }
 
index c9a37bc6b33ac336a128a4948623a1a93a5b388f..033b54797b8a021fe5f5c7ed25459a23d9cb5b6c 100644 (file)
@@ -1,3 +1,5 @@
+#include <api/fs/tracing_path.h>
+#include <linux/err.h>
 #include "thread_map.h"
 #include "evsel.h"
 #include "debug.h"
@@ -10,6 +12,7 @@ int test__openat_syscall_event(void)
        unsigned int nr_openat_calls = 111, i;
        struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
        char sbuf[STRERR_BUFSIZE];
+       char errbuf[BUFSIZ];
 
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
@@ -17,13 +20,9 @@ int test__openat_syscall_event(void)
        }
 
        evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
-       if (evsel == NULL) {
-               if (tracefs_configured())
-                       pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
-               else if (debugfs_configured())
-                       pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
-               else
-                       pr_debug("Neither tracefs or debugfs is enabled in this kernel\n");
+       if (IS_ERR(evsel)) {
+               tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
+               pr_debug("%s\n", errbuf);
                goto out_thread_map_delete;
        }
 
index 9b6b2b6324a1d479835af199e4f02e129c949075..636d7b42d8447f93917ce64b0aff528fd3fceed6 100644 (file)
@@ -3,11 +3,11 @@
 #include "evsel.h"
 #include "evlist.h"
 #include <api/fs/fs.h>
-#include <api/fs/tracefs.h>
-#include <api/fs/debugfs.h>
 #include "tests.h"
 #include "debug.h"
+#include "util.h"
 #include <linux/hw_breakpoint.h>
+#include <api/fs/fs.h>
 
 #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
                             PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
@@ -1260,25 +1260,24 @@ test__checkevent_breakpoint_len_rw_modifier(struct perf_evlist *evlist)
        return test__checkevent_breakpoint_rw(evlist);
 }
 
+static int test__checkevent_precise_max_modifier(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+       TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config",
+                       PERF_COUNT_SW_TASK_CLOCK == evsel->attr.config);
+       return 0;
+}
+
 static int count_tracepoints(void)
 {
-       char events_path[PATH_MAX];
        struct dirent *events_ent;
-       const char *mountpoint;
        DIR *events_dir;
        int cnt = 0;
 
-       mountpoint = tracefs_find_mountpoint();
-       if (mountpoint) {
-               scnprintf(events_path, PATH_MAX, "%s/events",
-                         mountpoint);
-       } else {
-               mountpoint = debugfs_find_mountpoint();
-               scnprintf(events_path, PATH_MAX, "%s/tracing/events",
-                         mountpoint);
-       }
-
-       events_dir = opendir(events_path);
+       events_dir = opendir(tracing_events_path);
 
        TEST_ASSERT_VAL("Can't open events dir", events_dir);
 
@@ -1295,7 +1294,7 @@ static int count_tracepoints(void)
                        continue;
 
                scnprintf(sys_path, PATH_MAX, "%s/%s",
-                         events_path, events_ent->d_name);
+                         tracing_events_path, events_ent->d_name);
 
                sys_dir = opendir(sys_path);
                TEST_ASSERT_VAL("Can't open sys dir", sys_dir);
@@ -1575,6 +1574,11 @@ static struct evlist_test test__events[] = {
                .check = test__checkevent_exclude_idle_modifier_1,
                .id    = 46,
        },
+       {
+               .name  = "task-clock:P,cycles",
+               .check = test__checkevent_precise_max_modifier,
+               .id    = 47,
+       },
 };
 
 static struct evlist_test test__events_pmu[] = {
@@ -1750,6 +1754,17 @@ static int test_pmu_events(void)
        return ret;
 }
 
+static void debug_warn(const char *warn, va_list params)
+{
+       char msg[1024];
+
+       if (!verbose)
+               return;
+
+       vsnprintf(msg, sizeof(msg), warn, params);
+       fprintf(stderr, " Warning: %s\n", msg);
+}
+
 int test__parse_events(void)
 {
        int ret1, ret2 = 0;
@@ -1761,6 +1776,8 @@ do {                                                      \
                ret2 = ret1;                            \
 } while (0)
 
+       set_warning_routine(debug_warn);
+
        TEST_EVENTS(test__events);
 
        if (test_pmu())
index bf113a247987e7beb271f25da5343f821e5c12da..c80486969f83412621336a33ee95edef87d2fb31 100644 (file)
@@ -24,13 +24,17 @@ enum {
        TEST_SKIP = -2,
 };
 
+struct test {
+       const char *desc;
+       int (*func)(void);
+};
+
 /* Tests */
 int test__vmlinux_matches_kallsyms(void);
 int test__openat_syscall_event(void);
 int test__openat_syscall_event_on_all_cpus(void);
 int test__basic_mmap(void);
 int test__PERF_RECORD(void);
-int test__rdpmc(void);
 int test__perf_evsel__roundtrip_name_test(void);
 int test__perf_evsel__tp_sched_test(void);
 int test__syscall_openat_tp_fields(void);
@@ -46,7 +50,6 @@ int test__bp_signal(void);
 int test__bp_signal_overflow(void);
 int test__task_exit(void);
 int test__sw_clock_freq(void);
-int test__perf_time_to_tsc(void);
 int test__code_reading(void);
 int test__sample_parsing(void);
 int test__keep_tracking(void);
@@ -63,8 +66,9 @@ int test__fdarray__add(void);
 int test__kmod_path__parse(void);
 int test__thread_map(void);
 int test__llvm(void);
+int test_session_topology(void);
 
-#if defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 struct thread;
 struct perf_sample;
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
new file mode 100644 (file)
index 0000000..f5bb096
--- /dev/null
@@ -0,0 +1,115 @@
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "tests.h"
+#include "util.h"
+#include "session.h"
+#include "evlist.h"
+#include "debug.h"
+
+#define TEMPL "/tmp/perf-test-XXXXXX"
+#define DATA_SIZE      10
+
+static int get_temp(char *path)
+{
+       int fd;
+
+       strcpy(path, TEMPL);
+
+       fd = mkstemp(path);
+       if (fd < 0) {
+               perror("mkstemp failed");
+               return -1;
+       }
+
+       close(fd);
+       return 0;
+}
+
+static int session_write_header(char *path)
+{
+       struct perf_session *session;
+       struct perf_data_file file = {
+               .path = path,
+               .mode = PERF_DATA_MODE_WRITE,
+       };
+
+       session = perf_session__new(&file, false, NULL);
+       TEST_ASSERT_VAL("can't get session", session);
+
+       session->evlist = perf_evlist__new_default();
+       TEST_ASSERT_VAL("can't get evlist", session->evlist);
+
+       perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
+       perf_header__set_feat(&session->header, HEADER_NRCPUS);
+
+       session->header.data_size += DATA_SIZE;
+
+       TEST_ASSERT_VAL("failed to write header",
+                       !perf_session__write_header(session, session->evlist, file.fd, true));
+
+       perf_session__delete(session);
+
+       return 0;
+}
+
+static int check_cpu_topology(char *path, struct cpu_map *map)
+{
+       struct perf_session *session;
+       struct perf_data_file file = {
+               .path = path,
+               .mode = PERF_DATA_MODE_READ,
+       };
+       int i;
+
+       session = perf_session__new(&file, false, NULL);
+       TEST_ASSERT_VAL("can't get session", session);
+
+       for (i = 0; i < session->header.env.nr_cpus_online; i++) {
+               pr_debug("CPU %d, core %d, socket %d\n", i,
+                        session->header.env.cpu[i].core_id,
+                        session->header.env.cpu[i].socket_id);
+       }
+
+       for (i = 0; i < map->nr; i++) {
+               TEST_ASSERT_VAL("Core ID doesn't match",
+                       (session->header.env.cpu[map->map[i]].core_id == (cpu_map__get_core(map, i, NULL) & 0xffff)));
+
+               TEST_ASSERT_VAL("Socket ID doesn't match",
+                       (session->header.env.cpu[map->map[i]].socket_id == cpu_map__get_socket(map, i, NULL)));
+       }
+
+       perf_session__delete(session);
+
+       return 0;
+}
+
+int test_session_topology(void)
+{
+       char path[PATH_MAX];
+       struct cpu_map *map;
+       int ret = -1;
+
+       TEST_ASSERT_VAL("can't get templ file", !get_temp(path));
+
+       pr_debug("templ file: %s\n", path);
+
+       if (session_write_header(path))
+               goto free_path;
+
+       map = cpu_map__new(NULL);
+       if (map == NULL) {
+               pr_debug("failed to get system cpumap\n");
+               goto free_path;
+       }
+
+       if (check_cpu_topology(path, map))
+               goto free_map;
+       ret = 0;
+
+free_map:
+       cpu_map__put(map);
+free_path:
+       unlink(path);
+       return ret;
+}
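The new test is hooked into the perf test suite through the test_session_topology() declaration added to tests.h above; once built it can be run on its own by name matching, e.g. (hypothetical invocation, the exact test description depends on how it is registered in builtin-test.c):

  perf test -v topology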
index b34c5fc829ae2b0da7389bd31649dd0389e40604..d677e018e50426a8b2c2b417a63b8dfbe36949b7 100644 (file)
@@ -68,7 +68,7 @@ int test__vmlinux_matches_kallsyms(void)
         * to see if the running kernel was relocated by checking if it has the
         * same value in the vmlinux file we load.
         */
-       kallsyms_map = machine__kernel_map(&kallsyms, type);
+       kallsyms_map = machine__kernel_map(&kallsyms);
 
        /*
         * Step 5:
@@ -80,7 +80,7 @@ int test__vmlinux_matches_kallsyms(void)
                goto out;
        }
 
-       vmlinux_map = machine__kernel_map(&vmlinux, type);
+       vmlinux_map = machine__kernel_map(&vmlinux);
 
        /*
         * Step 6:
index 62378a899d79b116d53102c6a8a082ef80080365..722e25d200bf9f9861232d499ddb75b41dccd14d 100644 (file)
@@ -9,6 +9,7 @@ mkdir
 open
 openat
 quotactl
+read
 readlink
 rename
 rmdir
@@ -16,3 +17,4 @@ stat
 statfs
 symlink
 unlink
+write
index c6c7e5189214fb7f8df1c3fb3659a673d80e8ffa..e9703c0829f104bf9d7819d04831306cfd31dbe8 100644 (file)
@@ -393,6 +393,7 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
 
                if (browser->use_navkeypressed && !browser->navkeypressed) {
                        if (key == K_DOWN || key == K_UP ||
+                           (browser->columns && (key == K_LEFT || key == K_RIGHT)) ||
                            key == K_PGDN || key == K_PGUP ||
                            key == K_HOME || key == K_END ||
                            key == ' ') {
@@ -421,6 +422,18 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
                                browser->seek(browser, -1, SEEK_CUR);
                        }
                        break;
+               case K_RIGHT:
+                       if (!browser->columns)
+                               goto out;
+                       if (browser->horiz_scroll < browser->columns - 1)
+                               ++browser->horiz_scroll;
+                       break;
+               case K_LEFT:
+                       if (!browser->columns)
+                               goto out;
+                       if (browser->horiz_scroll != 0)
+                               --browser->horiz_scroll;
+                       break;
                case K_PGDN:
                case ' ':
                        if (browser->top_idx + browser->rows > browser->nr_entries - 1)
@@ -459,6 +472,7 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
                        browser->seek(browser, -offset, SEEK_END);
                        break;
                default:
+               out:
                        return key;
                }
        }
index f3cef564de02e7262c820d52b3063543adf9c2fc..01781de59532ce9c9fd1ff7f8c7fdf97d45fa105 100644 (file)
@@ -14,7 +14,7 @@
 struct ui_browser {
        u64           index, top_idx;
        void          *top, *entries;
-       u16           y, x, width, height, rows;
+       u16           y, x, width, height, rows, columns, horiz_scroll;
        int           current_color;
        void          *priv;
        const char    *title;
index 29739b34759923f6a7c099f070c5689fd80a03d3..d4d7cc27252f1184bf2d678b6460f138099efd28 100644 (file)
@@ -768,8 +768,8 @@ static int annotate_browser__run(struct annotate_browser *browser,
                "UP/DOWN/PGUP\n"
                "PGDN/SPACE    Navigate\n"
                "q/ESC/CTRL+C  Exit\n\n"
-               "->            Go to target\n"
-               "<-            Exit\n"
+               "ENTER         Go to target\n"
+               "ESC           Exit\n"
                "H             Cycle thru hottest instructions\n"
                "j             Toggle showing jump to target arrows\n"
                "J             Toggle showing number of jump sources on targets\n"
@@ -1056,7 +1056,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
                goto out_free_offsets;
        }
 
-       ui_helpline__push("Press <- or ESC to exit");
+       ui_helpline__push("Press ESC to exit");
 
        notes = symbol__annotation(sym);
        browser.start = map__rip_2objdump(map, sym->start);
@@ -1125,8 +1125,8 @@ static struct annotate_config {
        ANNOTATE_CFG(jump_arrows),
        ANNOTATE_CFG(show_linenr),
        ANNOTATE_CFG(show_nr_jumps),
-       ANNOTATE_CFG(use_offset),
        ANNOTATE_CFG(show_total_period),
+       ANNOTATE_CFG(use_offset),
 };
 
 #undef ANNOTATE_CFG
@@ -1152,9 +1152,9 @@ static int annotate__config(const char *var, const char *value,
                      sizeof(struct annotate_config), annotate_config__cmp);
 
        if (cfg == NULL)
-               return -1;
-
-       *cfg->value = perf_config_bool(name, value);
+               ui__warning("%s variable unknown, ignoring...", var);
+       else
+               *cfg->value = perf_config_bool(name, value);
        return 0;
 }
 
index c04c60d4863ce71a08014c396aa7342c8af2fa16..e5afb893604044f189da5a0f50962f990362e0da 100644 (file)
@@ -784,11 +784,12 @@ static int hist_browser__show_entry(struct hist_browser *browser,
                        .size           = sizeof(s),
                        .ptr            = &arg,
                };
+               int column = 0;
 
                hist_browser__gotorc(browser, row, 0);
 
                perf_hpp__for_each_format(fmt) {
-                       if (perf_hpp__should_skip(fmt))
+                       if (perf_hpp__should_skip(fmt) || column++ < browser->b.horiz_scroll)
                                continue;
 
                        if (current_entry && browser->b.navkeypressed) {
@@ -861,14 +862,16 @@ static int advance_hpp_check(struct perf_hpp *hpp, int inc)
        return hpp->size <= 0;
 }
 
-static int hists__scnprintf_headers(char *buf, size_t size, struct hists *hists)
+static int hists_browser__scnprintf_headers(struct hist_browser *browser, char *buf, size_t size)
 {
+       struct hists *hists = browser->hists;
        struct perf_hpp dummy_hpp = {
                .buf    = buf,
                .size   = size,
        };
        struct perf_hpp_fmt *fmt;
        size_t ret = 0;
+       int column = 0;
 
        if (symbol_conf.use_callchain) {
                ret = scnprintf(buf, size, "  ");
@@ -877,7 +880,7 @@ static int hists__scnprintf_headers(char *buf, size_t size, struct hists *hists)
        }
 
        perf_hpp__for_each_format(fmt) {
-               if (perf_hpp__should_skip(fmt))
+               if (perf_hpp__should_skip(fmt)  || column++ < browser->b.horiz_scroll)
                        continue;
 
                ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
@@ -896,7 +899,7 @@ static void hist_browser__show_headers(struct hist_browser *browser)
 {
        char headers[1024];
 
-       hists__scnprintf_headers(headers, sizeof(headers), browser->hists);
+       hists_browser__scnprintf_headers(browser, headers, sizeof(headers));
        ui_browser__gotorc(&browser->b, 0, 0);
        ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
        ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
@@ -1261,6 +1264,7 @@ static int hists__browser_title(struct hists *hists,
        int printed;
        const struct dso *dso = hists->dso_filter;
        const struct thread *thread = hists->thread_filter;
+       int socket_id = hists->socket_filter;
        unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
        u64 nr_events = hists->stats.total_period;
        struct perf_evsel *evsel = hists_to_evsel(hists);
@@ -1314,6 +1318,9 @@ static int hists__browser_title(struct hists *hists,
        if (dso)
                printed += scnprintf(bf + printed, size - printed,
                                    ", DSO: %s", dso->short_name);
+       if (socket_id > -1)
+               printed += scnprintf(bf + printed, size - printed,
+                                   ", Processor Socket: %d", socket_id);
        if (!is_report_browser(hbt)) {
                struct perf_top *top = hbt->arg;
 
@@ -1425,6 +1432,7 @@ struct popup_action {
        struct thread           *thread;
        struct dso              *dso;
        struct map_symbol       ms;
+       int                     socket;
 
        int (*fn)(struct hist_browser *browser, struct popup_action *act);
 };
@@ -1437,7 +1445,7 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
        struct hist_entry *he;
        int err;
 
-       if (!objdump_path && perf_session_env__lookup_objdump(browser->env))
+       if (!objdump_path && perf_env__lookup_objdump(browser->env))
                return 0;
 
        notes = symbol__annotation(act->ms.sym);
@@ -1488,7 +1496,7 @@ do_zoom_thread(struct hist_browser *browser, struct popup_action *act)
                thread__zput(browser->hists->thread_filter);
                ui_helpline__pop();
        } else {
-               ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
+               ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s(%d) thread\"",
                                   thread->comm_set ? thread__comm_str(thread) : "",
                                   thread->tid);
                browser->hists->thread_filter = thread__get(thread);
@@ -1522,7 +1530,7 @@ add_thread_opt(struct hist_browser *browser, struct popup_action *act,
 static int
 do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
 {
-       struct dso *dso = act->dso;
+       struct map *map = act->ms.map;
 
        if (browser->hists->dso_filter) {
                pstack__remove(browser->pstack, &browser->hists->dso_filter);
@@ -1530,11 +1538,11 @@ do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
                browser->hists->dso_filter = NULL;
                ui_helpline__pop();
        } else {
-               if (dso == NULL)
+               if (map == NULL)
                        return 0;
-               ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"",
-                                  dso->kernel ? "the Kernel" : dso->short_name);
-               browser->hists->dso_filter = dso;
+               ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s DSO\"",
+                                  __map__is_kernel(map) ? "the Kernel" : map->dso->short_name);
+               browser->hists->dso_filter = map->dso;
                perf_hpp__set_elide(HISTC_DSO, true);
                pstack__push(browser->pstack, &browser->hists->dso_filter);
        }
@@ -1546,17 +1554,18 @@ do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
 
 static int
 add_dso_opt(struct hist_browser *browser, struct popup_action *act,
-           char **optstr, struct dso *dso)
+           char **optstr, struct map *map)
 {
-       if (dso == NULL)
+       if (map == NULL)
                return 0;
 
        if (asprintf(optstr, "Zoom %s %s DSO",
                     browser->hists->dso_filter ? "out of" : "into",
-                    dso->kernel ? "the Kernel" : dso->short_name) < 0)
+                    __map__is_kernel(map) ? "the Kernel" : map->dso->short_name) < 0)
                return 0;
 
-       act->dso = dso;
+       act->ms.map = map;
+       act->dso = map->dso;
        act->fn = do_zoom_dso;
        return 1;
 }
@@ -1672,6 +1681,41 @@ add_exit_opt(struct hist_browser *browser __maybe_unused,
        return 1;
 }
 
+static int
+do_zoom_socket(struct hist_browser *browser, struct popup_action *act)
+{
+       if (browser->hists->socket_filter > -1) {
+               pstack__remove(browser->pstack, &browser->hists->socket_filter);
+               browser->hists->socket_filter = -1;
+               perf_hpp__set_elide(HISTC_SOCKET, false);
+       } else {
+               browser->hists->socket_filter = act->socket;
+               perf_hpp__set_elide(HISTC_SOCKET, true);
+               pstack__push(browser->pstack, &browser->hists->socket_filter);
+       }
+
+       hists__filter_by_socket(browser->hists);
+       hist_browser__reset(browser);
+       return 0;
+}
+
+static int
+add_socket_opt(struct hist_browser *browser, struct popup_action *act,
+              char **optstr, int socket_id)
+{
+       if (socket_id < 0)
+               return 0;
+
+       if (asprintf(optstr, "Zoom %s Processor Socket %d",
+                    (browser->hists->socket_filter > -1) ? "out of" : "into",
+                    socket_id) < 0)
+               return 0;
+
+       act->socket = socket_id;
+       act->fn = do_zoom_socket;
+       return 1;
+}
+
 static void hist_browser__update_nr_entries(struct hist_browser *hb)
 {
        u64 nr_entries = 0;
@@ -1717,14 +1761,16 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
        "For multiple event sessions:\n\n"                              \
        "TAB/UNTAB     Switch events\n\n"                               \
        "For symbolic views (--sort has sym):\n\n"                      \
-       "->            Zoom into DSO/Threads & Annotate current symbol\n" \
-       "<-            Zoom out\n"                                      \
+       "ENTER         Zoom into DSO/Threads & Annotate current symbol\n" \
+       "ESC           Zoom out\n"                                      \
        "a             Annotate current symbol\n"                       \
        "C             Collapse all callchains\n"                       \
        "d             Zoom into current DSO\n"                         \
        "E             Expand all callchains\n"                         \
        "F             Toggle percentage of filtered entries\n"         \
        "H             Display column headers\n"                        \
+       "m             Display context menu\n"                          \
+       "S             Zoom into current Processor Socket\n"            \
 
        /* help messages are sorted by lexical order of the hotkey */
        const char report_help[] = HIST_BROWSER_HELP_COMMON
@@ -1755,7 +1801,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                hist_browser__update_nr_entries(browser);
        }
 
-       browser->pstack = pstack__new(2);
+       browser->pstack = pstack__new(3);
        if (browser->pstack == NULL)
                goto out;
 
@@ -1764,8 +1810,17 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
        memset(options, 0, sizeof(options));
        memset(actions, 0, sizeof(actions));
 
-       perf_hpp__for_each_format(fmt)
+       perf_hpp__for_each_format(fmt) {
                perf_hpp__reset_width(fmt, hists);
+               /*
+                * This is done just once and activates the horizontal
+                * scrolling code in ui_browser. It would be better to keep
+                * this counter in the perf_hpp code, but that did not work
+                * out here, so FIXME: set it in hist_browser__new instead.
+                * For now, be clever 8-)
+                */
+               ++browser->b.columns;
+       }
 
        if (symbol_conf.col_width_list_str)
                perf_hpp__set_user_width(symbol_conf.col_width_list_str);
@@ -1773,7 +1828,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
        while (1) {
                struct thread *thread = NULL;
                struct dso *dso = NULL;
+               struct map *map = NULL;
                int choice = 0;
+               int socked_id = -1;
 
                nr_options = 0;
 
@@ -1781,7 +1838,10 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 
                if (browser->he_selection != NULL) {
                        thread = hist_browser__selected_thread(browser);
-                       dso = browser->selection->map ? browser->selection->map->dso : NULL;
+                       map = browser->selection->map;
+                       if (map)
+                               dso = map->dso;
+                       socked_id = browser->he_selection->socket;
                }
                switch (key) {
                case K_TAB:
@@ -1824,9 +1884,14 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                        actions->thread = thread;
                        do_zoom_thread(browser, actions);
                        continue;
+               case 'S':
+                       actions->socket = socked_id;
+                       do_zoom_socket(browser, actions);
+                       continue;
                case '/':
                        if (ui_browser__input_window("Symbol to show",
-                                       "Please enter the name of symbol you want to see",
+                                       "Please enter the name of symbol you want to see.\n"
+                                       "To remove the filter later, press / + ENTER.",
                                        buf, "ENTER: OK, ESC: Cancel",
                                        delay_secs * 2) == K_ENTER) {
                                hists->symbol_filter_str = *buf ? buf : NULL;
@@ -1871,6 +1936,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                        continue;
                case K_ENTER:
                case K_RIGHT:
+               case 'm':
                        /* menu */
                        break;
                case K_ESC:
@@ -1899,9 +1965,11 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                 * Ditto for thread below.
                                 */
                                do_zoom_dso(browser, actions);
-                       }
-                       if (top == &browser->hists->thread_filter)
+                       } else if (top == &browser->hists->thread_filter) {
                                do_zoom_thread(browser, actions);
+                       } else if (top == &browser->hists->socket_filter) {
+                               do_zoom_socket(browser, actions);
+                       }
                        continue;
                }
                case 'q':
@@ -1965,12 +2033,14 @@ skip_annotation:
                nr_options += add_thread_opt(browser, &actions[nr_options],
                                             &options[nr_options], thread);
                nr_options += add_dso_opt(browser, &actions[nr_options],
-                                         &options[nr_options], dso);
+                                         &options[nr_options], map);
                nr_options += add_map_opt(browser, &actions[nr_options],
                                          &options[nr_options],
                                          browser->selection ?
                                                browser->selection->map : NULL);
-
+               nr_options += add_socket_opt(browser, &actions[nr_options],
+                                            &options[nr_options],
+                                            socked_id);
                /* perf script support */
                if (browser->he_selection) {
                        nr_options += add_script_opt(browser,
index 8c154c7d46697455c6fb8da8e8362f8c24f85953..80912778bb6d5623bd6555fd7f843a84acb1d51a 100644 (file)
@@ -72,7 +72,7 @@ static int map_browser__run(struct map_browser *browser)
        int key;
 
        if (ui_browser__show(&browser->b, browser->map->dso->long_name,
-                            "Press <- or ESC to exit, %s / to search",
+                            "Press ESC to exit, %s / to search",
                             verbose ? "" : "restart with -v to use") < 0)
                return -1;
 
index e13b48d705ef454a2ebe8be5708ca6eabf0f85d5..ad6b6ee3770e436339fa3d68b2c016e1a7e9dfb7 100644 (file)
@@ -89,7 +89,7 @@ static int script_browser__run(struct perf_script_browser *browser)
        int key;
 
        if (ui_browser__show(&browser->b, browser->script_name,
-                            "Press <- or ESC to exit") < 0)
+                            "Press ESC to exit") < 0)
                return -1;
 
        while (1) {
index 25d608394d746fcb7435517915bed787f20aa674..5029ba2b55af0cb20daaeec875d1f493dc807e62 100644 (file)
@@ -463,27 +463,27 @@ void perf_hpp__init(void)
                return;
 
        if (symbol_conf.cumulate_callchain) {
-               perf_hpp__column_enable(PERF_HPP__OVERHEAD_ACC);
+               hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
                perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
        }
 
-       perf_hpp__column_enable(PERF_HPP__OVERHEAD);
+       hpp_dimension__add_output(PERF_HPP__OVERHEAD);
 
        if (symbol_conf.show_cpu_utilization) {
-               perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS);
-               perf_hpp__column_enable(PERF_HPP__OVERHEAD_US);
+               hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
+               hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);
 
                if (perf_guest) {
-                       perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_SYS);
-                       perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_US);
+                       hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
+                       hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
                }
        }
 
        if (symbol_conf.show_nr_samples)
-               perf_hpp__column_enable(PERF_HPP__SAMPLES);
+               hpp_dimension__add_output(PERF_HPP__SAMPLES);
 
        if (symbol_conf.show_total_period)
-               perf_hpp__column_enable(PERF_HPP__PERIOD);
+               hpp_dimension__add_output(PERF_HPP__PERIOD);
 
        /* prepend overhead field for backward compatiblity.  */
        list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list;
index 60d1f29b4b50a9fedf0a163855056edfce1ed22b..7dfeba0a91f37c33c87b826e9ed79ec4ad59869c 100644 (file)
@@ -141,10 +141,6 @@ int ui__init(void)
 
        SLkp_define_keysym((char *)"^(kB)", SL_KEY_UNTAB);
 
-       ui_helpline__init();
-       ui_browser__init();
-       tui_progress__init();
-
        signal(SIGSEGV, ui__signal_backtrace);
        signal(SIGFPE, ui__signal_backtrace);
        signal(SIGINT, ui__signal);
@@ -153,6 +149,10 @@ int ui__init(void)
 
        perf_error__register(&perf_tui_eops);
 
+       ui_helpline__init();
+       ui_browser__init();
+       tui_progress__init();
+
        hist_browser__init_hpp();
 out:
        return err;
index 349bc96ca1fedc4946ab9596edd5dd1b3813ac0b..591b3fe3ed49acd8b31f701a78b3302a84786546 100644 (file)
@@ -5,6 +5,7 @@ libperf-y += build-id.o
 libperf-y += config.o
 libperf-y += ctype.o
 libperf-y += db-export.o
+libperf-y += env.o
 libperf-y += environment.o
 libperf-y += event.o
 libperf-y += evlist.o
@@ -17,6 +18,7 @@ libperf-y += levenshtein.o
 libperf-y += llvm-utils.o
 libperf-y += parse-options.o
 libperf-y += parse-events.o
+libperf-y += perf_regs.o
 libperf-y += path.o
 libperf-y += rbtree.o
 libperf-y += bitmap.o
@@ -85,6 +87,7 @@ libperf-$(CONFIG_AUXTRACE) += intel-bts.o
 libperf-y += parse-branch-options.o
 libperf-y += parse-regs-options.o
 
+libperf-$(CONFIG_LIBBPF) += bpf-loader.o
 libperf-$(CONFIG_LIBELF) += symbol-elf.o
 libperf-$(CONFIG_LIBELF) += probe-file.o
 libperf-$(CONFIG_LIBELF) += probe-event.o
@@ -103,7 +106,6 @@ libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
 
 libperf-y += scripting-engines/
 
-libperf-$(CONFIG_PERF_REGS) += perf_regs.o
 libperf-$(CONFIG_ZLIB) += zlib.o
 libperf-$(CONFIG_LZMA) += lzma.o
 
index d1eece70b84d6e00c527ccd6027362517013aba8..0fc8d7a2fea5f2ddee03789c3c0b442278f0fb0a 100644 (file)
@@ -548,8 +548,11 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
 
        pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
 
-       if (addr < sym->start || addr >= sym->end)
+       if (addr < sym->start || addr >= sym->end) {
+               pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
+                      __func__, __LINE__, sym->name, sym->start, addr, sym->end);
                return -ERANGE;
+       }
 
        offset = addr - sym->start;
        h = annotation__histogram(notes, evidx);
index e9996092a093d7ef6af5aa72ac78fdd5cab61d81..cea323d9ee7eabbd2101eff85be5e0f564520b91 100644 (file)
@@ -122,7 +122,7 @@ struct annotated_source {
        struct list_head   source;
        struct source_line *lines;
        int                nr_histograms;
-       int                sizeof_sym_hist;
+       size_t             sizeof_sym_hist;
        struct cyc_hist    *cycles_hist;
        struct sym_hist    histograms[0];
 };
index a980e7c50ee078c47f6579e454572c67616bdc0d..7f10430af39c3ac9e47f4da1aca93e37a8f9cf87 100644 (file)
@@ -926,6 +926,8 @@ s64 perf_event__process_auxtrace(struct perf_tool *tool,
 #define PERF_ITRACE_DEFAULT_PERIOD             100000
 #define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ       16
 #define PERF_ITRACE_MAX_CALLCHAIN_SZ           1024
+#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ     64
+#define PERF_ITRACE_MAX_LAST_BRANCH_SZ         1024
 
 void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
 {
@@ -936,6 +938,7 @@ void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
        synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
        synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
        synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
+       synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
 }
 
 /*
@@ -950,6 +953,7 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
        const char *p;
        char *endptr;
        bool period_type_set = false;
+       bool period_set = false;
 
        synth_opts->set = true;
 
@@ -971,6 +975,7 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
                                p += 1;
                        if (isdigit(*p)) {
                                synth_opts->period = strtoull(p, &endptr, 10);
+                               period_set = true;
                                p = endptr;
                                while (*p == ' ' || *p == ',')
                                        p += 1;
@@ -1041,6 +1046,23 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
                                synth_opts->callchain_sz = val;
                        }
                        break;
+               case 'l':
+                       synth_opts->last_branch = true;
+                       synth_opts->last_branch_sz =
+                                       PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
+                       while (*p == ' ' || *p == ',')
+                               p += 1;
+                       if (isdigit(*p)) {
+                               unsigned int val;
+
+                               val = strtoul(p, &endptr, 10);
+                               p = endptr;
+                               if (!val ||
+                                   val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
+                                       goto out_err;
+                               synth_opts->last_branch_sz = val;
+                       }
+                       break;
                case ' ':
                case ',':
                        break;
@@ -1053,7 +1075,7 @@ out:
                if (!period_type_set)
                        synth_opts->period_type =
                                        PERF_ITRACE_DEFAULT_PERIOD_TYPE;
-               if (!synth_opts->period)
+               if (!period_set)
                        synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
        }
 
index bf72b77a588a60b330a8af18622ba8cd9a371bf5..b86f90db1352a6c8635e3ea5d02aa3c21bccc323 100644 (file)
@@ -63,7 +63,9 @@ enum itrace_period_type {
  * @calls: limit branch samples to calls (can be combined with @returns)
  * @returns: limit branch samples to returns (can be combined with @calls)
  * @callchain: add callchain to 'instructions' events
+ * @last_branch: add branch context to 'instruction' events
  * @callchain_sz: maximum callchain size
+ * @last_branch_sz: branch context size
  * @period: 'instructions' events period
  * @period_type: 'instructions' events period type
  */
@@ -79,7 +81,9 @@ struct itrace_synth_opts {
        bool                    calls;
        bool                    returns;
        bool                    callchain;
+       bool                    last_branch;
        unsigned int            callchain_sz;
+       unsigned int            last_branch_sz;
        unsigned long long      period;
        enum itrace_period_type period_type;
 };
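The last_branch/last_branch_sz fields above are driven by the new 'l' directive handled in auxtrace.c; as a usage sketch (this assumes a perf.data file containing hardware-traced AUX data, e.g. Intel PT), something like

  perf script --itrace=il32

would synthesize 'instructions' events and attach a 32-entry branch context to each, falling back to the 64-entry default when no number follows 'l'.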
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
new file mode 100644 (file)
index 0000000..ba6f752
--- /dev/null
@@ -0,0 +1,352 @@
+/*
+ * bpf-loader.c
+ *
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ */
+
+#include <bpf/libbpf.h>
+#include <linux/err.h>
+#include "perf.h"
+#include "debug.h"
+#include "bpf-loader.h"
+#include "probe-event.h"
+#include "probe-finder.h" // for MAX_PROBES
+#include "llvm-utils.h"
+
+#define DEFINE_PRINT_FN(name, level) \
+static int libbpf_##name(const char *fmt, ...) \
+{                                              \
+       va_list args;                           \
+       int ret;                                \
+                                               \
+       va_start(args, fmt);                    \
+       ret = veprintf(level, verbose, pr_fmt(fmt), args);\
+       va_end(args);                           \
+       return ret;                             \
+}
+
+DEFINE_PRINT_FN(warning, 0)
+DEFINE_PRINT_FN(info, 0)
+DEFINE_PRINT_FN(debug, 1)
+
+struct bpf_prog_priv {
+       struct perf_probe_event pev;
+};
+
+struct bpf_object *bpf__prepare_load(const char *filename, bool source)
+{
+       struct bpf_object *obj;
+       static bool libbpf_initialized;
+
+       if (!libbpf_initialized) {
+               libbpf_set_print(libbpf_warning,
+                                libbpf_info,
+                                libbpf_debug);
+               libbpf_initialized = true;
+       }
+
+       if (source) {
+               int err;
+               void *obj_buf;
+               size_t obj_buf_sz;
+
+               err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
+               if (err)
+                       return ERR_PTR(err);
+               obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
+               free(obj_buf);
+       } else
+               obj = bpf_object__open(filename);
+
+       if (!obj) {
+               pr_debug("bpf: failed to load %s\n", filename);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return obj;
+}
+
+void bpf__clear(void)
+{
+       struct bpf_object *obj, *tmp;
+
+       bpf_object__for_each_safe(obj, tmp) {
+               bpf__unprobe(obj);
+               bpf_object__close(obj);
+       }
+}
+
+static void
+bpf_prog_priv__clear(struct bpf_program *prog __maybe_unused,
+                    void *_priv)
+{
+       struct bpf_prog_priv *priv = _priv;
+
+       cleanup_perf_probe_events(&priv->pev, 1);
+       free(priv);
+}
+
+static int
+config_bpf_program(struct bpf_program *prog)
+{
+       struct perf_probe_event *pev = NULL;
+       struct bpf_prog_priv *priv = NULL;
+       const char *config_str;
+       int err;
+
+       config_str = bpf_program__title(prog, false);
+       if (!config_str) {
+               pr_debug("bpf: unable to get title for program\n");
+               return -EINVAL;
+       }
+
+       priv = calloc(sizeof(*priv), 1);
+       if (!priv) {
+               pr_debug("bpf: failed to alloc priv\n");
+               return -ENOMEM;
+       }
+       pev = &priv->pev;
+
+       pr_debug("bpf: config program '%s'\n", config_str);
+       err = parse_perf_probe_command(config_str, pev);
+       if (err < 0) {
+               pr_debug("bpf: '%s' is not a valid config string\n",
+                        config_str);
+               err = -EINVAL;
+               goto errout;
+       }
+
+       if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
+               pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
+                        config_str, PERF_BPF_PROBE_GROUP);
+               err = -EINVAL;
+               goto errout;
+       } else if (!pev->group)
+               pev->group = strdup(PERF_BPF_PROBE_GROUP);
+
+       if (!pev->group) {
+               pr_debug("bpf: strdup failed\n");
+               err = -ENOMEM;
+               goto errout;
+       }
+
+       if (!pev->event) {
+               pr_debug("bpf: '%s': event name is missing\n",
+                        config_str);
+               err = -EINVAL;
+               goto errout;
+       }
+       pr_debug("bpf: config '%s' is ok\n", config_str);
+
+       err = bpf_program__set_private(prog, priv, bpf_prog_priv__clear);
+       if (err) {
+               pr_debug("Failed to set priv for program '%s'\n", config_str);
+               goto errout;
+       }
+
+       return 0;
+
+errout:
+       if (pev)
+               clear_perf_probe_event(pev);
+       free(priv);
+       return err;
+}
+
+static int bpf__prepare_probe(void)
+{
+       static int err = 0;
+       static bool initialized = false;
+
+       /*
+        * Make err static, so that if init fails the first time, bpf__prepare_probe()
+        * fails each time without calling init_probe_symbol_maps multiple
+        * times.
+        */
+       if (initialized)
+               return err;
+
+       initialized = true;
+       err = init_probe_symbol_maps(false);
+       if (err < 0)
+               pr_debug("Failed to init_probe_symbol_maps\n");
+       probe_conf.max_probes = MAX_PROBES;
+       return err;
+}
+
+int bpf__probe(struct bpf_object *obj)
+{
+       int err = 0;
+       struct bpf_program *prog;
+       struct bpf_prog_priv *priv;
+       struct perf_probe_event *pev;
+
+       err = bpf__prepare_probe();
+       if (err) {
+               pr_debug("bpf__prepare_probe failed\n");
+               return err;
+       }
+
+       bpf_object__for_each_program(prog, obj) {
+               err = config_bpf_program(prog);
+               if (err)
+                       goto out;
+
+               err = bpf_program__get_private(prog, (void **)&priv);
+               if (err || !priv)
+                       goto out;
+               pev = &priv->pev;
+
+               err = convert_perf_probe_events(pev, 1);
+               if (err < 0) {
+                       pr_debug("bpf_probe: failed to convert perf probe events");
+                       goto out;
+               }
+
+               err = apply_perf_probe_events(pev, 1);
+               if (err < 0) {
+                       pr_debug("bpf_probe: failed to apply perf probe events");
+                       goto out;
+               }
+       }
+out:
+       return err < 0 ? err : 0;
+}
+
+#define EVENTS_WRITE_BUFSIZE  4096
+int bpf__unprobe(struct bpf_object *obj)
+{
+       int err, ret = 0;
+       struct bpf_program *prog;
+       struct bpf_prog_priv *priv;
+
+       bpf_object__for_each_program(prog, obj) {
+               int i;
+
+               err = bpf_program__get_private(prog, (void **)&priv);
+               if (err || !priv)
+                       continue;
+
+               for (i = 0; i < priv->pev.ntevs; i++) {
+                       struct probe_trace_event *tev = &priv->pev.tevs[i];
+                       char name_buf[EVENTS_WRITE_BUFSIZE];
+                       struct strfilter *delfilter;
+
+                       snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
+                                "%s:%s", tev->group, tev->event);
+                       name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';
+
+                       delfilter = strfilter__new(name_buf, NULL);
+                       if (!delfilter) {
+                               pr_debug("Failed to create filter for unprobing\n");
+                               ret = -ENOMEM;
+                               continue;
+                       }
+
+                       err = del_perf_probe_events(delfilter);
+                       strfilter__delete(delfilter);
+                       if (err) {
+                               pr_debug("Failed to delete %s\n", name_buf);
+                               ret = err;
+                               continue;
+                       }
+               }
+       }
+       return ret;
+}
+
+int bpf__load(struct bpf_object *obj)
+{
+       int err;
+
+       err = bpf_object__load(obj);
+       if (err) {
+               pr_debug("bpf: load objects failed\n");
+               return err;
+       }
+       return 0;
+}
+
+int bpf__foreach_tev(struct bpf_object *obj,
+                    bpf_prog_iter_callback_t func,
+                    void *arg)
+{
+       struct bpf_program *prog;
+       int err;
+
+       bpf_object__for_each_program(prog, obj) {
+               struct probe_trace_event *tev;
+               struct perf_probe_event *pev;
+               struct bpf_prog_priv *priv;
+               int i, fd;
+
+               err = bpf_program__get_private(prog,
+                               (void **)&priv);
+               if (err || !priv) {
+                       pr_debug("bpf: failed to get private field\n");
+                       return -EINVAL;
+               }
+
+               pev = &priv->pev;
+               for (i = 0; i < pev->ntevs; i++) {
+                       tev = &pev->tevs[i];
+
+                       fd = bpf_program__fd(prog);
+                       if (fd < 0) {
+                               pr_debug("bpf: failed to get file descriptor\n");
+                               return fd;
+                       }
+
+                       err = (*func)(tev, fd, arg);
+                       if (err) {
+                               pr_debug("bpf: callback failed, stop iterating\n");
+                               return err;
+                       }
+               }
+       }
+       return 0;
+}
+
+#define bpf__strerror_head(err, buf, size) \
+       char sbuf[STRERR_BUFSIZE], *emsg;\
+       if (!size)\
+               return 0;\
+       if (err < 0)\
+               err = -err;\
+       emsg = strerror_r(err, sbuf, sizeof(sbuf));\
+       switch (err) {\
+       default:\
+               scnprintf(buf, size, "%s", emsg);\
+               break;
+
+#define bpf__strerror_entry(val, fmt...)\
+       case val: {\
+               scnprintf(buf, size, fmt);\
+               break;\
+       }
+
+#define bpf__strerror_end(buf, size)\
+       }\
+       buf[size - 1] = '\0';
+
+int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
+                       int err, char *buf, size_t size)
+{
+       bpf__strerror_head(err, buf, size);
+       bpf__strerror_entry(EEXIST, "Probe point already exists. Try 'perf probe -d \"*\"'");
+       bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0\n");
+       bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file\n");
+       bpf__strerror_end(buf, size);
+       return 0;
+}
+
+int bpf__strerror_load(struct bpf_object *obj __maybe_unused,
+                      int err, char *buf, size_t size)
+{
+       bpf__strerror_head(err, buf, size);
+       bpf__strerror_entry(EINVAL, "%s: Are you root and running a CONFIG_BPF_SYSCALL kernel?",
+                           emsg)
+       bpf__strerror_end(buf, size);
+       return 0;
+}
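The bpf__strerror_head/entry/end trio is just a compact way of writing one switch statement per error-reporting helper; expanded by hand (a sketch only, with whitespace and the remaining entries abbreviated), bpf__strerror_probe() becomes roughly:

int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg;

	if (!size)
		return 0;
	if (err < 0)
		err = -err;		/* normalize -errno to a positive value */
	emsg = strerror_r(err, sbuf, sizeof(sbuf));
	switch (err) {
	default:
		scnprintf(buf, size, "%s", emsg);	/* generic strerror text */
		break;
	case EEXIST:
		scnprintf(buf, size, "Probe point already exists. Try 'perf probe -d \"*\"'");
		break;
	/* the EPERM and ENOENT entries expand the same way */
	}
	buf[size - 1] = '\0';
	return 0;
}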
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
new file mode 100644 (file)
index 0000000..ccd8d7f
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015, Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015, Huawei Inc.
+ */
+#ifndef __BPF_LOADER_H
+#define __BPF_LOADER_H
+
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <string.h>
+#include "probe-event.h"
+#include "debug.h"
+
+struct bpf_object;
+#define PERF_BPF_PROBE_GROUP "perf_bpf_probe"
+
+typedef int (*bpf_prog_iter_callback_t)(struct probe_trace_event *tev,
+                                       int fd, void *arg);
+
+#ifdef HAVE_LIBBPF_SUPPORT
+struct bpf_object *bpf__prepare_load(const char *filename, bool source);
+
+void bpf__clear(void);
+
+int bpf__probe(struct bpf_object *obj);
+int bpf__unprobe(struct bpf_object *obj);
+int bpf__strerror_probe(struct bpf_object *obj, int err,
+                       char *buf, size_t size);
+
+int bpf__load(struct bpf_object *obj);
+int bpf__strerror_load(struct bpf_object *obj, int err,
+                      char *buf, size_t size);
+int bpf__foreach_tev(struct bpf_object *obj,
+                    bpf_prog_iter_callback_t func, void *arg);
+#else
+static inline struct bpf_object *
+bpf__prepare_load(const char *filename __maybe_unused,
+                 bool source __maybe_unused)
+{
+       pr_debug("ERROR: eBPF object loading is disabled during compiling.\n");
+       return ERR_PTR(-ENOTSUP);
+}
+
+static inline void bpf__clear(void) { }
+
+static inline int bpf__probe(struct bpf_object *obj __maybe_unused) { return 0;}
+static inline int bpf__unprobe(struct bpf_object *obj __maybe_unused) { return 0;}
+static inline int bpf__load(struct bpf_object *obj __maybe_unused) { return 0; }
+
+static inline int
+bpf__foreach_tev(struct bpf_object *obj __maybe_unused,
+                bpf_prog_iter_callback_t func __maybe_unused,
+                void *arg __maybe_unused)
+{
+       return 0;
+}
+
+static inline int
+__bpf_strerror(char *buf, size_t size)
+{
+       if (!size)
+               return 0;
+       strncpy(buf,
+               "ERROR: eBPF object loading is disabled during compiling.\n",
+               size);
+       buf[size - 1] = '\0';
+       return 0;
+}
+
+static inline int
+bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
+                   int err __maybe_unused,
+                   char *buf, size_t size)
+{
+       return __bpf_strerror(buf, size);
+}
+
+static inline int bpf__strerror_load(struct bpf_object *obj __maybe_unused,
+                                    int err __maybe_unused,
+                                    char *buf, size_t size)
+{
+       return __bpf_strerror(buf, size);
+}
+#endif
+#endif
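Taken together, the API above is meant to be driven in a fixed order: prepare_load, probe, load, then foreach_tev to hand each attached trace event and program fd back to the caller. A hypothetical caller (the function name, the error handling and the use of bpf__clear() below are illustrative, not part of this patch) could look like:

static int load_bpf_events(const char *path, bpf_prog_iter_callback_t cb, void *arg)
{
	char errbuf[BUFSIZ];
	struct bpf_object *obj = bpf__prepare_load(path, false /* object file, not C source */);
	int err;

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = bpf__probe(obj);			/* create the kprobes described by each section name */
	if (err) {
		bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf));
		pr_debug("%s\n", errbuf);
		goto out;
	}

	err = bpf__load(obj);			/* load the programs into the kernel */
	if (err) {
		bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
		pr_debug("%s\n", errbuf);
		goto out;
	}

	err = bpf__foreach_tev(obj, cb, arg);	/* one callback per (trace event, prog fd) pair */
out:
	if (err)
		bpf__clear();
	return err;
}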
index 773fe13ce6271b192bc9482ac8496578a9f41a81..735ad48e1858b0382c9aacc84e0b912e9329c86a 100644 (file)
@@ -51,10 +51,12 @@ static int parse_callchain_order(const char *value)
 {
        if (!strncmp(value, "caller", strlen(value))) {
                callchain_param.order = ORDER_CALLER;
+               callchain_param.order_set = true;
                return 0;
        }
        if (!strncmp(value, "callee", strlen(value))) {
                callchain_param.order = ORDER_CALLEE;
+               callchain_param.order_set = true;
                return 0;
        }
        return -1;
@@ -77,12 +79,14 @@ static int parse_callchain_sort_key(const char *value)
        return -1;
 }
 
-int
-parse_callchain_report_opt(const char *arg)
+static int
+__parse_callchain_report_opt(const char *arg, bool allow_record_opt)
 {
        char *tok;
        char *endptr;
        bool minpcnt_set = false;
+       bool record_opt_set = false;
+       bool try_stack_size = false;
 
        symbol_conf.use_callchain = true;
 
@@ -100,6 +104,28 @@ parse_callchain_report_opt(const char *arg)
                    !parse_callchain_order(tok) ||
                    !parse_callchain_sort_key(tok)) {
                        /* parsing ok - move on to the next */
+                       try_stack_size = false;
+                       goto next;
+               } else if (allow_record_opt && !record_opt_set) {
+                       if (parse_callchain_record(tok, &callchain_param))
+                               goto try_numbers;
+
+                       /* assume that number followed by 'dwarf' is stack size */
+                       if (callchain_param.record_mode == CALLCHAIN_DWARF)
+                               try_stack_size = true;
+
+                       record_opt_set = true;
+                       goto next;
+               }
+
+try_numbers:
+               if (try_stack_size) {
+                       unsigned long size = 0;
+
+                       if (get_stack_size(tok, &size) < 0)
+                               return -1;
+                       callchain_param.dump_size = size;
+                       try_stack_size = false;
                } else if (!minpcnt_set) {
                        /* try to get the min percent */
                        callchain_param.min_percent = strtod(tok, &endptr);
@@ -112,7 +138,7 @@ parse_callchain_report_opt(const char *arg)
                        if (tok == endptr)
                                return -1;
                }
-
+next:
                arg = NULL;
        }
 
@@ -123,6 +149,16 @@ parse_callchain_report_opt(const char *arg)
        return 0;
 }
 
+int parse_callchain_report_opt(const char *arg)
+{
+       return __parse_callchain_report_opt(arg, false);
+}
+
+int parse_callchain_top_opt(const char *arg)
+{
+       return __parse_callchain_report_opt(arg, true);
+}
+
 int perf_callchain_config(const char *var, const char *value)
 {
        char *endptr;
index acee2b3cd801737985fc7ca44443027a03ad4d54..fce8161e54dbbbae16f0667c0cd86cb50d7b58a2 100644 (file)
@@ -7,6 +7,30 @@
 #include "event.h"
 #include "symbol.h"
 
+#define HELP_PAD "\t\t\t\t"
+
+#define CALLCHAIN_HELP "sets up and enables call-graph (stack chain/backtrace):\n\n"
+
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+# define RECORD_MODE_HELP  HELP_PAD "record_mode:\tcall graph recording mode (fp|dwarf|lbr)\n"
+#else
+# define RECORD_MODE_HELP  HELP_PAD "record_mode:\tcall graph recording mode (fp|lbr)\n"
+#endif
+
+#define RECORD_SIZE_HELP                                               \
+       HELP_PAD "record_size:\tif record_mode is 'dwarf', max size of stack recording (<bytes>)\n" \
+       HELP_PAD "\t\tdefault: 8192 (bytes)\n"
+
+#define CALLCHAIN_RECORD_HELP  CALLCHAIN_HELP RECORD_MODE_HELP RECORD_SIZE_HELP
+
+#define CALLCHAIN_REPORT_HELP                                          \
+       HELP_PAD "print_type:\tcall graph printing style (graph|flat|fractal|none)\n" \
+       HELP_PAD "threshold:\tminimum call graph inclusion threshold (<percent>)\n" \
+       HELP_PAD "print_limit:\tmaximum number of call graph entry (<number>)\n" \
+       HELP_PAD "order:\t\tcall graph order (caller|callee)\n" \
+       HELP_PAD "sort_key:\tcall graph sort key (function|address)\n"  \
+       HELP_PAD "branch:\t\tinclude last branch info to call graph (branch)\n"
+
 enum perf_call_graph_mode {
        CALLCHAIN_NONE,
        CALLCHAIN_FP,
@@ -63,6 +87,7 @@ struct callchain_param {
        double                  min_percent;
        sort_chain_func_t       sort;
        enum chain_order        order;
+       bool                    order_set;
        enum chain_key          key;
        bool                    branch_callstack;
 };
@@ -180,6 +205,7 @@ extern const char record_callchain_help[];
 extern int parse_callchain_record(const char *arg, struct callchain_param *param);
 int parse_callchain_record_opt(const char *arg, struct callchain_param *param);
 int parse_callchain_report_opt(const char *arg);
+int parse_callchain_top_opt(const char *arg);
 int perf_callchain_config(const char *var, const char *value);
 
 static inline void callchain_cursor_snapshot(struct callchain_cursor *dest,
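With parse_callchain_top_opt() also accepting record options, the --call-graph string for perf top can mix both kinds of settings in one comma-separated list. A hedged example of the syntax documented by the help macros above (the values shown are arbitrary):

  perf top --call-graph dwarf,4096,graph,0.5,caller,function

i.e. dwarf record mode with a 4096-byte stack dump, graph print style, a 0.5% inclusion threshold, caller order and the function sort key.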
index 3667e2123e5b44414a483c23b239e61b955dd7f7..10af1e7524fbd24de791c38fa23c7d730d54a193 100644 (file)
@@ -203,6 +203,23 @@ struct cpu_map *cpu_map__dummy_new(void)
        return cpus;
 }
 
+struct cpu_map *cpu_map__empty_new(int nr)
+{
+       struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);
+
+       if (cpus != NULL) {
+               int i;
+
+               cpus->nr = nr;
+               for (i = 0; i < nr; i++)
+                       cpus->map[i] = -1;
+
+               atomic_set(&cpus->refcnt, 1);
+       }
+
+       return cpus;
+}
+
 static void cpu_map__delete(struct cpu_map *map)
 {
        if (map) {
@@ -225,32 +242,32 @@ void cpu_map__put(struct cpu_map *map)
                cpu_map__delete(map);
 }
 
-int cpu_map__get_socket(struct cpu_map *map, int idx)
+static int cpu__get_topology_int(int cpu, const char *name, int *value)
 {
-       FILE *fp;
-       const char *mnt;
        char path[PATH_MAX];
-       int cpu, ret;
 
-       if (idx > map->nr)
-               return -1;
+       snprintf(path, PATH_MAX,
+               "devices/system/cpu/cpu%d/topology/%s", cpu, name);
 
-       cpu = map->map[idx];
+       return sysfs__read_int(path, value);
+}
 
-       mnt = sysfs__mountpoint();
-       if (!mnt)
-               return -1;
+int cpu_map__get_socket_id(int cpu)
+{
+       int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
+       return ret ?: value;
+}
 
-       snprintf(path, PATH_MAX,
-               "%s/devices/system/cpu/cpu%d/topology/physical_package_id",
-               mnt, cpu);
+int cpu_map__get_socket(struct cpu_map *map, int idx, void *data __maybe_unused)
+{
+       int cpu;
 
-       fp = fopen(path, "r");
-       if (!fp)
+       if (idx > map->nr)
                return -1;
-       ret = fscanf(fp, "%d", &cpu);
-       fclose(fp);
-       return ret == 1 ? cpu : -1;
+
+       cpu = map->map[idx];
+
+       return cpu_map__get_socket_id(cpu);
 }
 
 static int cmp_ids(const void *a, const void *b)
@@ -258,8 +275,9 @@ static int cmp_ids(const void *a, const void *b)
        return *(int *)a - *(int *)b;
 }
 
-static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
-                             int (*f)(struct cpu_map *map, int cpu))
+int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
+                      int (*f)(struct cpu_map *map, int cpu, void *data),
+                      void *data)
 {
        struct cpu_map *c;
        int nr = cpus->nr;
@@ -271,7 +289,7 @@ static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
                return -1;
 
        for (cpu = 0; cpu < nr; cpu++) {
-               s1 = f(cpus, cpu);
+               s1 = f(cpus, cpu, data);
                for (s2 = 0; s2 < c->nr; s2++) {
                        if (s1 == c->map[s2])
                                break;
@@ -284,40 +302,29 @@ static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
        /* ensure we process id in increasing order */
        qsort(c->map, c->nr, sizeof(int), cmp_ids);
 
-       atomic_set(&cpus->refcnt, 1);
+       atomic_set(&c->refcnt, 1);
        *res = c;
        return 0;
 }
 
-int cpu_map__get_core(struct cpu_map *map, int idx)
+int cpu_map__get_core_id(int cpu)
 {
-       FILE *fp;
-       const char *mnt;
-       char path[PATH_MAX];
-       int cpu, ret, s;
+       int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
+       return ret ?: value;
+}
+
+int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
+{
+       int cpu, s;
 
        if (idx > map->nr)
                return -1;
 
        cpu = map->map[idx];
 
-       mnt = sysfs__mountpoint();
-       if (!mnt)
-               return -1;
-
-       snprintf(path, PATH_MAX,
-               "%s/devices/system/cpu/cpu%d/topology/core_id",
-               mnt, cpu);
-
-       fp = fopen(path, "r");
-       if (!fp)
-               return -1;
-       ret = fscanf(fp, "%d", &cpu);
-       fclose(fp);
-       if (ret != 1)
-               return -1;
+       cpu = cpu_map__get_core_id(cpu);
 
-       s = cpu_map__get_socket(map, idx);
+       s = cpu_map__get_socket(map, idx, data);
        if (s == -1)
                return -1;
 
@@ -332,12 +339,12 @@ int cpu_map__get_core(struct cpu_map *map, int idx)
 
 int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
 {
-       return cpu_map__build_map(cpus, sockp, cpu_map__get_socket);
+       return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
 }
 
 int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
 {
-       return cpu_map__build_map(cpus, corep, cpu_map__get_core);
+       return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
 }
 
 /* setup simple routines to easily access node numbers given a cpu number */
index 0af9cecb4c519d44da13d392baa9abca37ebe6ec..85f7772457fa091655d62212067f2edddf6e55ae 100644 (file)
@@ -15,11 +15,14 @@ struct cpu_map {
 };
 
 struct cpu_map *cpu_map__new(const char *cpu_list);
+struct cpu_map *cpu_map__empty_new(int nr);
 struct cpu_map *cpu_map__dummy_new(void);
 struct cpu_map *cpu_map__read(FILE *file);
 size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
-int cpu_map__get_socket(struct cpu_map *map, int idx);
-int cpu_map__get_core(struct cpu_map *map, int idx);
+int cpu_map__get_socket_id(int cpu);
+int cpu_map__get_socket(struct cpu_map *map, int idx, void *data);
+int cpu_map__get_core_id(int cpu);
+int cpu_map__get_core(struct cpu_map *map, int idx, void *data);
 int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
 int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
 
@@ -85,4 +88,7 @@ static inline int cpu__get_node(int cpu)
        return cpunode_map[cpu];
 }
 
+int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
+                      int (*f)(struct cpu_map *map, int cpu, void *data),
+                      void *data);
 #endif /* __PERF_CPUMAP_H */
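A minimal usage sketch, not part of the commit: with the void *data argument added to the cpu_map callbacks above, a caller can still build a per-socket map without passing any extra context. All perf identifiers below come from the cpumap.h declarations in this diff; the wrapper and function names are hypothetical and error handling is reduced to the bare minimum.

/* Sketch only: aggregate an online-CPU map into one entry per socket. */
static int socket_of(struct cpu_map *map, int idx, void *data)
{
        return cpu_map__get_socket(map, idx, data);   /* data is unused context */
}

static int build_socket_map_example(void)
{
        struct cpu_map *cpus = cpu_map__new(NULL);    /* NULL => all online CPUs */
        struct cpu_map *socks = NULL;
        int ret;

        if (cpus == NULL)
                return -1;

        ret = cpu_map__build_map(cpus, &socks, socket_of, NULL);
        cpu_map__put(cpus);
        if (ret == 0)
                cpu_map__put(socks);
        return ret;
}

This is essentially what cpu_map__build_socket_map() above does internally once it passes cpu_map__get_socket and a NULL data pointer to cpu_map__build_map().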
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
new file mode 100644 (file)
index 0000000..6af4f7c
--- /dev/null
@@ -0,0 +1,86 @@
+#include "cpumap.h"
+#include "env.h"
+#include "util.h"
+
+struct perf_env perf_env;
+
+void perf_env__exit(struct perf_env *env)
+{
+       zfree(&env->hostname);
+       zfree(&env->os_release);
+       zfree(&env->version);
+       zfree(&env->arch);
+       zfree(&env->cpu_desc);
+       zfree(&env->cpuid);
+       zfree(&env->cmdline);
+       zfree(&env->cmdline_argv);
+       zfree(&env->sibling_cores);
+       zfree(&env->sibling_threads);
+       zfree(&env->numa_nodes);
+       zfree(&env->pmu_mappings);
+       zfree(&env->cpu);
+}
+
+int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
+{
+       int i;
+
+       /*
+        * If env->cmdline_argv has already been set, do not override it.  This allows
+        * a command to set the cmdline, parse args and then call another
+        * builtin function that implements a command -- e.g., cmd_kvm calling
+        * cmd_record.
+        */
+       if (env->cmdline_argv != NULL)
+               return 0;
+
+       /* do not include NULL termination */
+       env->cmdline_argv = calloc(argc, sizeof(char *));
+       if (env->cmdline_argv == NULL)
+               goto out_enomem;
+
+       /*
+        * Must copy argv contents because it gets moved around during option
+        * parsing:
+        */
+       for (i = 0; i < argc ; i++) {
+               env->cmdline_argv[i] = argv[i];
+               if (env->cmdline_argv[i] == NULL)
+                       goto out_free;
+       }
+
+       env->nr_cmdline = argc;
+
+       return 0;
+out_free:
+       zfree(&env->cmdline_argv);
+out_enomem:
+       return -ENOMEM;
+}
+
+int perf_env__read_cpu_topology_map(struct perf_env *env)
+{
+       int cpu, nr_cpus;
+
+       if (env->cpu != NULL)
+               return 0;
+
+       if (env->nr_cpus_avail == 0)
+               env->nr_cpus_avail = sysconf(_SC_NPROCESSORS_CONF);
+
+       nr_cpus = env->nr_cpus_avail;
+       if (nr_cpus == -1)
+               return -EINVAL;
+
+       env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
+       if (env->cpu == NULL)
+               return -ENOMEM;
+
+       for (cpu = 0; cpu < nr_cpus; ++cpu) {
+               env->cpu[cpu].core_id   = cpu_map__get_core_id(cpu);
+               env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
+       }
+
+       env->nr_cpus_avail = nr_cpus;
+       return 0;
+}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
new file mode 100644 (file)
index 0000000..0132b95
--- /dev/null
@@ -0,0 +1,44 @@
+#ifndef __PERF_ENV_H
+#define __PERF_ENV_H
+
+struct cpu_topology_map {
+       int     socket_id;
+       int     core_id;
+};
+
+struct perf_env {
+       char                    *hostname;
+       char                    *os_release;
+       char                    *version;
+       char                    *arch;
+       int                     nr_cpus_online;
+       int                     nr_cpus_avail;
+       char                    *cpu_desc;
+       char                    *cpuid;
+       unsigned long long      total_mem;
+       unsigned int            msr_pmu_type;
+
+       int                     nr_cmdline;
+       int                     nr_sibling_cores;
+       int                     nr_sibling_threads;
+       int                     nr_numa_nodes;
+       int                     nr_pmu_mappings;
+       int                     nr_groups;
+       char                    *cmdline;
+       const char              **cmdline_argv;
+       char                    *sibling_cores;
+       char                    *sibling_threads;
+       char                    *numa_nodes;
+       char                    *pmu_mappings;
+       struct cpu_topology_map *cpu;
+};
+
+extern struct perf_env perf_env;
+
+void perf_env__exit(struct perf_env *env);
+
+int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
+
+int perf_env__read_cpu_topology_map(struct perf_env *env);
+
+#endif /* __PERF_ENV_H */
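A hedged sketch of how the new perf_env topology map might be consulted once populated; it mirrors the al->socket lookup that the event.c hunk below adds and uses only the declarations from env.h above. The helper name and the extra bounds checks are assumptions added for illustration, not taken from the commit.

/* Sketch: resolve the socket of a sampled CPU via the new perf_env helpers. */
static int socket_of_cpu(struct perf_env *env, int cpu)
{
        if (perf_env__read_cpu_topology_map(env) < 0)
                return -1;

        if (env->cpu == NULL || cpu < 0 || cpu >= env->nr_cpus_avail)
                return -1;

        return env->cpu[cpu].socket_id;
}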
index 7ff61274ed57f5219416c69767fac9acca21e091..8b10621b415c684564a6dd5af130ebc9d3ab33c7 100644 (file)
@@ -67,7 +67,8 @@ static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
        char filename[PATH_MAX];
        char bf[4096];
        int fd;
-       size_t size = 0, n;
+       size_t size = 0;
+       ssize_t n;
        char *nl, *name, *tgids, *ppids;
 
        *tgid = -1;
@@ -167,7 +168,7 @@ static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
        return 0;
 }
 
-static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                         union perf_event *event, pid_t pid,
                                         perf_event__handler_t process,
                                         struct machine *machine)
@@ -378,7 +379,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
        for (pos = maps__first(maps); pos; pos = map__next(pos)) {
                size_t size;
 
-               if (pos->dso->kernel)
+               if (__map__is_kernel(pos))
                        continue;
 
                size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
@@ -649,12 +650,12 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
        size_t size;
        const char *mmap_name;
        char name_buff[PATH_MAX];
-       struct map *map;
+       struct map *map = machine__kernel_map(machine);
        struct kmap *kmap;
        int err;
        union perf_event *event;
 
-       if (machine->vmlinux_maps[0] == NULL)
+       if (map == NULL)
                return -1;
 
        /*
@@ -680,7 +681,6 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
        }
 
-       map = machine->vmlinux_maps[MAP__FUNCTION];
        kmap = map__kmap(map);
        size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
                        "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
@@ -1008,7 +1008,7 @@ int perf_event__preprocess_sample(const union perf_event *event,
         * it now.
         */
        if (cpumode == PERF_RECORD_MISC_KERNEL &&
-           machine->vmlinux_maps[MAP__FUNCTION] == NULL)
+           machine__kernel_map(machine) == NULL)
                machine__create_kernel_maps(machine);
 
        thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al);
@@ -1021,6 +1021,14 @@ int perf_event__preprocess_sample(const union perf_event *event,
 
        al->sym = NULL;
        al->cpu = sample->cpu;
+       al->socket = -1;
+
+       if (al->cpu >= 0) {
+               struct perf_env *env = machine->env;
+
+               if (env && env->cpu)
+                       al->socket = env->cpu[al->cpu].socket_id;
+       }
 
        if (al->map) {
                struct dso *dso = al->map->dso;
index f729df5e25e634607f6b4c2f2cb4cc4e1ec933a3..a0dbcbd4f6d82d6564d56e48c0672ea56cba45bc 100644 (file)
@@ -257,6 +257,7 @@ struct events_stats {
        u64 total_non_filtered_period;
        u64 total_lost;
        u64 total_lost_samples;
+       u64 total_aux_lost;
        u64 total_invalid_chains;
        u32 nr_events[PERF_RECORD_HEADER_MAX];
        u32 nr_non_filtered_samples;
@@ -478,6 +479,11 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
                                  const struct perf_sample *sample,
                                  bool swapped);
 
+pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+                                 union perf_event *event, pid_t pid,
+                                 perf_event__handler_t process,
+                                 struct machine *machine);
+
 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid,
index c8fc8a258f4265c42c636d045b195a612cfae3a4..d1392194a9a951bd3e1dd5c9b72fd383ede98439 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/bitops.h>
 #include <linux/hash.h>
 #include <linux/log2.h>
+#include <linux/err.h>
 
 static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
@@ -164,6 +165,13 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
        __perf_evlist__propagate_maps(evlist, entry);
 }
 
+void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
+{
+       evsel->evlist = NULL;
+       list_del_init(&evsel->node);
+       evlist->nr_entries -= 1;
+}
+
 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list)
 {
@@ -197,6 +205,20 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
        }
 }
 
+void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
+{
+       attr->precise_ip = 3;
+
+       while (attr->precise_ip != 0) {
+               int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
+               if (fd != -1) {
+                       close(fd);
+                       break;
+               }
+               --attr->precise_ip;
+       }
+}
+
 int perf_evlist__add_default(struct perf_evlist *evlist)
 {
        struct perf_event_attr attr = {
@@ -207,13 +229,15 @@ int perf_evlist__add_default(struct perf_evlist *evlist)
 
        event_attr_init(&attr);
 
+       perf_event_attr__set_max_precise_ip(&attr);
+
        evsel = perf_evsel__new(&attr);
        if (evsel == NULL)
                goto error;
 
-       /* use strdup() because free(evsel) assumes name is allocated */
-       evsel->name = strdup("cycles");
-       if (!evsel->name)
+       /* use asprintf() because free(evsel) assumes name is allocated */
+       if (asprintf(&evsel->name, "cycles%.*s",
+                    attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
                goto error_free;
 
        perf_evlist__add(evlist, evsel);
@@ -293,7 +317,7 @@ int perf_evlist__add_newtp(struct perf_evlist *evlist,
 {
        struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
 
-       if (evsel == NULL)
+       if (IS_ERR(evsel))
                return -1;
 
        evsel->handler = handler;
@@ -616,6 +640,21 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
        return NULL;
 }
 
+struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
+                                               u64 id)
+{
+       struct perf_sample_id *sid;
+
+       if (!id)
+               return NULL;
+
+       sid = perf_evlist__id2sid(evlist, id);
+       if (sid)
+               return sid->evsel;
+
+       return NULL;
+}
+
 static int perf_evlist__event2id(struct perf_evlist *evlist,
                                 union perf_event *event, u64 *id)
 {
index 115d8b53c6010a5a1d466b04535245d9fd8f8579..a459fe71b452e0b721d798cc73cc6287ae87dc1c 100644 (file)
@@ -73,6 +73,7 @@ void perf_evlist__exit(struct perf_evlist *evlist);
 void perf_evlist__delete(struct perf_evlist *evlist);
 
 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
+void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel);
 int perf_evlist__add_default(struct perf_evlist *evlist);
 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs);
@@ -104,6 +105,8 @@ int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mas
 int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
 
 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
+struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
+                                               u64 id);
 
 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
 
@@ -287,4 +290,6 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
 
 void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
                                     struct perf_evsel *tracking_evsel);
+
+void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
 #endif /* __PERF_EVLIST_H */
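A small sketch, assuming the caller builds a plain hardware-cycles attribute itself, of how the perf_event_attr__set_max_precise_ip() helper declared above is meant to be used: the evlist.c hunk shows it probing decreasing precise_ip levels with sys_perf_event_open() until one is accepted, which is how perf_evlist__add_default() arrives at its "cycles:ppp"-style event name. The function name here is hypothetical.

#include <linux/perf_event.h>
#include "evlist.h"

/* Sketch only: find the most precise cycles event this kernel/PMU accepts. */
static void probe_precise_cycles(void)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };

        perf_event_attr__set_max_precise_ip(&attr);
        /* attr.precise_ip now holds the highest level (3 down to 0) that opened. */
}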
index 5410483d52198c5909ec5a502c61567c5119c8ef..397fb4ed3c97b6deffeffd8f69bbceb886ac58ea 100644 (file)
@@ -9,10 +9,11 @@
 
 #include <byteswap.h>
 #include <linux/bitops.h>
-#include <api/fs/debugfs.h>
+#include <api/fs/tracing_path.h>
 #include <traceevent/event-parse.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/perf_event.h>
+#include <linux/err.h>
 #include <sys/resource.h>
 #include "asm/bug.h"
 #include "callchain.h"
@@ -207,6 +208,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
        evsel->unit        = "";
        evsel->scale       = 1.0;
        evsel->evlist      = NULL;
+       evsel->bpf_fd      = -1;
        INIT_LIST_HEAD(&evsel->node);
        INIT_LIST_HEAD(&evsel->config_terms);
        perf_evsel__object.init(evsel);
@@ -225,11 +227,17 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
        return evsel;
 }
 
+/*
+ * Returns pointer with encoded error via <linux/err.h> interface.
+ */
 struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
 {
        struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
+       int err = -ENOMEM;
 
-       if (evsel != NULL) {
+       if (evsel == NULL) {
+               goto out_err;
+       } else {
                struct perf_event_attr attr = {
                        .type          = PERF_TYPE_TRACEPOINT,
                        .sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
@@ -240,8 +248,10 @@ struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int
                        goto out_free;
 
                evsel->tp_format = trace_event__tp_format(sys, name);
-               if (evsel->tp_format == NULL)
+               if (IS_ERR(evsel->tp_format)) {
+                       err = PTR_ERR(evsel->tp_format);
                        goto out_free;
+               }
 
                event_attr_init(&attr);
                attr.config = evsel->tp_format->id;
@@ -254,7 +264,8 @@ struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int
 out_free:
        zfree(&evsel->name);
        free(evsel);
-       return NULL;
+out_err:
+       return ERR_PTR(err);
 }
 
 const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
@@ -642,6 +653,15 @@ static void apply_config_terms(struct perf_evsel *evsel,
                case PERF_EVSEL__CONFIG_TERM_STACK_USER:
                        dump_size = term->val.stack_user;
                        break;
+               case PERF_EVSEL__CONFIG_TERM_INHERIT:
+                       /*
+                        * attr->inherit should have already been set by
+                        * perf_evsel__config. If the user explicitly set
+                        * inherit using config terms, override the global
+                        * opt->no_inherit setting.
+                        */
+                       attr->inherit = term->val.inherit ? 1 : 0;
+                       break;
                default:
                        break;
                }
@@ -872,6 +892,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
                attr->clockid = opts->clockid;
        }
 
+       if (evsel->precise_max)
+               perf_event_attr__set_max_precise_ip(attr);
+
        /*
         * Apply event specific term settings,
         * it overloads any global configuration.
@@ -1168,7 +1191,7 @@ static void __p_sample_type(char *buf, size_t size, u64 value)
                bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
                bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
                bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
-               bit_name(IDENTIFIER), bit_name(REGS_INTR),
+               bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
                { .name = NULL, }
        };
 #undef bit_name
@@ -1249,6 +1272,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
        PRINT_ATTRf(bp_type, p_unsigned);
        PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
        PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
+       PRINT_ATTRf(branch_sample_type, p_unsigned);
        PRINT_ATTRf(sample_regs_user, p_hex);
        PRINT_ATTRf(sample_stack_user, p_unsigned);
        PRINT_ATTRf(clockid, p_signed);
@@ -1333,6 +1357,22 @@ retry_open:
                                          err);
                                goto try_fallback;
                        }
+
+                       if (evsel->bpf_fd >= 0) {
+                               int evt_fd = FD(evsel, cpu, thread);
+                               int bpf_fd = evsel->bpf_fd;
+
+                               err = ioctl(evt_fd,
+                                           PERF_EVENT_IOC_SET_BPF,
+                                           bpf_fd);
+                               if (err && errno != EEXIST) {
+                                       pr_err("failed to attach bpf fd %d: %s\n",
+                                              bpf_fd, strerror(errno));
+                                       err = -EINVAL;
+                                       goto out_close;
+                               }
+                       }
+
                        set_rlimit = NO_CHANGE;
 
                        /*
index ef8925f7211a4a311c927e6919d83817745eb1cf..0e49bd742c639c02d1aef18f421c0204faa95823 100644 (file)
@@ -43,6 +43,7 @@ enum {
        PERF_EVSEL__CONFIG_TERM_TIME,
        PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
        PERF_EVSEL__CONFIG_TERM_STACK_USER,
+       PERF_EVSEL__CONFIG_TERM_INHERIT,
        PERF_EVSEL__CONFIG_TERM_MAX,
 };
 
@@ -55,6 +56,7 @@ struct perf_evsel_config_term {
                bool    time;
                char    *callgraph;
                u64     stack_user;
+               bool    inherit;
        } val;
 };
 
@@ -90,9 +92,9 @@ struct perf_evsel {
        double                  scale;
        const char              *unit;
        struct event_format     *tp_format;
+       off_t                   id_offset;
        union {
                void            *priv;
-               off_t           id_offset;
                u64             db_id;
        };
        struct cgroup_sel       *cgrp;
@@ -111,6 +113,7 @@ struct perf_evsel {
        bool                    system_wide;
        bool                    tracking;
        bool                    per_pkg;
+       bool                    precise_max;
        /* parse modifier helper */
        int                     exclude_GH;
        int                     nr_members;
@@ -120,6 +123,7 @@ struct perf_evsel {
        char                    *group_name;
        bool                    cmdline_group_boundary;
        struct list_head        config_terms;
+       int                     bpf_fd;
 };
 
 union u64_swap {
@@ -130,7 +134,6 @@ union u64_swap {
 struct cpu_map;
 struct target;
 struct thread_map;
-struct perf_evlist;
 struct record_opts;
 
 static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
@@ -162,6 +165,9 @@ static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
 
 struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);
 
+/*
+ * Returns pointer with encoded error via <linux/err.h> interface.
+ */
 static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
 {
        return perf_evsel__newtp_idx(sys, name, 0);
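With the tracepoint constructor now reporting errors through the <linux/err.h> convention (see the perf_evsel__newtp_idx() change and the IS_ERR() check added to perf_evlist__add_newtp() above), callers need the pattern below rather than a NULL test. This is only an illustration; the tracepoint name and function name are arbitrary.

#include <linux/err.h>
#include "evsel.h"

/* Sketch: failure is signalled by an ERR_PTR(), no longer by NULL. */
static int open_tracepoint_example(void)
{
        struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");

        if (IS_ERR(evsel))
                return PTR_ERR(evsel);   /* e.g. -ENOMEM or a tracing_path error */

        perf_evsel__delete(evsel);
        return 0;
}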
index fce6634aebe25d19d126120ec1e6e4dd323be46a..43838003c1a160ff56a0ae72a70dcef68631fe2f 100644 (file)
@@ -24,9 +24,6 @@
 #include "build-id.h"
 #include "data.h"
 
-static u32 header_argc;
-static const char **header_argv;
-
 /*
  * magic2 = "PERFILE2"
  * must be a numerical value to let the endianness
@@ -88,6 +85,9 @@ int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
        return err;
 }
 
+#define string_size(str)                                               \
+       (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
+
 static int do_write_string(int fd, const char *str)
 {
        u32 len, olen;
@@ -135,37 +135,6 @@ static char *do_read_string(int fd, struct perf_header *ph)
        return NULL;
 }
 
-int
-perf_header__set_cmdline(int argc, const char **argv)
-{
-       int i;
-
-       /*
-        * If header_argv has already been set, do not override it.
-        * This allows a command to set the cmdline, parse args and
-        * then call another builtin function that implements a
-        * command -- e.g, cmd_kvm calling cmd_record.
-        */
-       if (header_argv)
-               return 0;
-
-       header_argc = (u32)argc;
-
-       /* do not include NULL termination */
-       header_argv = calloc(argc, sizeof(char *));
-       if (!header_argv)
-               return -ENOMEM;
-
-       /*
-        * must copy argv contents because it gets moved
-        * around during option parsing
-        */
-       for (i = 0; i < argc ; i++)
-               header_argv[i] = argv[i];
-
-       return 0;
-}
-
 static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
                            struct perf_evlist *evlist)
 {
@@ -402,8 +371,8 @@ static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
 {
        char buf[MAXPATHLEN];
        char proc[32];
-       u32 i, n;
-       int ret;
+       u32 n;
+       int i, ret;
 
        /*
         * actual path to perf binary
@@ -417,7 +386,7 @@ static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
        buf[ret] = '\0';
 
        /* account for binary path */
-       n = header_argc + 1;
+       n = perf_env.nr_cmdline + 1;
 
        ret = do_write(fd, &n, sizeof(n));
        if (ret < 0)
@@ -427,8 +396,8 @@ static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
        if (ret < 0)
                return ret;
 
-       for (i = 0 ; i < header_argc; i++) {
-               ret = do_write_string(fd, header_argv[i]);
+       for (i = 0 ; i < perf_env.nr_cmdline; i++) {
+               ret = do_write_string(fd, perf_env.cmdline_argv[i]);
                if (ret < 0)
                        return ret;
        }
@@ -441,6 +410,7 @@ static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
        "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
 
 struct cpu_topo {
+       u32 cpu_nr;
        u32 core_sib;
        u32 thread_sib;
        char **core_siblings;
@@ -551,7 +521,7 @@ static struct cpu_topo *build_cpu_topology(void)
                return NULL;
 
        tp = addr;
-
+       tp->cpu_nr = nr;
        addr += sizeof(*tp);
        tp->core_siblings = addr;
        addr += sz;
@@ -574,7 +544,7 @@ static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
 {
        struct cpu_topo *tp;
        u32 i;
-       int ret;
+       int ret, j;
 
        tp = build_cpu_topology();
        if (!tp)
@@ -598,6 +568,21 @@ static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
                if (ret < 0)
                        break;
        }
+
+       ret = perf_env__read_cpu_topology_map(&perf_env);
+       if (ret < 0)
+               goto done;
+
+       for (j = 0; j < perf_env.nr_cpus_avail; j++) {
+               ret = do_write(fd, &perf_env.cpu[j].core_id,
+                              sizeof(perf_env.cpu[j].core_id));
+               if (ret < 0)
+                       return ret;
+               ret = do_write(fd, &perf_env.cpu[j].socket_id,
+                              sizeof(perf_env.cpu[j].socket_id));
+               if (ret < 0)
+                       return ret;
+       }
 done:
        free_cpu_topo(tp);
        return ret;
@@ -938,6 +923,7 @@ static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
 {
        int nr, i;
        char *str;
+       int cpu_nr = ph->env.nr_cpus_online;
 
        nr = ph->env.nr_sibling_cores;
        str = ph->env.sibling_cores;
@@ -954,6 +940,13 @@ static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
                fprintf(fp, "# sibling threads : %s\n", str);
                str += strlen(str) + 1;
        }
+
+       if (ph->env.cpu != NULL) {
+               for (i = 0; i < cpu_nr; i++)
+                       fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
+                               ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
+       } else
+               fprintf(fp, "# Core ID and Socket ID information is not available\n");
 }
 
 static void free_event_desc(struct perf_evsel *events)
@@ -1582,7 +1575,7 @@ error:
        return -1;
 }
 
-static int process_cpu_topology(struct perf_file_section *section __maybe_unused,
+static int process_cpu_topology(struct perf_file_section *section,
                                struct perf_header *ph, int fd,
                                void *data __maybe_unused)
 {
@@ -1590,15 +1583,22 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
        u32 nr, i;
        char *str;
        struct strbuf sb;
+       int cpu_nr = ph->env.nr_cpus_online;
+       u64 size = 0;
+
+       ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
+       if (!ph->env.cpu)
+               return -1;
 
        ret = readn(fd, &nr, sizeof(nr));
        if (ret != sizeof(nr))
-               return -1;
+               goto free_cpu;
 
        if (ph->needs_swap)
                nr = bswap_32(nr);
 
        ph->env.nr_sibling_cores = nr;
+       size += sizeof(u32);
        strbuf_init(&sb, 128);
 
        for (i = 0; i < nr; i++) {
@@ -1608,6 +1608,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
 
                /* include a NULL character at the end */
                strbuf_add(&sb, str, strlen(str) + 1);
+               size += string_size(str);
                free(str);
        }
        ph->env.sibling_cores = strbuf_detach(&sb, NULL);
@@ -1620,6 +1621,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
                nr = bswap_32(nr);
 
        ph->env.nr_sibling_threads = nr;
+       size += sizeof(u32);
 
        for (i = 0; i < nr; i++) {
                str = do_read_string(fd, ph);
@@ -1628,13 +1630,57 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
 
                /* include a NULL character at the end */
                strbuf_add(&sb, str, strlen(str) + 1);
+               size += string_size(str);
                free(str);
        }
        ph->env.sibling_threads = strbuf_detach(&sb, NULL);
+
+       /*
+        * The header may be from old perf,
+        * which doesn't include core id and socket id information.
+        */
+       if (section->size <= size) {
+               zfree(&ph->env.cpu);
+               return 0;
+       }
+
+       for (i = 0; i < (u32)cpu_nr; i++) {
+               ret = readn(fd, &nr, sizeof(nr));
+               if (ret != sizeof(nr))
+                       goto free_cpu;
+
+               if (ph->needs_swap)
+                       nr = bswap_32(nr);
+
+               if (nr > (u32)cpu_nr) {
+                       pr_debug("core_id number is too big. "
+                                "You may need to upgrade the perf tool.\n");
+                       goto free_cpu;
+               }
+               ph->env.cpu[i].core_id = nr;
+
+               ret = readn(fd, &nr, sizeof(nr));
+               if (ret != sizeof(nr))
+                       goto free_cpu;
+
+               if (ph->needs_swap)
+                       nr = bswap_32(nr);
+
+               if (nr > (u32)cpu_nr) {
+                       pr_debug("socket_id number is too big. "
+                                "You may need to upgrade the perf tool.\n");
+                       goto free_cpu;
+               }
+
+               ph->env.cpu[i].socket_id = nr;
+       }
+
        return 0;
 
 error:
        strbuf_release(&sb);
+free_cpu:
+       zfree(&ph->env.cpu);
        return -1;
 }
 
@@ -1737,6 +1783,9 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused
                /* include a NULL character at the end */
                strbuf_add(&sb, "", 1);
 
+               if (!strcmp(name, "msr"))
+                       ph->env.msr_pmu_type = type;
+
                free(name);
                pmu_num--;
        }
@@ -2515,6 +2564,7 @@ int perf_session__read_header(struct perf_session *session)
                return -ENOMEM;
 
        session->evlist->env = &header->env;
+       session->machines.host.env = &header->env;
        if (perf_data_file__is_pipe(file))
                return perf_header__read_pipe(session);
 
index 396e4965f0c97469056324e55183883a58b151b5..05f27cb6b7e36a2e663f7c18fb22513bc5280af2 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/bitmap.h>
 #include <linux/types.h>
 #include "event.h"
-
+#include "env.h"
 
 enum {
        HEADER_RESERVED         = 0,    /* always cleared */
@@ -66,31 +66,6 @@ struct perf_header;
 int perf_file_header__read(struct perf_file_header *header,
                           struct perf_header *ph, int fd);
 
-struct perf_env {
-       char                    *hostname;
-       char                    *os_release;
-       char                    *version;
-       char                    *arch;
-       int                     nr_cpus_online;
-       int                     nr_cpus_avail;
-       char                    *cpu_desc;
-       char                    *cpuid;
-       unsigned long long      total_mem;
-
-       int                     nr_cmdline;
-       int                     nr_sibling_cores;
-       int                     nr_sibling_threads;
-       int                     nr_numa_nodes;
-       int                     nr_pmu_mappings;
-       int                     nr_groups;
-       char                    *cmdline;
-       const char              **cmdline_argv;
-       char                    *sibling_cores;
-       char                    *sibling_threads;
-       char                    *numa_nodes;
-       char                    *pmu_mappings;
-};
-
 struct perf_header {
        enum perf_header_version        version;
        bool                            needs_swap;
index 08b6cd945f1ece736249318835c044a735c6e29c..4fd37d6708cb973d2653c1ec4f6f17b086c712d0 100644 (file)
@@ -15,6 +15,8 @@ static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
 static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
+static bool hists__filter_entry_by_socket(struct hists *hists,
+                                         struct hist_entry *he);
 
 u16 hists__col_len(struct hists *hists, enum hist_column col)
 {
@@ -130,6 +132,18 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                }
+
+               if (h->mem_info->iaddr.sym) {
+                       symlen = (int)h->mem_info->iaddr.sym->namelen + 4
+                              + unresolved_col_width + 2;
+                       hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
+                                          symlen);
+               } else {
+                       symlen = unresolved_col_width + 4 + 2;
+                       hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
+                                          symlen);
+               }
+
                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
@@ -141,9 +155,12 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
+               hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }
 
+       hists__new_col_len(hists, HISTC_CPU, 3);
+       hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
@@ -452,6 +469,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
                        .map    = al->map,
                        .sym    = al->sym,
                },
+               .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
@@ -690,7 +708,7 @@ iter_finish_normal_entry(struct hist_entry_iter *iter,
 }
 
 static int
-iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
+iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
 {
        struct hist_entry **he_cache;
@@ -702,7 +720,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
         * cumulated only one time to prevent entries more than 100%
         * overhead.
         */
-       he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
+       he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
        if (he_cache == NULL)
                return -ENOMEM;
 
@@ -863,6 +881,8 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
        if (err)
                return err;
 
+       iter->max_stack = max_stack_depth;
+
        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;
@@ -1024,6 +1044,7 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
+       hists__filter_entry_by_socket(hists, he);
 }
 
 void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
@@ -1143,7 +1164,7 @@ void hists__output_resort(struct hists *hists, struct ui_progress *prog)
        struct perf_evsel *evsel = hists_to_evsel(hists);
        bool use_callchain;
 
-       if (evsel && !symbol_conf.show_ref_callgraph)
+       if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
                use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
        else
                use_callchain = symbol_conf.use_callchain;
@@ -1292,6 +1313,37 @@ void hists__filter_by_symbol(struct hists *hists)
        }
 }
 
+static bool hists__filter_entry_by_socket(struct hists *hists,
+                                         struct hist_entry *he)
+{
+       if ((hists->socket_filter > -1) &&
+           (he->socket != hists->socket_filter)) {
+               he->filtered |= (1 << HIST_FILTER__SOCKET);
+               return true;
+       }
+
+       return false;
+}
+
+void hists__filter_by_socket(struct hists *hists)
+{
+       struct rb_node *nd;
+
+       hists->stats.nr_non_filtered_samples = 0;
+
+       hists__reset_filter_stats(hists);
+       hists__reset_col_len(hists);
+
+       for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+               struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+               if (hists__filter_entry_by_socket(hists, h))
+                       continue;
+
+               hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
+       }
+}
+
 void events_stats__inc(struct events_stats *stats, u32 type)
 {
        ++stats->nr_events[0];
@@ -1517,6 +1569,7 @@ static int hists_evsel__init(struct perf_evsel *evsel)
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
+       hists->socket_filter = -1;
        return 0;
 }
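A brief sketch of how the socket filter wired in above might be driven from option or UI code; socket 0 is just an example value, and -1 (the default set in hists_evsel__init()) means no filtering. The wrapper function is hypothetical.

/* Sketch only: keep just the entries that were sampled on one socket. */
static void filter_to_socket(struct hists *hists, int socket)
{
        hists->socket_filter = socket;        /* -1 restores "no socket filter" */
        hists__filter_by_socket(hists);
}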
 
index de6d58e7f0d561db9e204939453a5ca8b6a9cc11..a48a2078d288f01b9edf10814571509803b674d1 100644 (file)
@@ -20,6 +20,7 @@ enum hist_filter {
        HIST_FILTER__SYMBOL,
        HIST_FILTER__GUEST,
        HIST_FILTER__HOST,
+       HIST_FILTER__SOCKET,
 };
 
 enum hist_column {
@@ -29,6 +30,7 @@ enum hist_column {
        HISTC_COMM,
        HISTC_PARENT,
        HISTC_CPU,
+       HISTC_SOCKET,
        HISTC_SRCLINE,
        HISTC_SRCFILE,
        HISTC_MISPREDICT,
@@ -47,6 +49,7 @@ enum hist_column {
        HISTC_MEM_LVL,
        HISTC_MEM_SNOOP,
        HISTC_MEM_DCACHELINE,
+       HISTC_MEM_IADDR_SYMBOL,
        HISTC_TRANSACTION,
        HISTC_CYCLES,
        HISTC_NR_COLS, /* Last entry */
@@ -70,6 +73,7 @@ struct hists {
        struct events_stats     stats;
        u64                     event_stream;
        u16                     col_len[HISTC_NR_COLS];
+       int                     socket_filter;
 };
 
 struct hist_entry_iter;
@@ -87,6 +91,7 @@ struct hist_entry_iter {
        int curr;
 
        bool hide_unresolved;
+       int max_stack;
 
        struct perf_evsel *evsel;
        struct perf_sample *sample;
@@ -144,11 +149,12 @@ size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp);
 void hists__filter_by_dso(struct hists *hists);
 void hists__filter_by_thread(struct hists *hists);
 void hists__filter_by_symbol(struct hists *hists);
+void hists__filter_by_socket(struct hists *hists);
 
 static inline bool hists__has_filter(struct hists *hists)
 {
        return hists->thread_filter || hists->dso_filter ||
-               hists->symbol_filter_str;
+               hists->symbol_filter_str || (hists->socket_filter > -1);
 }
 
 u16 hists__col_len(struct hists *hists, enum hist_column col);
index 8f149655f497302f39b5352cf099fbd71d4d02d7..07c644ed64c4ecd9809bf322297e5a3876e83aad 100644 (file)
@@ -5,4 +5,12 @@
 const char *get_arch_regstr(unsigned int n);
 #endif
 
+#ifdef HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
+/*
+ * Arch should support fetching the offset of a register in pt_regs
+ * by its name. See kernel's regs_query_register_offset in
+ * arch/xxx/kernel/ptrace.c.
+ */
+int regs_query_register_offset(const char *name);
+#endif
 #endif
index 2386322ece4fc8af0851fd2768cc20d1bf454fd4..0611d619a42e645baa4dfbbecca099b38ee23f21 100644 (file)
@@ -7,6 +7,17 @@ $(OUTPUT)util/intel-pt-decoder/inat-tables.c: $(inat_tables_script) $(inat_table
        $(call rule_mkdir)
        @$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@
 
-$(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
+$(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/intel-pt-insn-decoder.c util/intel-pt-decoder/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
+       @(test -d ../../kernel -a -d ../../tools -a -d ../perf && (( \
+       diff -B -I'^#include' util/intel-pt-decoder/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
+       diff -B -I'^#include' util/intel-pt-decoder/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
+       diff -B util/intel-pt-decoder/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
+       diff -B util/intel-pt-decoder/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
+       diff -B -I'^#include' util/intel-pt-decoder/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
+       diff -B -I'^#include' util/intel-pt-decoder/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
+       diff -B -I'^#include' util/intel-pt-decoder/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
+       || echo "Warning: Intel PT: x86 instruction decoder differs from kernel" >&2 )) || true
+       $(call rule_mkdir)
+       $(call if_changed_dep,cc_o_c)
 
 CFLAGS_intel-pt-insn-decoder.o += -I$(OUTPUT)util/intel-pt-decoder -Wno-override-init
index 22ba5022431958d82d5ade0fe4a1b3cbecfc63d8..9409d014b46c713de02df828cf9ad7ff726881e8 100644 (file)
@@ -650,7 +650,7 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
                if (data->from_mtc && timestamp < data->timestamp &&
                    data->timestamp - timestamp < decoder->tsc_slip)
                        return 1;
-               while (timestamp < data->timestamp)
+               if (timestamp < data->timestamp)
                        timestamp += (1ULL << 56);
                if (pkt_info->last_packet_type != INTEL_PT_CYC) {
                        if (data->from_mtc)
@@ -1191,7 +1191,7 @@ static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
                                        timestamp);
                        timestamp = decoder->timestamp;
                }
-               while (timestamp < decoder->timestamp) {
+               if (timestamp < decoder->timestamp) {
                        intel_pt_log_to("Wraparound timestamp", timestamp);
                        timestamp += (1ULL << 56);
                        decoder->tsc_timestamp = timestamp;
index d09c7d9f905002c8f07e833428dd897858826eb6..319bef33a64b2104ab7b17436e6c222f4d656d01 100644 (file)
 
 static FILE *f;
 static char log_name[MAX_LOG_NAME];
-static bool enable_logging;
+bool intel_pt_enable_logging;
 
 void intel_pt_log_enable(void)
 {
-       enable_logging = true;
+       intel_pt_enable_logging = true;
 }
 
 void intel_pt_log_disable(void)
 {
        if (f)
                fflush(f);
-       enable_logging = false;
+       intel_pt_enable_logging = false;
 }
 
 void intel_pt_log_set_name(const char *name)
@@ -80,7 +80,7 @@ static void intel_pt_print_no_data(uint64_t pos, int indent)
 
 static int intel_pt_log_open(void)
 {
-       if (!enable_logging)
+       if (!intel_pt_enable_logging)
                return -1;
 
        if (f)
@@ -91,15 +91,15 @@ static int intel_pt_log_open(void)
 
        f = fopen(log_name, "w+");
        if (!f) {
-               enable_logging = false;
+               intel_pt_enable_logging = false;
                return -1;
        }
 
        return 0;
 }
 
-void intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
-                        uint64_t pos, const unsigned char *buf)
+void __intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
+                          uint64_t pos, const unsigned char *buf)
 {
        char desc[INTEL_PT_PKT_DESC_MAX];
 
@@ -111,7 +111,7 @@ void intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
        fprintf(f, "%s\n", desc);
 }
 
-void intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
+void __intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
 {
        char desc[INTEL_PT_INSN_DESC_MAX];
        size_t len = intel_pt_insn->length;
@@ -128,7 +128,8 @@ void intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
                fprintf(f, "Bad instruction!\n");
 }
 
-void intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
+void __intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn,
+                                uint64_t ip)
 {
        char desc[INTEL_PT_INSN_DESC_MAX];
 
@@ -142,7 +143,7 @@ void intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
                fprintf(f, "Bad instruction!\n");
 }
 
-void intel_pt_log(const char *fmt, ...)
+void __intel_pt_log(const char *fmt, ...)
 {
        va_list args;
 
index db3942f83677eb51c2f8681e1811cb7521683759..debe751dc3d68403b31c9b89349657c400648d49 100644 (file)
@@ -25,20 +25,46 @@ void intel_pt_log_enable(void);
 void intel_pt_log_disable(void);
 void intel_pt_log_set_name(const char *name);
 
-void intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
-                        uint64_t pos, const unsigned char *buf);
+void __intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
+                          uint64_t pos, const unsigned char *buf);
 
 struct intel_pt_insn;
 
-void intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip);
-void intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn,
-                              uint64_t ip);
+void __intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip);
+void __intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn,
+                                uint64_t ip);
 
 __attribute__((format(printf, 1, 2)))
-void intel_pt_log(const char *fmt, ...);
+void __intel_pt_log(const char *fmt, ...);
+
+#define intel_pt_log(fmt, ...) \
+       do { \
+               if (intel_pt_enable_logging) \
+                       __intel_pt_log(fmt, ##__VA_ARGS__); \
+       } while (0)
+
+#define intel_pt_log_packet(arg, ...) \
+       do { \
+               if (intel_pt_enable_logging) \
+                       __intel_pt_log_packet(arg, ##__VA_ARGS__); \
+       } while (0)
+
+#define intel_pt_log_insn(arg, ...) \
+       do { \
+               if (intel_pt_enable_logging) \
+                       __intel_pt_log_insn(arg, ##__VA_ARGS__); \
+       } while (0)
+
+#define intel_pt_log_insn_no_data(arg, ...) \
+       do { \
+               if (intel_pt_enable_logging) \
+                       __intel_pt_log_insn_no_data(arg, ##__VA_ARGS__); \
+       } while (0)
 
 #define x64_fmt "0x%" PRIx64
 
+extern bool intel_pt_enable_logging;
+
 static inline void intel_pt_log_at(const char *msg, uint64_t u)
 {
        intel_pt_log("%s at " x64_fmt "\n", msg, u);
index 816488c0b97e3540a59af5c5f4c001fed1f5ba41..d388de72eacaa353ba14e3340a30ef64b63bcd97 100644 (file)
@@ -353,8 +353,12 @@ AVXcode: 1
 17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
 18: Grp16 (1A)
 19:
-1a: BNDCL Ev,Gv | BNDCU Ev,Gv | BNDMOV Gv,Ev | BNDLDX Gv,Ev,Gv
-1b: BNDCN Ev,Gv | BNDMOV Ev,Gv | BNDMK Gv,Ev | BNDSTX Ev,GV,Gv
+# Intel SDM opcode map does not list MPX instructions. For now using Gv for
+# bnd registers and Ev for everything else is OK because the instruction
+# decoder does not use the information except as an indication that there is
+# a ModR/M byte.
+1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
+1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
 1c:
 1d:
 1e:
@@ -732,6 +736,12 @@ bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
 be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
 bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
 # 0x0f 0x38 0xc0-0xff
+c8: sha1nexte Vdq,Wdq
+c9: sha1msg1 Vdq,Wdq
+ca: sha1msg2 Vdq,Wdq
+cb: sha256rnds2 Vdq,Wdq
+cc: sha256msg1 Vdq,Wdq
+cd: sha256msg2 Vdq,Wdq
 db: VAESIMC Vdq,Wdq (66),(v1)
 dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
 dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
@@ -790,6 +800,7 @@ AVXcode: 3
 61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
 62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
 63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
+cc: sha1rnds4 Vdq,Wdq,Ib
 df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
 f0: RORX Gy,Ey,Ib (F2),(v)
 EndTable
@@ -874,7 +885,7 @@ GrpTable: Grp7
 2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
 3: LIDT Ms
 4: SMSW Mw/Rv
-5:
+5: rdpkru (110),(11B) | wrpkru (111),(11B)
 6: LMSW Ew
 7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
 EndTable
@@ -888,6 +899,9 @@ EndTable
 
 GrpTable: Grp9
 1: CMPXCHG8B/16B Mq/Mdq
+3: xrstors
+4: xsavec
+5: xsaves
 6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
 7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
 EndTable
@@ -932,8 +946,8 @@ GrpTable: Grp15
 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
 4: XSAVE
 5: XRSTOR | lfence (11B)
-6: XSAVEOPT | mfence (11B)
-7: clflush | sfence (11B)
+6: XSAVEOPT | clwb (66) | mfence (11B)
+7: clflush | clflushopt (66) | sfence (11B) | pcommit (66),(11B)
 EndTable
 
 GrpTable: Grp16
index 535d86f8e4d17b802a4c0473a7bf7ff937a10b15..97f963a3dcb95157f8dcaf3e568d3ac487989608 100644 (file)
@@ -22,6 +22,7 @@
 #include "../perf.h"
 #include "session.h"
 #include "machine.h"
+#include "sort.h"
 #include "tool.h"
 #include "event.h"
 #include "evlist.h"
@@ -63,6 +64,7 @@ struct intel_pt {
        bool data_queued;
        bool est_tsc;
        bool sync_switch;
+       bool mispred_all;
        int have_sched_switch;
        u32 pmu_type;
        u64 kernel_start;
@@ -115,6 +117,9 @@ struct intel_pt_queue {
        void *decoder;
        const struct intel_pt_state *state;
        struct ip_callchain *chain;
+       struct branch_stack *last_branch;
+       struct branch_stack *last_branch_rb;
+       size_t last_branch_pos;
        union perf_event *event_buf;
        bool on_heap;
        bool stop;
@@ -675,6 +680,19 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
                        goto out_free;
        }
 
+       if (pt->synth_opts.last_branch) {
+               size_t sz = sizeof(struct branch_stack);
+
+               sz += pt->synth_opts.last_branch_sz *
+                     sizeof(struct branch_entry);
+               ptq->last_branch = zalloc(sz);
+               if (!ptq->last_branch)
+                       goto out_free;
+               ptq->last_branch_rb = zalloc(sz);
+               if (!ptq->last_branch_rb)
+                       goto out_free;
+       }
+
        ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
        if (!ptq->event_buf)
                goto out_free;
@@ -720,7 +738,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
 
                if (!params.period) {
                        params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
-                       params.period = 1000;
+                       params.period = 1;
                }
        }
 
@@ -732,6 +750,8 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
 
 out_free:
        zfree(&ptq->event_buf);
+       zfree(&ptq->last_branch);
+       zfree(&ptq->last_branch_rb);
        zfree(&ptq->chain);
        free(ptq);
        return NULL;
@@ -746,6 +766,8 @@ static void intel_pt_free_queue(void *priv)
        thread__zput(ptq->thread);
        intel_pt_decoder_free(ptq->decoder);
        zfree(&ptq->event_buf);
+       zfree(&ptq->last_branch);
+       zfree(&ptq->last_branch_rb);
        zfree(&ptq->chain);
        free(ptq);
 }
@@ -876,6 +898,58 @@ static int intel_pt_setup_queues(struct intel_pt *pt)
        return 0;
 }
 
+static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
+{
+       struct branch_stack *bs_src = ptq->last_branch_rb;
+       struct branch_stack *bs_dst = ptq->last_branch;
+       size_t nr = 0;
+
+       bs_dst->nr = bs_src->nr;
+
+       if (!bs_src->nr)
+               return;
+
+       nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
+       memcpy(&bs_dst->entries[0],
+              &bs_src->entries[ptq->last_branch_pos],
+              sizeof(struct branch_entry) * nr);
+
+       if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
+               memcpy(&bs_dst->entries[nr],
+                      &bs_src->entries[0],
+                      sizeof(struct branch_entry) * ptq->last_branch_pos);
+       }
+}
+
+static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
+{
+       ptq->last_branch_pos = 0;
+       ptq->last_branch_rb->nr = 0;
+}
+
+static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
+{
+       const struct intel_pt_state *state = ptq->state;
+       struct branch_stack *bs = ptq->last_branch_rb;
+       struct branch_entry *be;
+
+       if (!ptq->last_branch_pos)
+               ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;
+
+       ptq->last_branch_pos -= 1;
+
+       be              = &bs->entries[ptq->last_branch_pos];
+       be->from        = state->from_ip;
+       be->to          = state->to_ip;
+       be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
+       be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
+       /* No support for mispredict */
+       be->flags.mispred = ptq->pt->mispred_all;
+
+       if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
+               bs->nr += 1;
+}
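
Standalone sketch, not the perf structures themselves: the helpers above implement a fixed-size ring buffer that is filled backwards, so the entry at last_branch_pos is always the newest branch; copying out takes the run from last_branch_pos to the end first and, once the buffer has wrapped, appends the older entries from the start. Under those assumptions:

    #include <stdio.h>
    #include <string.h>

    #define SZ 4                            /* stands in for synth_opts.last_branch_sz */

    struct entry { unsigned long from, to; };

    static struct entry rb[SZ];             /* ring buffer (last_branch_rb) */
    static size_t pos;                      /* last_branch_pos */
    static size_t nr;                       /* valid entries, capped at SZ */

    static void update(unsigned long from, unsigned long to)
    {
            if (!pos)
                    pos = SZ;
            pos--;                          /* newest entry moves towards index 0 */
            rb[pos].from = from;
            rb[pos].to = to;
            if (nr < SZ)
                    nr++;
    }

    static void copy_out(struct entry *dst)
    {
            size_t n = SZ - pos;            /* newest-first run from pos to the end */

            memcpy(dst, &rb[pos], n * sizeof(*dst));
            if (nr >= SZ)                   /* wrapped: older entries sit before pos */
                    memcpy(dst + n, rb, pos * sizeof(*dst));
    }

    int main(void)
    {
            struct entry out[SZ];
            unsigned long i;

            for (i = 1; i <= 6; i++)        /* six branches into a 4-entry buffer */
                    update(i, i + 100);
            copy_out(out);
            for (i = 0; i < nr; i++)        /* prints 6, 5, 4, 3: newest first */
                    printf("%lu -> %lu\n", out[i].from, out[i].to);
            return 0;
    }
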
+
 static int intel_pt_inject_event(union perf_event *event,
                                 struct perf_sample *sample, u64 type,
                                 bool swapped)
@@ -890,6 +964,13 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
+       struct dummy_branch_stack {
+               u64                     nr;
+               struct branch_entry     entries;
+       } dummy_bs;
+
+       if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
+               return 0;
 
        event->sample.header.type = PERF_RECORD_SAMPLE;
        event->sample.header.misc = PERF_RECORD_MISC_USER;
@@ -909,8 +990,20 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
        sample.flags = ptq->flags;
        sample.insn_len = ptq->insn_len;
 
-       if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
-               return 0;
+       /*
+        * perf report cannot handle events without a branch stack when using
+        * SORT_MODE__BRANCH so make a dummy one.
+        */
+       if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
+               dummy_bs = (struct dummy_branch_stack){
+                       .nr = 1,
+                       .entries = {
+                               .from = sample.ip,
+                               .to = sample.addr,
+                       },
+               };
+               sample.branch_stack = (struct branch_stack *)&dummy_bs;
+       }
 
        if (pt->synth_opts.inject) {
                ret = intel_pt_inject_event(event, &sample,
@@ -961,6 +1054,11 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
                sample.callchain = ptq->chain;
        }
 
+       if (pt->synth_opts.last_branch) {
+               intel_pt_copy_last_branch_rb(ptq);
+               sample.branch_stack = ptq->last_branch;
+       }
+
        if (pt->synth_opts.inject) {
                ret = intel_pt_inject_event(event, &sample,
                                            pt->instructions_sample_type,
@@ -974,6 +1072,9 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
                pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n",
                       ret);
 
+       if (pt->synth_opts.last_branch)
+               intel_pt_reset_last_branch_rb(ptq);
+
        return ret;
 }
 
@@ -1008,6 +1109,11 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
                sample.callchain = ptq->chain;
        }
 
+       if (pt->synth_opts.last_branch) {
+               intel_pt_copy_last_branch_rb(ptq);
+               sample.branch_stack = ptq->last_branch;
+       }
+
        if (pt->synth_opts.inject) {
                ret = intel_pt_inject_event(event, &sample,
                                            pt->transactions_sample_type,
@@ -1021,6 +1127,9 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
                pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
                       ret);
 
+       if (pt->synth_opts.last_branch)
+               intel_pt_reset_last_branch_rb(ptq);
+
        return ret;
 }
 
@@ -1116,6 +1225,9 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
                        return err;
        }
 
+       if (pt->synth_opts.last_branch)
+               intel_pt_update_last_branch_rb(ptq);
+
        if (!pt->sync_switch)
                return 0;
 
@@ -1145,16 +1257,18 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
        return 0;
 }
 
-static u64 intel_pt_switch_ip(struct machine *machine, u64 *ptss_ip)
+static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
 {
+       struct machine *machine = pt->machine;
        struct map *map;
        struct symbol *sym, *start;
        u64 ip, switch_ip = 0;
+       const char *ptss;
 
        if (ptss_ip)
                *ptss_ip = 0;
 
-       map = machine__kernel_map(machine, MAP__FUNCTION);
+       map = machine__kernel_map(machine);
        if (!map)
                return 0;
 
@@ -1177,8 +1291,13 @@ static u64 intel_pt_switch_ip(struct machine *machine, u64 *ptss_ip)
        if (!switch_ip || !ptss_ip)
                return 0;
 
+       if (pt->have_sched_switch == 1)
+               ptss = "perf_trace_sched_switch";
+       else
+               ptss = "__perf_event_task_sched_out";
+
        for (sym = start; sym; sym = dso__next_symbol(sym)) {
-               if (!strcmp(sym->name, "perf_trace_sched_switch")) {
+               if (!strcmp(sym->name, ptss)) {
                        ip = map->unmap_ip(map, sym->start);
                        if (ip >= map->start && ip < map->end) {
                                *ptss_ip = ip;
@@ -1198,11 +1317,11 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
 
        if (!pt->kernel_start) {
                pt->kernel_start = machine__kernel_start(pt->machine);
-               if (pt->per_cpu_mmaps && pt->have_sched_switch &&
+               if (pt->per_cpu_mmaps &&
+                   (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
                    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
                    !pt->sampling_mode) {
-                       pt->switch_ip = intel_pt_switch_ip(pt->machine,
-                                                          &pt->ptss_ip);
+                       pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
                        if (pt->switch_ip) {
                                intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
                                             pt->switch_ip, pt->ptss_ip);
@@ -1387,31 +1506,18 @@ static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
        return NULL;
 }
 
-static int intel_pt_process_switch(struct intel_pt *pt,
-                                  struct perf_sample *sample)
+static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
+                               u64 timestamp)
 {
        struct intel_pt_queue *ptq;
-       struct perf_evsel *evsel;
-       pid_t tid;
-       int cpu, err;
-
-       evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
-       if (evsel != pt->switch_evsel)
-               return 0;
-
-       tid = perf_evsel__intval(evsel, sample, "next_pid");
-       cpu = sample->cpu;
-
-       intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
-                    cpu, tid, sample->time, perf_time_to_tsc(sample->time,
-                    &pt->tc));
+       int err;
 
        if (!pt->sync_switch)
-               goto out;
+               return 1;
 
        ptq = intel_pt_cpu_to_ptq(pt, cpu);
        if (!ptq)
-               goto out;
+               return 1;
 
        switch (ptq->switch_state) {
        case INTEL_PT_SS_NOT_TRACING:
@@ -1424,7 +1530,7 @@ static int intel_pt_process_switch(struct intel_pt *pt,
                return 0;
        case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
                if (!ptq->on_heap) {
-                       ptq->timestamp = perf_time_to_tsc(sample->time,
+                       ptq->timestamp = perf_time_to_tsc(timestamp,
                                                          &pt->tc);
                        err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
                                                 ptq->timestamp);
@@ -1441,10 +1547,76 @@ static int intel_pt_process_switch(struct intel_pt *pt,
        default:
                break;
        }
-out:
+
+       return 1;
+}
+
+static int intel_pt_process_switch(struct intel_pt *pt,
+                                  struct perf_sample *sample)
+{
+       struct perf_evsel *evsel;
+       pid_t tid;
+       int cpu, ret;
+
+       evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
+       if (evsel != pt->switch_evsel)
+               return 0;
+
+       tid = perf_evsel__intval(evsel, sample, "next_pid");
+       cpu = sample->cpu;
+
+       intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
+                    cpu, tid, sample->time, perf_time_to_tsc(sample->time,
+                    &pt->tc));
+
+       ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
+       if (ret <= 0)
+               return ret;
+
        return machine__set_current_tid(pt->machine, cpu, -1, tid);
 }
 
+static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
+                                  struct perf_sample *sample)
+{
+       bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
+       pid_t pid, tid;
+       int cpu, ret;
+
+       cpu = sample->cpu;
+
+       if (pt->have_sched_switch == 3) {
+               if (!out)
+                       return 0;
+               if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
+                       pr_err("Expecting CPU-wide context switch event\n");
+                       return -EINVAL;
+               }
+               pid = event->context_switch.next_prev_pid;
+               tid = event->context_switch.next_prev_tid;
+       } else {
+               if (out)
+                       return 0;
+               pid = sample->pid;
+               tid = sample->tid;
+       }
+
+       if (tid == -1) {
+               pr_err("context_switch event has no tid\n");
+               return -EINVAL;
+       }
+
+       intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
+                    cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
+                    &pt->tc));
+
+       ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
+       if (ret <= 0)
+               return ret;
+
+       return machine__set_current_tid(pt->machine, cpu, pid, tid);
+}
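
For reference, not part of this diff: the side-band record consumed above follows the layout documented for PERF_RECORD_SWITCH_CPU_WIDE in include/uapi/linux/perf_event.h. On a switch-out (PERF_RECORD_MISC_SWITCH_OUT set in header.misc) the next_prev_* fields identify the task being switched in on that CPU, which is why the have_sched_switch == 3 path only acts on switch-out events.

    #include <linux/perf_event.h>
    #include <linux/types.h>

    /* Shape of a PERF_RECORD_SWITCH_CPU_WIDE record (reference only). */
    struct switch_cpu_wide_record {
            struct perf_event_header header;        /* misc: PERF_RECORD_MISC_SWITCH_OUT */
            __u32 next_prev_pid;                    /* the "other" task's pid */
            __u32 next_prev_tid;                    /* the "other" task's tid */
            /* struct sample_id follows when sample_id_all is set */
    };
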
+
 static int intel_pt_process_itrace_start(struct intel_pt *pt,
                                         union perf_event *event,
                                         struct perf_sample *sample)
@@ -1515,6 +1687,9 @@ static int intel_pt_process_event(struct perf_session *session,
                err = intel_pt_process_switch(pt, sample);
        else if (event->header.type == PERF_RECORD_ITRACE_START)
                err = intel_pt_process_itrace_start(pt, event, sample);
+       else if (event->header.type == PERF_RECORD_SWITCH ||
+                event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
+               err = intel_pt_context_switch(pt, event, sample);
 
        intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
                     perf_event__name(event->header.type), event->header.type,
@@ -1700,6 +1875,8 @@ static int intel_pt_synth_events(struct intel_pt *pt,
                pt->instructions_sample_period = attr.sample_period;
                if (pt->synth_opts.callchain)
                        attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
+               if (pt->synth_opts.last_branch)
+                       attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
                pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
                         id, (u64)attr.sample_type);
                err = intel_pt_synth_event(session, &attr, id);
@@ -1719,6 +1896,8 @@ static int intel_pt_synth_events(struct intel_pt *pt,
                attr.sample_period = 1;
                if (pt->synth_opts.callchain)
                        attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
+               if (pt->synth_opts.last_branch)
+                       attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
                pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
                         id, (u64)attr.sample_type);
                err = intel_pt_synth_event(session, &attr, id);
@@ -1745,6 +1924,7 @@ static int intel_pt_synth_events(struct intel_pt *pt,
                attr.sample_period = 1;
                attr.sample_type |= PERF_SAMPLE_ADDR;
                attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN;
+               attr.sample_type &= ~(u64)PERF_SAMPLE_BRANCH_STACK;
                pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
                         id, (u64)attr.sample_type);
                err = intel_pt_synth_event(session, &attr, id);
@@ -1777,6 +1957,28 @@ static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
        return NULL;
 }
 
+static bool intel_pt_find_switch(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each(evlist, evsel) {
+               if (evsel->attr.context_switch)
+                       return true;
+       }
+
+       return false;
+}
+
+static int intel_pt_perf_config(const char *var, const char *value, void *data)
+{
+       struct intel_pt *pt = data;
+
+       if (!strcmp(var, "intel-pt.mispred-all"))
+               pt->mispred_all = perf_config_bool(var, value);
+
+       return 0;
+}
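
The perf_config() callback above makes the behaviour configurable from the user's perf config file; assuming the usual config file syntax, a setting like the following (illustrative) marks every synthesized branch as mispredicted, giving branch-miss oriented report views something to aggregate:

    [intel-pt]
            mispred-all = true
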
+
 static const char * const intel_pt_info_fmts[] = {
        [INTEL_PT_PMU_TYPE]             = "  PMU Type            %"PRId64"\n",
        [INTEL_PT_TIME_SHIFT]           = "  Time Shift          %"PRIu64"\n",
@@ -1821,6 +2023,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
        if (!pt)
                return -ENOMEM;
 
+       perf_config(intel_pt_perf_config, pt);
+
        err = auxtrace_queues__init(&pt->queues);
        if (err)
                goto err_free;
@@ -1888,6 +2092,10 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
                        pr_err("%s: missing sched_switch event\n", __func__);
                        goto err_delete_thread;
                }
+       } else if (pt->have_sched_switch == 2 &&
+                  !intel_pt_find_switch(session->evlist)) {
+               pr_err("%s: missing context_switch attribute flag\n", __func__);
+               goto err_delete_thread;
        }
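
A have_sched_switch value of 2 means decoding is meant to use the kernel's context-switch side-band records rather than the sched_switch tracepoint, so the check above rejects a perf.data file in which no event was opened with attr.context_switch set. In practice that flag is what perf record's --switch-events option requests, roughly (illustrative command line):

    perf record --switch-events -e intel_pt// -a -- sleep 1
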
 
        if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
index 6309f7ceb08f14cfbf0ce0b3cd23d82d4603a60e..5ef90be2a2497a05731a5fb8d24af33a192f2011 100644 (file)
@@ -35,6 +35,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
        machine->last_match = NULL;
 
        machine->vdso_info = NULL;
+       machine->env = NULL;
 
        machine->pid = pid;
 
@@ -624,7 +625,7 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
 {
        int i;
        size_t printed = 0;
-       struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
+       struct dso *kdso = machine__kernel_map(machine)->dso;
 
        if (kdso->has_build_id) {
                char filename[PATH_MAX];
@@ -740,6 +741,7 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
 
        for (type = 0; type < MAP__NR_TYPES; ++type) {
                struct kmap *kmap;
+               struct map *map;
 
                machine->vmlinux_maps[type] = map__new2(start, kernel, type);
                if (machine->vmlinux_maps[type] == NULL)
@@ -748,13 +750,13 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
                machine->vmlinux_maps[type]->map_ip =
                        machine->vmlinux_maps[type]->unmap_ip =
                                identity__map_ip;
-               kmap = map__kmap(machine->vmlinux_maps[type]);
+               map = __machine__kernel_map(machine, type);
+               kmap = map__kmap(map);
                if (!kmap)
                        return -1;
 
                kmap->kmaps = &machine->kmaps;
-               map_groups__insert(&machine->kmaps,
-                                  machine->vmlinux_maps[type]);
+               map_groups__insert(&machine->kmaps, map);
        }
 
        return 0;
@@ -766,13 +768,13 @@ void machine__destroy_kernel_maps(struct machine *machine)
 
        for (type = 0; type < MAP__NR_TYPES; ++type) {
                struct kmap *kmap;
+               struct map *map = __machine__kernel_map(machine, type);
 
-               if (machine->vmlinux_maps[type] == NULL)
+               if (map == NULL)
                        continue;
 
-               kmap = map__kmap(machine->vmlinux_maps[type]);
-               map_groups__remove(&machine->kmaps,
-                                  machine->vmlinux_maps[type]);
+               kmap = map__kmap(map);
+               map_groups__remove(&machine->kmaps, map);
                if (kmap && kmap->ref_reloc_sym) {
                        /*
                         * ref_reloc_sym is shared among all maps, so free just
@@ -866,7 +868,7 @@ int machines__create_kernel_maps(struct machines *machines, pid_t pid)
 int machine__load_kallsyms(struct machine *machine, const char *filename,
                           enum map_type type, symbol_filter_t filter)
 {
-       struct map *map = machine->vmlinux_maps[type];
+       struct map *map = machine__kernel_map(machine);
        int ret = dso__load_kallsyms(map->dso, filename, map, filter);
 
        if (ret > 0) {
@@ -885,7 +887,7 @@ int machine__load_kallsyms(struct machine *machine, const char *filename,
 int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
                               symbol_filter_t filter)
 {
-       struct map *map = machine->vmlinux_maps[type];
+       struct map *map = machine__kernel_map(machine);
        int ret = dso__load_vmlinux_path(map->dso, map, filter);
 
        if (ret > 0)
@@ -1243,8 +1245,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                        /*
                         * preload dso of guest kernel and modules
                         */
-                       dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
-                                 NULL);
+                       dso__load(kernel, machine__kernel_map(machine), NULL);
                }
        }
        return 0;
@@ -1830,7 +1831,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
        }
 
 check_calls:
-       if (chain->nr > PERF_MAX_STACK_DEPTH) {
+       if (chain->nr > PERF_MAX_STACK_DEPTH && (int)chain->nr > max_stack) {
                pr_warning("corrupted callchain. skipping...\n");
                return 0;
        }
@@ -1996,7 +1997,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
 
 int machine__get_kernel_start(struct machine *machine)
 {
-       struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
+       struct map *map = machine__kernel_map(machine);
        int err = 0;
 
        /*
index ea5cb4a621db80056f008d3e3f71ddecbda761d8..2c2b443df5ba796c43ee12c6c0f2061b5d7c7b80 100644 (file)
@@ -34,6 +34,7 @@ struct machine {
        struct list_head  dead_threads;
        struct thread     *last_match;
        struct vdso_info  *vdso_info;
+       struct perf_env   *env;
        struct dsos       dsos;
        struct map_groups kmaps;
        struct map        *vmlinux_maps[MAP__NR_TYPES];
@@ -47,11 +48,17 @@ struct machine {
 };
 
 static inline
-struct map *machine__kernel_map(struct machine *machine, enum map_type type)
+struct map *__machine__kernel_map(struct machine *machine, enum map_type type)
 {
        return machine->vmlinux_maps[type];
 }
 
+static inline
+struct map *machine__kernel_map(struct machine *machine)
+{
+       return __machine__kernel_map(machine, MAP__FUNCTION);
+}
+
 int machine__get_kernel_start(struct machine *machine);
 
 static inline u64 machine__kernel_start(struct machine *machine)
index b1c475d9b240393e73614bc249e9e4b3dc2eedfc..4e38c396a897ad9599d1addb1fb464b5cdfa4129 100644 (file)
@@ -235,7 +235,7 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
  */
 bool __map__is_kernel(const struct map *map)
 {
-       return map->groups->machine->vmlinux_maps[map->type] == map;
+       return __machine__kernel_map(map->groups->machine, map->type) == map;
 }
 
 static void map__exit(struct map *map)
@@ -553,13 +553,9 @@ struct symbol *map_groups__find_symbol(struct map_groups *mg,
        return NULL;
 }
 
-struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
-                                              enum map_type type,
-                                              const char *name,
-                                              struct map **mapp,
-                                              symbol_filter_t filter)
+struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
+                                        struct map **mapp, symbol_filter_t filter)
 {
-       struct maps *maps = &mg->maps[type];
        struct symbol *sym;
        struct rb_node *nd;
 
@@ -583,6 +579,17 @@ out:
        return sym;
 }
 
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
+                                              enum map_type type,
+                                              const char *name,
+                                              struct map **mapp,
+                                              symbol_filter_t filter)
+{
+       struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp, filter);
+
+       return sym;
+}
+
 int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
 {
        if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
index 57829e89b78b2f22e1977bb7c2ca2c315bcfa5f0..7309d64ce39e17d74416a1af04d57249c0cf5295 100644 (file)
@@ -190,6 +190,8 @@ void maps__remove(struct maps *maps, struct map *map);
 struct map *maps__find(struct maps *maps, u64 addr);
 struct map *maps__first(struct maps *maps);
 struct map *map__next(struct map *map);
+struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
+                                         struct map **mapp, symbol_filter_t filter);
 void map_groups__init(struct map_groups *mg, struct machine *machine);
 void map_groups__exit(struct map_groups *mg);
 int map_groups__clone(struct map_groups *mg,
index a3b1e13a05c0dd493e5bbb8a394705bff6e73fd2..355eecf6bf598ba051dde4df6368747e55fb141a 100644 (file)
@@ -27,6 +27,7 @@ static const struct branch_mode branch_modes[] = {
        BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
        BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
        BRANCH_OPT("ind_jmp", PERF_SAMPLE_BRANCH_IND_JUMP),
+       BRANCH_OPT("call", PERF_SAMPLE_BRANCH_CALL),
        BRANCH_END
 };
 
index 21ed6ee63da9747d1215ac638f936357c8df2e8c..bee60583839a7cdae0dd44f5c48dd818d53cb5e4 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/hw_breakpoint.h>
+#include <linux/err.h>
 #include "util.h"
 #include "../perf.h"
 #include "evlist.h"
@@ -10,8 +11,9 @@
 #include "symbol.h"
 #include "cache.h"
 #include "header.h"
+#include "bpf-loader.h"
 #include "debug.h"
-#include <api/fs/debugfs.h>
+#include <api/fs/tracing_path.h>
 #include "parse-events-bison.h"
 #define YY_EXTRA_TYPE int
 #include "parse-events-flex.h"
@@ -26,6 +28,8 @@
 extern int parse_events_debug;
 #endif
 int parse_events_parse(void *data, void *scanner);
+static int get_config_terms(struct list_head *head_config,
+                           struct list_head *head_terms __maybe_unused);
 
 static struct perf_pmu_event_symbol *perf_pmu_events_list;
 /*
@@ -386,32 +390,72 @@ int parse_events_add_cache(struct list_head *list, int *idx,
        return add_event(list, idx, &attr, name, NULL);
 }
 
+static void tracepoint_error(struct parse_events_error *e, int err,
+                            char *sys, char *name)
+{
+       char help[BUFSIZ];
+
+       /*
+        * We get error directly from syscall errno ( > 0),
+        * or from encoded pointer's error ( < 0).
+        */
+       err = abs(err);
+
+       switch (err) {
+       case EACCES:
+               e->str = strdup("can't access trace events");
+               break;
+       case ENOENT:
+               e->str = strdup("unknown tracepoint");
+               break;
+       default:
+               e->str = strdup("failed to add tracepoint");
+               break;
+       }
+
+       tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
+       e->help = strdup(help);
+}
+
 static int add_tracepoint(struct list_head *list, int *idx,
-                         char *sys_name, char *evt_name)
+                         char *sys_name, char *evt_name,
+                         struct parse_events_error *err,
+                         struct list_head *head_config)
 {
        struct perf_evsel *evsel;
 
        evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++);
-       if (!evsel)
-               return -ENOMEM;
+       if (IS_ERR(evsel)) {
+               tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name);
+               return PTR_ERR(evsel);
+       }
 
-       list_add_tail(&evsel->node, list);
+       if (head_config) {
+               LIST_HEAD(config_terms);
 
+               if (get_config_terms(head_config, &config_terms))
+                       return -ENOMEM;
+               list_splice(&config_terms, &evsel->config_terms);
+       }
+
+       list_add_tail(&evsel->node, list);
        return 0;
 }
 
 static int add_tracepoint_multi_event(struct list_head *list, int *idx,
-                                     char *sys_name, char *evt_name)
+                                     char *sys_name, char *evt_name,
+                                     struct parse_events_error *err,
+                                     struct list_head *head_config)
 {
        char evt_path[MAXPATHLEN];
        struct dirent *evt_ent;
        DIR *evt_dir;
-       int ret = 0;
+       int ret = 0, found = 0;
 
        snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
        evt_dir = opendir(evt_path);
        if (!evt_dir) {
-               perror("Can't open event dir");
+               tracepoint_error(err, errno, sys_name, evt_name);
                return -1;
        }
 
@@ -425,7 +469,15 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
                if (!strglobmatch(evt_ent->d_name, evt_name))
                        continue;
 
-               ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
+               found++;
+
+               ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
+                                    err, head_config);
+       }
+
+       if (!found) {
+               tracepoint_error(err, ENOENT, sys_name, evt_name);
+               ret = -1;
        }
 
        closedir(evt_dir);
@@ -433,15 +485,21 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
 }
 
 static int add_tracepoint_event(struct list_head *list, int *idx,
-                               char *sys_name, char *evt_name)
+                               char *sys_name, char *evt_name,
+                               struct parse_events_error *err,
+                               struct list_head *head_config)
 {
        return strpbrk(evt_name, "*?") ?
-              add_tracepoint_multi_event(list, idx, sys_name, evt_name) :
-              add_tracepoint(list, idx, sys_name, evt_name);
+              add_tracepoint_multi_event(list, idx, sys_name, evt_name,
+                                         err, head_config) :
+              add_tracepoint(list, idx, sys_name, evt_name,
+                             err, head_config);
 }
 
 static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
-                                   char *sys_name, char *evt_name)
+                                   char *sys_name, char *evt_name,
+                                   struct parse_events_error *err,
+                                   struct list_head *head_config)
 {
        struct dirent *events_ent;
        DIR *events_dir;
@@ -449,7 +507,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
 
        events_dir = opendir(tracing_events_path);
        if (!events_dir) {
-               perror("Can't open event dir");
+               tracepoint_error(err, errno, sys_name, evt_name);
                return -1;
        }
 
@@ -465,20 +523,135 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
                        continue;
 
                ret = add_tracepoint_event(list, idx, events_ent->d_name,
-                                          evt_name);
+                                          evt_name, err, head_config);
        }
 
        closedir(events_dir);
        return ret;
 }
 
-int parse_events_add_tracepoint(struct list_head *list, int *idx,
-                               char *sys, char *event)
+struct __add_bpf_event_param {
+       struct parse_events_evlist *data;
+       struct list_head *list;
+};
+
+static int add_bpf_event(struct probe_trace_event *tev, int fd,
+                        void *_param)
 {
-       if (strpbrk(sys, "*?"))
-               return add_tracepoint_multi_sys(list, idx, sys, event);
-       else
-               return add_tracepoint_event(list, idx, sys, event);
+       LIST_HEAD(new_evsels);
+       struct __add_bpf_event_param *param = _param;
+       struct parse_events_evlist *evlist = param->data;
+       struct list_head *list = param->list;
+       struct perf_evsel *pos;
+       int err;
+
+       pr_debug("add bpf event %s:%s and attach bpf program %d\n",
+                tev->group, tev->event, fd);
+
+       err = parse_events_add_tracepoint(&new_evsels, &evlist->idx, tev->group,
+                                         tev->event, evlist->error, NULL);
+       if (err) {
+               struct perf_evsel *evsel, *tmp;
+
+               pr_debug("Failed to add BPF event %s:%s\n",
+                        tev->group, tev->event);
+               list_for_each_entry_safe(evsel, tmp, &new_evsels, node) {
+                       list_del(&evsel->node);
+                       perf_evsel__delete(evsel);
+               }
+               return err;
+       }
+       pr_debug("adding %s:%s\n", tev->group, tev->event);
+
+       list_for_each_entry(pos, &new_evsels, node) {
+               pr_debug("adding %s:%s to %p\n",
+                        tev->group, tev->event, pos);
+               pos->bpf_fd = fd;
+       }
+       list_splice(&new_evsels, list);
+       return 0;
+}
+
+int parse_events_load_bpf_obj(struct parse_events_evlist *data,
+                             struct list_head *list,
+                             struct bpf_object *obj)
+{
+       int err;
+       char errbuf[BUFSIZ];
+       struct __add_bpf_event_param param = {data, list};
+       static bool registered_unprobe_atexit = false;
+
+       if (IS_ERR(obj) || !obj) {
+               snprintf(errbuf, sizeof(errbuf),
+                        "Internal error: load bpf obj with NULL");
+               err = -EINVAL;
+               goto errout;
+       }
+
+       /*
+        * Register atexit handler before calling bpf__probe() so
+        * bpf__probe() doesn't need to unprobe probe points it has
+        * already created on failure.
+        */
+       if (!registered_unprobe_atexit) {
+               atexit(bpf__clear);
+               registered_unprobe_atexit = true;
+       }
+
+       err = bpf__probe(obj);
+       if (err) {
+               bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf));
+               goto errout;
+       }
+
+       err = bpf__load(obj);
+       if (err) {
+               bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
+               goto errout;
+       }
+
+       err = bpf__foreach_tev(obj, add_bpf_event, &param);
+       if (err) {
+               snprintf(errbuf, sizeof(errbuf),
+                        "Attach events in BPF object failed");
+               goto errout;
+       }
+
+       return 0;
+errout:
+       data->error->help = strdup("(add -v to see detail)");
+       data->error->str = strdup(errbuf);
+       return err;
+}
+
+int parse_events_load_bpf(struct parse_events_evlist *data,
+                         struct list_head *list,
+                         char *bpf_file_name,
+                         bool source)
+{
+       struct bpf_object *obj;
+
+       obj = bpf__prepare_load(bpf_file_name, source);
+       if (IS_ERR(obj) || !obj) {
+               char errbuf[BUFSIZ];
+               int err;
+
+               err = obj ? PTR_ERR(obj) : -EINVAL;
+
+               if (err == -ENOTSUP)
+                       snprintf(errbuf, sizeof(errbuf),
+                                "BPF support is not compiled");
+               else
+                       snprintf(errbuf, sizeof(errbuf),
+                                "BPF object file '%s' is invalid",
+                                bpf_file_name);
+
+               data->error->help = strdup("(add -v to see detail)");
+               data->error->str = strdup(errbuf);
+               return err;
+       }
+
+       return parse_events_load_bpf_obj(data, list, obj);
 }
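
Not shown in this hunk but enabled by the scanner rules later in this diff: an event spec may now simply name a BPF object (".o"/".bpf") or C source (".c") file, which lands in parse_events_load_bpf() above with source set accordingly. Illustrative usage, file names made up:

    perf record -e ./hello_bpf.o -a -- sleep 1      # pre-built object, source == false
    perf record -e ./hello_bpf.c -a -- sleep 1      # built on the fly, source == true
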
 
 static int
@@ -565,9 +738,13 @@ static int check_type_val(struct parse_events_term *term,
        return -EINVAL;
 }
 
-static int config_term(struct perf_event_attr *attr,
-                      struct parse_events_term *term,
-                      struct parse_events_error *err)
+typedef int config_term_func_t(struct perf_event_attr *attr,
+                              struct parse_events_term *term,
+                              struct parse_events_error *err);
+
+static int config_term_common(struct perf_event_attr *attr,
+                             struct parse_events_term *term,
+                             struct parse_events_error *err)
 {
 #define CHECK_TYPE_VAL(type)                                              \
 do {                                                                      \
@@ -576,12 +753,6 @@ do {                                                                          \
 } while (0)
 
        switch (term->type_term) {
-       case PARSE_EVENTS__TERM_TYPE_USER:
-               /*
-                * Always succeed for sysfs terms, as we dont know
-                * at this point what type they need to have.
-                */
-               return 0;
        case PARSE_EVENTS__TERM_TYPE_CONFIG:
                CHECK_TYPE_VAL(NUM);
                attr->config = term->val.num;
@@ -620,10 +791,19 @@ do {                                                                         \
        case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
                CHECK_TYPE_VAL(NUM);
                break;
+       case PARSE_EVENTS__TERM_TYPE_INHERIT:
+               CHECK_TYPE_VAL(NUM);
+               break;
+       case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
+               CHECK_TYPE_VAL(NUM);
+               break;
        case PARSE_EVENTS__TERM_TYPE_NAME:
                CHECK_TYPE_VAL(STR);
                break;
        default:
+               err->str = strdup("unknown term");
+               err->idx = term->err_term;
+               err->help = parse_events_formats_error_string(NULL);
                return -EINVAL;
        }
 
@@ -631,9 +811,46 @@ do {                                                                          \
 #undef CHECK_TYPE_VAL
 }
 
+static int config_term_pmu(struct perf_event_attr *attr,
+                          struct parse_events_term *term,
+                          struct parse_events_error *err)
+{
+       if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER)
+               /*
+                * Always succeed for sysfs terms, as we dont know
+                * at this point what type they need to have.
+                */
+               return 0;
+       else
+               return config_term_common(attr, term, err);
+}
+
+static int config_term_tracepoint(struct perf_event_attr *attr,
+                                 struct parse_events_term *term,
+                                 struct parse_events_error *err)
+{
+       switch (term->type_term) {
+       case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
+       case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
+       case PARSE_EVENTS__TERM_TYPE_INHERIT:
+       case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
+               return config_term_common(attr, term, err);
+       default:
+               if (err) {
+                       err->idx = term->err_term;
+                       err->str = strdup("unknown term");
+                       err->help = strdup("valid terms: call-graph,stack-size\n");
+               }
+               return -EINVAL;
+       }
+
+       return 0;
+}
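
Illustrative event strings, not taken from the diff: with config_term_tracepoint() acting as a whitelist, tracepoints accept the same slash-delimited term syntax as PMU events but only for the four cases listed above; any other term is rejected with the "valid terms: call-graph,stack-size" help text:

    perf record -e 'sched:sched_switch/call-graph=fp,stack-size=8192/' -a -- sleep 1
    perf record -e 'sched:sched_switch/no-inherit/' -a -- sleep 1
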
+
 static int config_attr(struct perf_event_attr *attr,
                       struct list_head *head,
-                      struct parse_events_error *err)
+                      struct parse_events_error *err,
+                      config_term_func_t config_term)
 {
        struct parse_events_term *term;
 
@@ -680,6 +897,12 @@ do {                                                               \
                case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
                        ADD_CONFIG_TERM(STACK_USER, stack_user, term->val.num);
                        break;
+               case PARSE_EVENTS__TERM_TYPE_INHERIT:
+                       ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 1 : 0);
+                       break;
+               case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
+                       ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 0 : 1);
+                       break;
                default:
                        break;
                }
@@ -688,6 +911,27 @@ do {                                                               \
        return 0;
 }
 
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
+                               char *sys, char *event,
+                               struct parse_events_error *err,
+                               struct list_head *head_config)
+{
+       if (head_config) {
+               struct perf_event_attr attr;
+
+               if (config_attr(&attr, head_config, err,
+                               config_term_tracepoint))
+                       return -EINVAL;
+       }
+
+       if (strpbrk(sys, "*?"))
+               return add_tracepoint_multi_sys(list, idx, sys, event,
+                                               err, head_config);
+       else
+               return add_tracepoint_event(list, idx, sys, event,
+                                           err, head_config);
+}
+
 int parse_events_add_numeric(struct parse_events_evlist *data,
                             struct list_head *list,
                             u32 type, u64 config,
@@ -701,7 +945,8 @@ int parse_events_add_numeric(struct parse_events_evlist *data,
        attr.config = config;
 
        if (head_config) {
-               if (config_attr(&attr, head_config, data->error))
+               if (config_attr(&attr, head_config, data->error,
+                               config_term_common))
                        return -EINVAL;
 
                if (get_config_terms(head_config, &config_terms))
@@ -761,7 +1006,7 @@ int parse_events_add_pmu(struct parse_events_evlist *data,
         * Configure hardcoded terms first, no need to check
         * return value when called with fail == 0 ;)
         */
-       if (config_attr(&attr, head_config, data->error))
+       if (config_attr(&attr, head_config, data->error, config_term_pmu))
                return -EINVAL;
 
        if (get_config_terms(head_config, &config_terms))
@@ -793,6 +1038,11 @@ void parse_events__set_leader(char *name, struct list_head *list)
 {
        struct perf_evsel *leader;
 
+       if (list_empty(list)) {
+               WARN_ONCE(true, "WARNING: failed to set leader: empty list");
+               return;
+       }
+
        __perf_evlist__set_leader(list);
        leader = list_entry(list->next, struct perf_evsel, node);
        leader->group_name = name ? strdup(name) : NULL;
@@ -819,6 +1069,7 @@ struct event_modifier {
        int eG;
        int eI;
        int precise;
+       int precise_max;
        int exclude_GH;
        int sample_read;
        int pinned;
@@ -834,6 +1085,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
        int eG = evsel ? evsel->attr.exclude_guest : 0;
        int eI = evsel ? evsel->attr.exclude_idle : 0;
        int precise = evsel ? evsel->attr.precise_ip : 0;
+       int precise_max = 0;
        int sample_read = 0;
        int pinned = evsel ? evsel->attr.pinned : 0;
 
@@ -870,6 +1122,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
                        /* use of precise requires exclude_guest */
                        if (!exclude_GH)
                                eG = 1;
+               } else if (*str == 'P') {
+                       precise_max = 1;
                } else if (*str == 'S') {
                        sample_read = 1;
                } else if (*str == 'D') {
@@ -900,6 +1154,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
        mod->eG = eG;
        mod->eI = eI;
        mod->precise = precise;
+       mod->precise_max = precise_max;
        mod->exclude_GH = exclude_GH;
        mod->sample_read = sample_read;
        mod->pinned = pinned;
@@ -916,7 +1171,7 @@ static int check_modifier(char *str)
        char *p = str;
 
        /* The sizeof includes 0 byte as well. */
-       if (strlen(str) > (sizeof("ukhGHpppSDI") - 1))
+       if (strlen(str) > (sizeof("ukhGHpppPSDI") - 1))
                return -1;
 
        while (*p) {
@@ -955,6 +1210,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
                evsel->attr.exclude_idle   = mod.eI;
                evsel->exclude_GH          = mod.exclude_GH;
                evsel->sample_read         = mod.sample_read;
+               evsel->precise_max         = mod.precise_max;
 
                if (perf_evsel__is_group_leader(evsel))
                        evsel->attr.pinned = mod.pinned;
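
The new 'P' modifier only records the request here; the intent is that event setup can then raise precise_ip to the highest level the hardware accepts, instead of the user probing with :p, :pp or :ppp by hand. Illustrative usage:

    perf record -e cycles:P -- sleep 1
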
@@ -1142,6 +1398,11 @@ int parse_events(struct perf_evlist *evlist, const char *str,
        if (!ret) {
                struct perf_evsel *last;
 
+               if (list_empty(&data.list)) {
+                       WARN_ONCE(true, "WARNING: event parser found nothing");
+                       return -1;
+               }
+
                perf_evlist__splice_list_tail(evlist, &data.list);
                evlist->nr_groups += data.nr_groups;
                last = perf_evlist__last(evlist);
@@ -1251,6 +1512,12 @@ foreach_evsel_in_last_glob(struct perf_evlist *evlist,
        struct perf_evsel *last = NULL;
        int err;
 
+       /*
+        * Don't return when list_empty, give func a chance to report
+        * error when it found last == NULL.
+        *
+        * So no need to WARN here, let *func do this.
+        */
        if (evlist->nr_entries > 0)
                last = perf_evlist__last(evlist);
 
@@ -1419,7 +1686,7 @@ restart:
                printf("  %-50s [%s]\n", evt_list[evt_i++],
                                event_type_descriptors[PERF_TYPE_TRACEPOINT]);
        }
-       if (evt_num)
+       if (evt_num && pager_in_use())
                printf("\n");
 
 out_free:
@@ -1575,7 +1842,7 @@ restart:
                printf("  %-50s [%s]\n", evt_list[evt_i++],
                                event_type_descriptors[PERF_TYPE_HW_CACHE]);
        }
-       if (evt_num)
+       if (evt_num && pager_in_use())
                printf("\n");
 
 out_free:
@@ -1648,7 +1915,7 @@ restart:
                }
                printf("  %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]);
        }
-       if (evt_num)
+       if (evt_num && pager_in_use())
                printf("\n");
 
 out_free:
@@ -1689,13 +1956,14 @@ void print_events(const char *event_glob, bool name_only)
                printf("  %-50s [%s]\n",
                       "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
                       event_type_descriptors[PERF_TYPE_RAW]);
-               printf("   (see 'man perf-list' on how to encode it)\n");
-               printf("\n");
+               if (pager_in_use())
+                       printf("   (see 'man perf-list' on how to encode it)\n\n");
 
                printf("  %-50s [%s]\n",
                       "mem:<addr>[/len][:access]",
                        event_type_descriptors[PERF_TYPE_BREAKPOINT]);
-               printf("\n");
+               if (pager_in_use())
+                       printf("\n");
        }
 
        print_tracepoint_events(NULL, NULL, name_only);
@@ -1811,3 +2079,29 @@ void parse_events_evlist_error(struct parse_events_evlist *data,
        err->str = strdup(str);
        WARN_ONCE(!err->str, "WARNING: failed to allocate error string");
 }
+
+/*
+ * Return a string containing the valid config terms for an event.
+ * @additional_terms: For terms such as PMU sysfs terms.
+ */
+char *parse_events_formats_error_string(char *additional_terms)
+{
+       char *str;
+       static const char *static_terms = "config,config1,config2,name,"
+                                         "period,freq,branch_type,time,"
+                                         "call-graph,stack-size\n";
+
+       /* valid terms */
+       if (additional_terms) {
+               if (!asprintf(&str, "valid terms: %s,%s",
+                             additional_terms, static_terms))
+                       goto fail;
+       } else {
+               if (!asprintf(&str, "valid terms: %s", static_terms))
+                       goto fail;
+       }
+       return str;
+
+fail:
+       return NULL;
+}
index a09b0e2109973395b5375832ec99a65c707ac630..f1a6db107241b1c8ffaf03a3514ba1549df6fd1b 100644 (file)
@@ -67,6 +67,8 @@ enum {
        PARSE_EVENTS__TERM_TYPE_TIME,
        PARSE_EVENTS__TERM_TYPE_CALLGRAPH,
        PARSE_EVENTS__TERM_TYPE_STACKSIZE,
+       PARSE_EVENTS__TERM_TYPE_NOINHERIT,
+       PARSE_EVENTS__TERM_TYPE_INHERIT
 };
 
 struct parse_events_term {
@@ -118,7 +120,18 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add);
 int parse_events__modifier_group(struct list_head *list, char *event_mod);
 int parse_events_name(struct list_head *list, char *name);
 int parse_events_add_tracepoint(struct list_head *list, int *idx,
-                               char *sys, char *event);
+                               char *sys, char *event,
+                               struct parse_events_error *error,
+                               struct list_head *head_config);
+int parse_events_load_bpf(struct parse_events_evlist *data,
+                         struct list_head *list,
+                         char *bpf_file_name,
+                         bool source);
+/* Provide this function for perf test */
+struct bpf_object;
+int parse_events_load_bpf_obj(struct parse_events_evlist *data,
+                             struct list_head *list,
+                             struct bpf_object *obj);
 int parse_events_add_numeric(struct parse_events_evlist *data,
                             struct list_head *list,
                             u32 type, u64 config,
@@ -155,5 +168,6 @@ int print_hwcache_events(const char *event_glob, bool name_only);
 extern int is_valid_tracepoint(const char *event_string);
 
 int valid_event_mount(const char *eventfs);
+char *parse_events_formats_error_string(char *additional_terms);
 
 #endif /* __PERF_PARSE_EVENTS_H */
index 936d566f48d8df39062848fbca9630eadbee3ca0..58c5831ffd5c22133f48a4c1a3a07721c71362fa 100644 (file)
@@ -115,6 +115,8 @@ do {                                                        \
 group          [^,{}/]*[{][^}]*[}][^,{}/]*
 event_pmu      [^,{}/]+[/][^/]*[/][^,{}/]*
 event          [^,{}/]+
+bpf_object     .*\.(o|bpf)
+bpf_source     .*\.c
 
 num_dec                [0-9]+
 num_hex                0x[a-fA-F0-9]+
@@ -122,7 +124,7 @@ num_raw_hex [a-fA-F0-9]+
 name           [a-zA-Z_*?][a-zA-Z0-9_*?.]*
 name_minus     [a-zA-Z_*?][a-zA-Z0-9\-_*?.]*
 /* If you add a modifier you need to update check_modifier() */
-modifier_event [ukhpGHSDI]+
+modifier_event [ukhpPGHSDI]+
 modifier_bp    [rwx]{1,3}
 
 %%
@@ -159,6 +161,8 @@ modifier_bp [rwx]{1,3}
                }
 
 {event_pmu}    |
+{bpf_object}   |
+{bpf_source}   |
 {event}                {
                        BEGIN(INITIAL);
                        REWIND(1);
@@ -174,7 +178,7 @@ modifier_bp [rwx]{1,3}
 
 <config>{
        /*
-        * Please update formats_error_string any time
+        * Please update parse_events_formats_error_string any time
         * new static term is added.
         */
 config                 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
@@ -187,6 +191,8 @@ branch_type         { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
 time                   { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_TIME); }
 call-graph             { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CALLGRAPH); }
 stack-size             { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_STACKSIZE); }
+inherit                        { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_INHERIT); }
+no-inherit             { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); }
 ,                      { return ','; }
 "/"                    { BEGIN(INITIAL); return '/'; }
 {name_minus}           { return str(yyscanner, PE_NAME); }
@@ -264,6 +270,8 @@ r{num_raw_hex}              { return raw(yyscanner); }
 {num_hex}              { return value(yyscanner, 16); }
 
 {modifier_event}       { return str(yyscanner, PE_MODIFIER_EVENT); }
+{bpf_object}           { return str(yyscanner, PE_BPF_OBJECT); }
+{bpf_source}           { return str(yyscanner, PE_BPF_SOURCE); }
 {name}                 { return pmu_str_check(yyscanner); }
 "/"                    { BEGIN(config); return '/'; }
 -                      { return '-'; }
index 9cd70819c7950e2de1aaae29a17b0f951b593bda..ad379968d4c10c0fb7bb2ddc3bacce3dc1166f43 100644 (file)
@@ -42,6 +42,7 @@ static inc_group_count(struct list_head *list,
 %token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM
 %token PE_EVENT_NAME
 %token PE_NAME
+%token PE_BPF_OBJECT PE_BPF_SOURCE
 %token PE_MODIFIER_EVENT PE_MODIFIER_BP
 %token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
 %token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
@@ -53,6 +54,8 @@ static inc_group_count(struct list_head *list,
 %type <num> PE_RAW
 %type <num> PE_TERM
 %type <str> PE_NAME
+%type <str> PE_BPF_OBJECT
+%type <str> PE_BPF_SOURCE
 %type <str> PE_NAME_CACHE_TYPE
 %type <str> PE_NAME_CACHE_OP_RESULT
 %type <str> PE_MODIFIER_EVENT
@@ -67,8 +70,10 @@ static inc_group_count(struct list_head *list,
 %type <head> event_legacy_cache
 %type <head> event_legacy_mem
 %type <head> event_legacy_tracepoint
+%type <tracepoint_name> tracepoint_name
 %type <head> event_legacy_numeric
 %type <head> event_legacy_raw
+%type <head> event_bpf_file
 %type <head> event_def
 %type <head> event_mod
 %type <head> event_name
@@ -84,6 +89,10 @@ static inc_group_count(struct list_head *list,
        u64 num;
        struct list_head *head;
        struct parse_events_term *term;
+       struct tracepoint_name {
+               char *sys;
+               char *event;
+       } tracepoint_name;
 }
 %%
 
@@ -198,7 +207,8 @@ event_def: event_pmu |
           event_legacy_mem |
           event_legacy_tracepoint sep_dc |
           event_legacy_numeric sep_dc |
-          event_legacy_raw sep_dc
+          event_legacy_raw sep_dc |
+          event_bpf_file
 
 event_pmu:
 PE_NAME '/' event_config '/'
@@ -368,36 +378,60 @@ PE_PREFIX_MEM PE_VALUE sep_dc
 }
 
 event_legacy_tracepoint:
-PE_NAME '-' PE_NAME ':' PE_NAME
+tracepoint_name
 {
        struct parse_events_evlist *data = _data;
+       struct parse_events_error *error = data->error;
        struct list_head *list;
-       char sys_name[128];
-       snprintf(&sys_name, 128, "%s-%s", $1, $3);
 
        ALLOC_LIST(list);
-       ABORT_ON(parse_events_add_tracepoint(list, &data->idx, &sys_name, $5));
+       if (error)
+               error->idx = @1.first_column;
+
+       if (parse_events_add_tracepoint(list, &data->idx, $1.sys, $1.event,
+                                       error, NULL))
+               return -1;
+
        $$ = list;
 }
 |
-PE_NAME ':' PE_NAME
+tracepoint_name '/' event_config '/'
 {
        struct parse_events_evlist *data = _data;
+       struct parse_events_error *error = data->error;
        struct list_head *list;
 
        ALLOC_LIST(list);
-       if (parse_events_add_tracepoint(list, &data->idx, $1, $3)) {
-               struct parse_events_error *error = data->error;
+       if (error)
+               error->idx = @1.first_column;
 
-               if (error) {
-                       error->idx = @1.first_column;
-                       error->str = strdup("unknown tracepoint");
-               }
+       if (parse_events_add_tracepoint(list, &data->idx, $1.sys, $1.event,
+                                       error, $3))
                return -1;
-       }
+
        $$ = list;
 }
 
+tracepoint_name:
+PE_NAME '-' PE_NAME ':' PE_NAME
+{
+       char sys_name[128];
+       struct tracepoint_name tracepoint;
+
+       snprintf(&sys_name, 128, "%s-%s", $1, $3);
+       tracepoint.sys = &sys_name;
+       tracepoint.event = $5;
+
+       $$ = tracepoint;
+}
+|
+PE_NAME ':' PE_NAME
+{
+       struct tracepoint_name tracepoint = {$1, $3};
+
+       $$ = tracepoint;
+}
+
 event_legacy_numeric:
 PE_VALUE ':' PE_VALUE
 {
@@ -420,6 +454,28 @@ PE_RAW
        $$ = list;
 }
 
+event_bpf_file:
+PE_BPF_OBJECT
+{
+       struct parse_events_evlist *data = _data;
+       struct parse_events_error *error = data->error;
+       struct list_head *list;
+
+       ALLOC_LIST(list);
+       ABORT_ON(parse_events_load_bpf(data, list, $1, false));
+       $$ = list;
+}
+|
+PE_BPF_SOURCE
+{
+       struct parse_events_evlist *data = _data;
+       struct list_head *list;
+
+       ALLOC_LIST(list);
+       ABORT_ON(parse_events_load_bpf(data, list, $1, true));
+       $$ = list;
+}
+
 start_terms: event_config
 {
        struct parse_events_terms *data = _data;
index 01626be2a8eb8043135591fcca302213af501e0d..9fca09296eb3472ba2fbe06af0ac1a3f4a1f829b 100644 (file)
@@ -2,10 +2,13 @@
 #include "parse-options.h"
 #include "cache.h"
 #include "header.h"
+#include <linux/string.h>
 
 #define OPT_SHORT 1
 #define OPT_UNSET 2
 
+static struct strbuf error_buf = STRBUF_INIT;
+
 static int opterror(const struct option *opt, const char *reason, int flags)
 {
        if (flags & OPT_SHORT)
@@ -372,7 +375,8 @@ void parse_options_start(struct parse_opt_ctx_t *ctx,
 }
 
 static int usage_with_options_internal(const char * const *,
-                                      const struct option *, int);
+                                      const struct option *, int,
+                                      struct parse_opt_ctx_t *);
 
 int parse_options_step(struct parse_opt_ctx_t *ctx,
                       const struct option *options,
@@ -396,8 +400,9 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
 
                if (arg[1] != '-') {
                        ctx->opt = ++arg;
-                       if (internal_help && *ctx->opt == 'h')
-                               return usage_with_options_internal(usagestr, options, 0);
+                       if (internal_help && *ctx->opt == 'h') {
+                               return usage_with_options_internal(usagestr, options, 0, ctx);
+                       }
                        switch (parse_short_opt(ctx, options)) {
                        case -1:
                                return parse_options_usage(usagestr, options, arg, 1);
@@ -412,7 +417,7 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                                check_typos(arg, options);
                        while (ctx->opt) {
                                if (internal_help && *ctx->opt == 'h')
-                                       return usage_with_options_internal(usagestr, options, 0);
+                                       return usage_with_options_internal(usagestr, options, 0, ctx);
                                arg = ctx->opt;
                                switch (parse_short_opt(ctx, options)) {
                                case -1:
@@ -445,9 +450,9 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
 
                arg += 2;
                if (internal_help && !strcmp(arg, "help-all"))
-                       return usage_with_options_internal(usagestr, options, 1);
+                       return usage_with_options_internal(usagestr, options, 1, ctx);
                if (internal_help && !strcmp(arg, "help"))
-                       return usage_with_options_internal(usagestr, options, 0);
+                       return usage_with_options_internal(usagestr, options, 0, ctx);
                if (!strcmp(arg, "list-opts"))
                        return PARSE_OPT_LIST_OPTS;
                if (!strcmp(arg, "list-cmds"))
@@ -496,7 +501,7 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
 {
        struct parse_opt_ctx_t ctx;
 
-       perf_header__set_cmdline(argc, argv);
+       perf_env__set_cmdline(&perf_env, argc, argv);
 
        /* build usage string if it's not provided */
        if (subcommands && !usagestr[0]) {
@@ -537,9 +542,11 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
                exit(130);
        default: /* PARSE_OPT_UNKNOWN */
                if (ctx.argv[0][1] == '-') {
-                       error("unknown option `%s'", ctx.argv[0] + 2);
+                       strbuf_addf(&error_buf, "unknown option `%s'",
+                                   ctx.argv[0] + 2);
                } else {
-                       error("unknown switch `%c'", *ctx.opt);
+                       strbuf_addf(&error_buf, "unknown switch `%c'",
+                                   *ctx.opt);
                }
                usage_with_options(usagestr, options);
        }
@@ -642,13 +649,93 @@ static void print_option_help(const struct option *opts, int full)
        fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help);
 }
 
+static int option__cmp(const void *va, const void *vb)
+{
+       const struct option *a = va, *b = vb;
+       int sa = tolower(a->short_name), sb = tolower(b->short_name), ret;
+
+       if (sa == 0)
+               sa = 'z' + 1;
+       if (sb == 0)
+               sb = 'z' + 1;
+
+       ret = sa - sb;
+
+       if (ret == 0) {
+               const char *la = a->long_name ?: "",
+                          *lb = b->long_name ?: "";
+               ret = strcmp(la, lb);
+       }
+
+       return ret;
+}
+
+static struct option *options__order(const struct option *opts)
+{
+       int nr_opts = 0;
+       const struct option *o = opts;
+       struct option *ordered;
+
+       for (o = opts; o->type != OPTION_END; o++)
+               ++nr_opts;
+
+       ordered = memdup(opts, sizeof(*o) * (nr_opts + 1));
+       if (ordered == NULL)
+               goto out;
+
+       qsort(ordered, nr_opts, sizeof(*o), option__cmp);
+out:
+       return ordered;
+}
+
+static bool option__in_argv(const struct option *opt, const struct parse_opt_ctx_t *ctx)
+{
+       int i;
+
+       for (i = 1; i < ctx->argc; ++i) {
+               const char *arg = ctx->argv[i];
+
+               if (arg[0] != '-') {
+                       if (arg[1] == '\0') {
+                               if (arg[0] == opt->short_name)
+                                       return true;
+                               continue;
+                       }
+
+                       if (opt->long_name && strcmp(opt->long_name, arg) == 0)
+                               return true;
+
+                       if (opt->help && strcasestr(opt->help, arg) != NULL)
+                               return true;
+
+                       continue;
+               }
+
+               if (arg[1] == opt->short_name ||
+                   (arg[1] == '-' && opt->long_name && strcmp(opt->long_name, arg + 2) == 0))
+                       return true;
+       }
+
+       return false;
+}
+
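
Taken together, options__order() and option__in_argv() change what --help prints: the option table is sorted case-insensitively by short option (entries without one sort after 'z'), ties break on the long name, and when extra words follow the command only options whose name or help text matches them are shown. A self-contained illustration of the ordering rule, using nothing beyond the C library:

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct opt { int short_name; const char *long_name; };

    static int opt_cmp(const void *va, const void *vb)
    {
            const struct opt *a = va, *b = vb;
            int sa = a->short_name ? tolower(a->short_name) : 'z' + 1;
            int sb = b->short_name ? tolower(b->short_name) : 'z' + 1;

            return sa != sb ? sa - sb : strcmp(a->long_name, b->long_name);
    }

    int main(void)
    {
            struct opt opts[] = {
                    { 0, "sort" }, { 'i', "input" }, { 'V', "version" },
            };

            qsort(opts, 3, sizeof(opts[0]), opt_cmp);
            for (int i = 0; i < 3; i++)     /* prints: input, version, sort */
                    printf("--%s\n", opts[i].long_name);
            return 0;
    }
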
 int usage_with_options_internal(const char * const *usagestr,
-                               const struct option *opts, int full)
+                               const struct option *opts, int full,
+                               struct parse_opt_ctx_t *ctx)
 {
+       struct option *ordered;
+
        if (!usagestr)
                return PARSE_OPT_HELP;
 
-       fprintf(stderr, "\n usage: %s\n", *usagestr++);
+       setup_pager();
+
+       if (strbuf_avail(&error_buf)) {
+               fprintf(stderr, "  Error: %s\n", error_buf.buf);
+               strbuf_release(&error_buf);
+       }
+
+       fprintf(stderr, "\n Usage: %s\n", *usagestr++);
        while (*usagestr && **usagestr)
                fprintf(stderr, "    or: %s\n", *usagestr++);
        while (*usagestr) {
@@ -661,11 +748,20 @@ int usage_with_options_internal(const char * const *usagestr,
        if (opts->type != OPTION_GROUP)
                fputc('\n', stderr);
 
-       for (  ; opts->type != OPTION_END; opts++)
+       ordered = options__order(opts);
+       if (ordered)
+               opts = ordered;
+
+       for (  ; opts->type != OPTION_END; opts++) {
+               if (ctx && ctx->argc > 1 && !option__in_argv(opts, ctx))
+                       continue;
                print_option_help(opts, full);
+       }
 
        fputc('\n', stderr);
 
+       free(ordered);
+
        return PARSE_OPT_HELP;
 }
 
@@ -673,7 +769,22 @@ void usage_with_options(const char * const *usagestr,
                        const struct option *opts)
 {
        exit_browser(false);
-       usage_with_options_internal(usagestr, opts, 0);
+       usage_with_options_internal(usagestr, opts, 0, NULL);
+       exit(129);
+}
+
+void usage_with_options_msg(const char * const *usagestr,
+                           const struct option *opts, const char *fmt, ...)
+{
+       va_list ap;
+
+       exit_browser(false);
+
+       va_start(ap, fmt);
+       strbuf_addv(&error_buf, fmt, ap);
+       va_end(ap);
+
+       usage_with_options_internal(usagestr, opts, 0, NULL);
        exit(129);
 }
 
@@ -684,7 +795,7 @@ int parse_options_usage(const char * const *usagestr,
        if (!usagestr)
                goto opt;
 
-       fprintf(stderr, "\n usage: %s\n", *usagestr++);
+       fprintf(stderr, "\n Usage: %s\n", *usagestr++);
        while (*usagestr && **usagestr)
                fprintf(stderr, "    or: %s\n", *usagestr++);
        while (*usagestr) {
@@ -698,24 +809,23 @@ int parse_options_usage(const char * const *usagestr,
 opt:
        for (  ; opts->type != OPTION_END; opts++) {
                if (short_opt) {
-                       if (opts->short_name == *optstr)
+                       if (opts->short_name == *optstr) {
+                               print_option_help(opts, 0);
                                break;
+                       }
                        continue;
                }
 
                if (opts->long_name == NULL)
                        continue;
 
-               if (!prefixcmp(optstr, opts->long_name))
-                       break;
-               if (!prefixcmp(optstr, "no-") &&
-                   !prefixcmp(optstr + 3, opts->long_name))
-                       break;
+               if (!prefixcmp(opts->long_name, optstr))
+                       print_option_help(opts, 0);
+               if (!prefixcmp("no-", optstr) &&
+                   !prefixcmp(opts->long_name, optstr + 3))
+                       print_option_help(opts, 0);
        }
 
-       if (opts->type != OPTION_END)
-               print_option_help(opts, 0);
-
        return PARSE_OPT_HELP;
 }
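
The prefixcmp() arguments are deliberately swapped in the hunk above: the user's (possibly partial) spelling is now taken as a prefix of the option name rather than the other way round, and every match is printed instead of only the first. Assuming the usual git/perf semantics, where prefixcmp(str, prefix) returns 0 when str starts with prefix, the effect is:

    /* Equivalent standalone helper, restating the semantics assumed above. */
    static int prefixcmp(const char *str, const char *prefix)
    {
            for (; *prefix; str++, prefix++)
                    if (*str != *prefix)
                            return (unsigned char)*prefix - (unsigned char)*str;
            return 0;
    }

    /* !prefixcmp("call-graph", "call") -> true:  help for --call-graph shown
     * !prefixcmp("call", "call-graph") -> false: the old argument order     */
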
 
index 367d8b816cc7e7ae0a99bb8200e0bd81b7999546..a8e407bc251e418df2e04623bec08c9de9f5a6cb 100644 (file)
@@ -111,6 +111,7 @@ struct option {
 #define OPT_GROUP(h)                { .type = OPTION_GROUP, .help = (h) }
 #define OPT_BIT(s, l, v, h, b)      { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) }
 #define OPT_BOOLEAN(s, l, v, h)     { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) }
+#define OPT_BOOLEAN_FLAG(s, l, v, h, f)     { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h), .flags = (f) }
 #define OPT_BOOLEAN_SET(s, l, v, os, h) \
        { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), \
        .value = check_vtype(v, bool *), .help = (h), \
@@ -160,6 +161,10 @@ extern int parse_options_subcommand(int argc, const char **argv,
 
 extern NORETURN void usage_with_options(const char * const *usagestr,
                                         const struct option *options);
+extern NORETURN __attribute__((format(printf,3,4)))
+void usage_with_options_msg(const char * const *usagestr,
+                           const struct option *options,
+                           const char *fmt, ...);
 
 /*----- incremental advanced APIs -----*/
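
The format(printf, 3, 4) attribute on the new usage_with_options_msg() declaration lets the compiler type-check the fmt/vararg pair at every call site. A standalone reminder of what that buys (function name is illustrative):

    #include <stdarg.h>
    #include <stdio.h>

    __attribute__((format(printf, 1, 2)))
    static void warnf(const char *fmt, ...)
    {
            va_list ap;

            va_start(ap, fmt);
            vfprintf(stderr, fmt, ap);
            va_end(ap);
    }

    /* warnf("%s", 42) now draws a -Wformat warning instead of compiling
     * silently and misbehaving at run time. */
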
 
index 885e8ac83997905db7baa0d334015b5cf46e37d9..6b8eb13e14e4d5897fca71c41634a07cc9ec5d16 100644 (file)
@@ -6,6 +6,7 @@ const struct sample_reg __weak sample_reg_masks[] = {
        SMPL_REG_END
 };
 
+#ifdef HAVE_PERF_REGS_SUPPORT
 int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
 {
        int i, idx = 0;
@@ -29,3 +30,4 @@ out:
        *valp = regs->cache_regs[id];
        return 0;
 }
+#endif
index 2984dcc54d67cd7acb9d02975c4fb1b7b55fdb7c..679d6e493962267f7b219b88c05d06c61ef1d2ea 100644 (file)
@@ -2,6 +2,7 @@
 #define __PERF_REGS_H
 
 #include <linux/types.h>
+#include <linux/compiler.h>
 
 struct regs_dump;
 
index 89c91a1a67e760fc5b82198179f9ecdc4b337df5..e4b173dec4b9978cb3d1eb53c3569277c5110714 100644 (file)
@@ -626,38 +626,26 @@ static int pmu_resolve_param_term(struct parse_events_term *term,
        return -1;
 }
 
-static char *formats_error_string(struct list_head *formats)
+static char *pmu_formats_string(struct list_head *formats)
 {
        struct perf_pmu_format *format;
-       char *err, *str;
-       static const char *static_terms = "config,config1,config2,name,"
-                                         "period,freq,branch_type,time,"
-                                         "call-graph,stack-size\n";
+       char *str;
+       struct strbuf buf;
        unsigned i = 0;
 
-       if (!asprintf(&str, "valid terms:"))
+       if (!formats)
                return NULL;
 
+       strbuf_init(&buf, 0);
        /* sysfs exported terms */
-       list_for_each_entry(format, formats, list) {
-               char c = i++ ? ',' : ' ';
-
-               err = str;
-               if (!asprintf(&str, "%s%c%s", err, c, format->name))
-                       goto fail;
-               free(err);
-       }
+       list_for_each_entry(format, formats, list)
+               strbuf_addf(&buf, i++ ? ",%s" : "%s",
+                           format->name);
 
-       /* static terms */
-       err = str;
-       if (!asprintf(&str, "%s,%s", err, static_terms))
-               goto fail;
+       str = strbuf_detach(&buf, NULL);
+       strbuf_release(&buf);
 
-       free(err);
        return str;
-fail:
-       free(err);
-       return NULL;
 }
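
pmu_formats_string() drops the chain of asprintf() calls that re-copied the whole string for every term in favour of a growable strbuf that is detached once at the end; the static terms that used to be appended here are now supplied by parse_events_formats_error_string() on the caller's side. The same join-into-a-growable-buffer idea in standalone form, using POSIX open_memstream() instead of perf's strbuf:

    #include <stdio.h>
    #include <stdlib.h>

    static char *join_names(const char * const *names, int n)
    {
            char *buf = NULL;
            size_t sz = 0;
            FILE *f = open_memstream(&buf, &sz);

            if (!f)
                    return NULL;
            for (int i = 0; i < n; i++)
                    fprintf(f, i ? ",%s" : "%s", names[i]);
            fclose(f);
            return buf;             /* caller frees */
    }
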
 
 /*
@@ -693,9 +681,12 @@ static int pmu_config_term(struct list_head *formats,
                if (verbose)
                        printf("Invalid event/parameter '%s'\n", term->config);
                if (err) {
+                       char *pmu_term = pmu_formats_string(formats);
+
                        err->idx  = term->err_term;
                        err->str  = strdup("unknown term");
-                       err->help = formats_error_string(formats);
+                       err->help = parse_events_formats_error_string(pmu_term);
+                       free(pmu_term);
                }
                return -EINVAL;
        }
@@ -1017,7 +1008,8 @@ void print_pmu_events(const char *event_glob, bool name_only)
                                goto out_enomem;
                        j++;
                }
-               if (pmu->selectable) {
+               if (pmu->selectable &&
+                   (event_glob == NULL || strglobmatch(pmu->name, event_glob))) {
                        char *s;
                        if (asprintf(&s, "%s//", pmu->name) < 0)
                                goto out_enomem;
@@ -1035,7 +1027,7 @@ void print_pmu_events(const char *event_glob, bool name_only)
                printf("  %-50s [Kernel PMU event]\n", aliases[j]);
                printed++;
        }
-       if (printed)
+       if (printed && pager_in_use())
                printf("\n");
 out_free:
        for (j = 0; j < len; j++)
index eb5f18b754028e863e3b72970842cc33d2fb997d..b51a8bfb40f92cd861d0954ed84e71e15ee50784 100644 (file)
@@ -40,8 +40,7 @@
 #include "color.h"
 #include "symbol.h"
 #include "thread.h"
-#include <api/fs/debugfs.h>
-#include <api/fs/tracefs.h>
+#include <api/fs/fs.h>
 #include "trace-event.h"       /* For __maybe_unused */
 #include "probe-event.h"
 #include "probe-finder.h"
@@ -72,7 +71,7 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
 static struct machine *host_machine;
 
 /* Initialize symbol maps and path of vmlinux/modules */
-static int init_symbol_maps(bool user_only)
+int init_probe_symbol_maps(bool user_only)
 {
        int ret;
 
@@ -102,7 +101,7 @@ out:
        return ret;
 }
 
-static void exit_symbol_maps(void)
+void exit_probe_symbol_maps(void)
 {
        if (host_machine) {
                machine__delete(host_machine);
@@ -127,17 +126,19 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
 {
        /* kmap->ref_reloc_sym should be set if host_machine is initialized */
        struct kmap *kmap;
+       struct map *map = machine__kernel_map(host_machine);
 
-       if (map__load(host_machine->vmlinux_maps[MAP__FUNCTION], NULL) < 0)
+       if (map__load(map, NULL) < 0)
                return NULL;
 
-       kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]);
+       kmap = map__kmap(map);
        if (!kmap)
                return NULL;
        return kmap->ref_reloc_sym;
 }
 
-static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
+static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
+                                            bool reloc, bool reladdr)
 {
        struct ref_reloc_sym *reloc_sym;
        struct symbol *sym;
@@ -146,12 +147,14 @@ static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
        /* ref_reloc_sym is just a label. Need a special fix*/
        reloc_sym = kernel_get_ref_reloc_sym();
        if (reloc_sym && strcmp(name, reloc_sym->name) == 0)
-               return (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
+               *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
        else {
                sym = __find_kernel_function_by_name(name, &map);
-               if (sym)
-                       return map->unmap_ip(map, sym->start) -
-                               ((reloc) ? 0 : map->reloc);
+               if (!sym)
+                       return -ENOENT;
+               *addr = map->unmap_ip(map, sym->start) -
+                       ((reloc) ? 0 : map->reloc) -
+                       ((reladdr) ? map->start : 0);
        }
        return 0;
 }
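
Returning the address through an out parameter lets kernel_get_symbol_address_by_name() report -ENOENT distinctly from a symbol that happens to resolve to 0, and the extra reladdr flag rebases module symbols to be map-relative. The error-reporting half of that change, reduced to a standalone sketch (the lookup table and names are mine):

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    struct sym { const char *name; uint64_t addr; };

    static int lookup_addr(const struct sym *tab, int n,
                           const char *name, uint64_t *addr)
    {
            for (int i = 0; i < n; i++) {
                    if (strcmp(tab[i].name, name) == 0) {
                            *addr = tab[i].addr;    /* 0 is a valid answer */
                            return 0;
                    }
            }
            return -ENOENT;                         /* distinct from addr == 0 */
    }
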
@@ -245,12 +248,14 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
 static bool kprobe_blacklist__listed(unsigned long address);
 static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
 {
-       u64 etext_addr;
+       u64 etext_addr = 0;
+       int ret;
 
        /* Get the address of _etext for checking non-probable text symbol */
-       etext_addr = kernel_get_symbol_address_by_name("_etext", false);
+       ret = kernel_get_symbol_address_by_name("_etext", &etext_addr,
+                                               false, false);
 
-       if (etext_addr != 0 && etext_addr < address)
+       if (ret == 0 && etext_addr < address)
                pr_warning("%s is out of .text, skip it.\n", symbol);
        else if (kprobe_blacklist__listed(address))
                pr_warning("%s is blacklisted function, skip it.\n", symbol);
@@ -270,18 +275,19 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso)
        int ret = 0;
 
        if (module) {
-               list_for_each_entry(dso, &host_machine->dsos.head, node) {
-                       if (!dso->kernel)
-                               continue;
-                       if (strncmp(dso->short_name + 1, module,
-                                   dso->short_name_len - 2) == 0)
-                               goto found;
+               char module_name[128];
+
+               snprintf(module_name, sizeof(module_name), "[%s]", module);
+               map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name);
+               if (map) {
+                       dso = map->dso;
+                       goto found;
                }
                pr_debug("Failed to find module %s.\n", module);
                return -ENOENT;
        }
 
-       map = host_machine->vmlinux_maps[MAP__FUNCTION];
+       map = machine__kernel_map(host_machine);
        dso = map->dso;
 
        vmlinux_name = symbol_conf.vmlinux_name;
@@ -435,19 +441,22 @@ static char *debuginfo_cache_path;
 
 static struct debuginfo *debuginfo_cache__open(const char *module, bool silent)
 {
-       if ((debuginfo_cache_path && !strcmp(debuginfo_cache_path, module)) ||
-           (!debuginfo_cache_path && !module && debuginfo_cache))
+       const char *path = module;
+
+       /* If the module is NULL, it should be the kernel. */
+       if (!module)
+               path = "kernel";
+
+       if (debuginfo_cache_path && !strcmp(debuginfo_cache_path, path))
                goto out;
 
        /* Copy module path */
        free(debuginfo_cache_path);
-       if (module) {
-               debuginfo_cache_path = strdup(module);
-               if (!debuginfo_cache_path) {
-                       debuginfo__delete(debuginfo_cache);
-                       debuginfo_cache = NULL;
-                       goto out;
-               }
+       debuginfo_cache_path = strdup(path);
+       if (!debuginfo_cache_path) {
+               debuginfo__delete(debuginfo_cache);
+               debuginfo_cache = NULL;
+               goto out;
        }
 
        debuginfo_cache = open_debuginfo(module, silent);
@@ -516,8 +525,10 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
                        goto error;
                addr += stext;
        } else if (tp->symbol) {
-               addr = kernel_get_symbol_address_by_name(tp->symbol, false);
-               if (addr == 0)
+               /* If the module is given, this returns relative address */
+               ret = kernel_get_symbol_address_by_name(tp->symbol, &addr,
+                                                       false, !!tp->module);
+               if (ret != 0)
                        goto error;
                addr += tp->offset;
        }
@@ -860,11 +871,11 @@ int show_line_range(struct line_range *lr, const char *module, bool user)
 {
        int ret;
 
-       ret = init_symbol_maps(user);
+       ret = init_probe_symbol_maps(user);
        if (ret < 0)
                return ret;
        ret = __show_line_range(lr, module, user);
-       exit_symbol_maps();
+       exit_probe_symbol_maps();
 
        return ret;
 }
@@ -942,7 +953,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
        int i, ret = 0;
        struct debuginfo *dinfo;
 
-       ret = init_symbol_maps(pevs->uprobes);
+       ret = init_probe_symbol_maps(pevs->uprobes);
        if (ret < 0)
                return ret;
 
@@ -959,7 +970,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
 
        debuginfo__delete(dinfo);
 out:
-       exit_symbol_maps();
+       exit_probe_symbol_maps();
        return ret;
 }
 
@@ -1883,8 +1894,12 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
                        goto out;
                sym = map__find_symbol(map, addr, NULL);
        } else {
-               if (tp->symbol)
-                       addr = kernel_get_symbol_address_by_name(tp->symbol, true);
+               if (tp->symbol && !addr) {
+                       ret = kernel_get_symbol_address_by_name(tp->symbol,
+                                                       &addr, true, false);
+                       if (ret < 0)
+                               goto out;
+               }
                if (addr) {
                        addr += tp->offset;
                        sym = __find_kernel_function(addr, &map);
@@ -2054,7 +2069,7 @@ static void kprobe_blacklist__delete(struct list_head *blacklist)
 static int kprobe_blacklist__load(struct list_head *blacklist)
 {
        struct kprobe_blacklist_node *node;
-       const char *__debugfs = debugfs_find_mountpoint();
+       const char *__debugfs = debugfs__mountpoint();
        char buf[PATH_MAX], *p;
        FILE *fp;
        int ret;
@@ -2180,9 +2195,9 @@ out:
 }
 
 /* Show an event */
-static int show_perf_probe_event(const char *group, const char *event,
-                                struct perf_probe_event *pev,
-                                const char *module, bool use_stdout)
+int show_perf_probe_event(const char *group, const char *event,
+                         struct perf_probe_event *pev,
+                         const char *module, bool use_stdout)
 {
        struct strbuf buf = STRBUF_INIT;
        int ret;
@@ -2263,7 +2278,7 @@ int show_perf_probe_events(struct strfilter *filter)
 
        setup_pager();
 
-       ret = init_symbol_maps(false);
+       ret = init_probe_symbol_maps(false);
        if (ret < 0)
                return ret;
 
@@ -2279,7 +2294,7 @@ int show_perf_probe_events(struct strfilter *filter)
                close(kp_fd);
        if (up_fd > 0)
                close(up_fd);
-       exit_symbol_maps();
+       exit_probe_symbol_maps();
 
        return ret;
 }
@@ -2288,36 +2303,41 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
                              struct strlist *namelist, bool allow_suffix)
 {
        int i, ret;
-       char *p;
+       char *p, *nbase;
 
        if (*base == '.')
                base++;
+       nbase = strdup(base);
+       if (!nbase)
+               return -ENOMEM;
 
-       /* Try no suffix */
-       ret = e_snprintf(buf, len, "%s", base);
+       /* Cut off the dot suffixes (e.g. .const, .isra)*/
+       p = strchr(nbase, '.');
+       if (p && p != nbase)
+               *p = '\0';
+
+       /* Try no suffix number */
+       ret = e_snprintf(buf, len, "%s", nbase);
        if (ret < 0) {
                pr_debug("snprintf() failed: %d\n", ret);
-               return ret;
+               goto out;
        }
-       /* Cut off the postfixes (e.g. .const, .isra)*/
-       p = strchr(buf, '.');
-       if (p && p != buf)
-               *p = '\0';
        if (!strlist__has_entry(namelist, buf))
-               return 0;
+               goto out;
 
        if (!allow_suffix) {
                pr_warning("Error: event \"%s\" already exists. "
-                          "(Use -f to force duplicates.)\n", base);
-               return -EEXIST;
+                          "(Use -f to force duplicates.)\n", buf);
+               ret = -EEXIST;
+               goto out;
        }
 
        /* Try to add suffix */
        for (i = 1; i < MAX_EVENT_INDEX; i++) {
-               ret = e_snprintf(buf, len, "%s_%d", base, i);
+               ret = e_snprintf(buf, len, "%s_%d", nbase, i);
                if (ret < 0) {
                        pr_debug("snprintf() failed: %d\n", ret);
-                       return ret;
+                       goto out;
                }
                if (!strlist__has_entry(namelist, buf))
                        break;
@@ -2327,6 +2347,8 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
                ret = -ERANGE;
        }
 
+out:
+       free(nbase);
        return ret;
 }
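
Duplicating the base name first means the ".isra"/".constprop" style suffixes can be cut before any formatting, the duplicate-name warning now prints the trimmed name, and every exit path frees the copy through the single out: label. The trimming rule on its own, as a standalone sketch:

    #include <stdlib.h>
    #include <string.h>

    /* "vfs_read.isra.3" -> "vfs_read"; a name whose first character is the
     * dot (p == nbase) is left alone, matching the guard above. */
    static char *event_base_name(const char *sym)
    {
            char *nbase = strdup(sym), *p;

            if (!nbase)
                    return NULL;
            p = strchr(nbase, '.');
            if (p && p != nbase)
                    *p = '\0';
            return nbase;           /* caller frees */
    }
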
 
@@ -2399,7 +2421,6 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
 {
        int i, fd, ret;
        struct probe_trace_event *tev = NULL;
-       const char *event = NULL, *group = NULL;
        struct strlist *namelist;
 
        fd = probe_file__open(PF_FL_RW | (pev->uprobes ? PF_FL_UPROBE : 0));
@@ -2415,7 +2436,6 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
        }
 
        ret = 0;
-       pr_info("Added new event%s\n", (ntevs > 1) ? "s:" : ":");
        for (i = 0; i < ntevs; i++) {
                tev = &tevs[i];
                /* Skip if the symbol is out of .text or blacklisted */
@@ -2432,13 +2452,6 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
                if (ret < 0)
                        break;
 
-               /* We use tev's name for showing new events */
-               show_perf_probe_event(tev->group, tev->event, pev,
-                                     tev->point.module, false);
-               /* Save the last valid name */
-               event = tev->event;
-               group = tev->group;
-
                /*
                 * Probes after the first probe which comes from same
                 * user input are always allowed to add suffix, because
@@ -2450,13 +2463,6 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
        if (ret == -EINVAL && pev->uprobes)
                warn_uprobe_event_compat(tev);
 
-       /* Note that it is possible to skip all events because of blacklist */
-       if (ret >= 0 && event) {
-               /* Show how to use the event. */
-               pr_info("\nYou can now use it in all perf tools, such as:\n\n");
-               pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", group, event);
-       }
-
        strlist__delete(namelist);
 close_out:
        close(fd);
@@ -2537,7 +2543,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
                goto out;
        }
 
-       if (!pev->uprobes && !pp->retprobe) {
+       /* Note that the symbols in the kmodule are not relocated */
+       if (!pev->uprobes && !pp->retprobe && !pev->target) {
                reloc_sym = kernel_get_ref_reloc_sym();
                if (!reloc_sym) {
                        pr_warning("Relocated base symbol is not found!\n");
@@ -2574,8 +2581,9 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
                }
                /* Add one probe point */
                tp->address = map->unmap_ip(map, sym->start) + pp->offset;
-               /* If we found a wrong one, mark it by NULL symbol */
-               if (!pev->uprobes &&
+
+       /* Check that the kprobe (not in a module) is within .text */
+               if (!pev->uprobes && !pev->target &&
                    kprobe_warn_out_range(sym->name, tp->address)) {
                        tp->symbol = NULL;      /* Skip it */
                        skipped++;
@@ -2759,63 +2767,71 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
        return find_probe_trace_events_from_map(pev, tevs);
 }
 
-struct __event_package {
-       struct perf_probe_event         *pev;
-       struct probe_trace_event        *tevs;
-       int                             ntevs;
-};
-
-int add_perf_probe_events(struct perf_probe_event *pevs, int npevs)
+int convert_perf_probe_events(struct perf_probe_event *pevs, int npevs)
 {
-       int i, j, ret;
-       struct __event_package *pkgs;
-
-       ret = 0;
-       pkgs = zalloc(sizeof(struct __event_package) * npevs);
-
-       if (pkgs == NULL)
-               return -ENOMEM;
-
-       ret = init_symbol_maps(pevs->uprobes);
-       if (ret < 0) {
-               free(pkgs);
-               return ret;
-       }
+       int i, ret;
 
        /* Loop 1: convert all events */
        for (i = 0; i < npevs; i++) {
-               pkgs[i].pev = &pevs[i];
                /* Init kprobe blacklist if needed */
-               if (!pkgs[i].pev->uprobes)
+               if (!pevs[i].uprobes)
                        kprobe_blacklist__init();
                /* Convert with or without debuginfo */
-               ret  = convert_to_probe_trace_events(pkgs[i].pev,
-                                                    &pkgs[i].tevs);
+               ret  = convert_to_probe_trace_events(&pevs[i], &pevs[i].tevs);
                if (ret < 0)
-                       goto end;
-               pkgs[i].ntevs = ret;
+                       return ret;
+               pevs[i].ntevs = ret;
        }
        /* This just release blacklist only if allocated */
        kprobe_blacklist__release();
 
+       return 0;
+}
+
+int apply_perf_probe_events(struct perf_probe_event *pevs, int npevs)
+{
+       int i, ret = 0;
+
        /* Loop 2: add all events */
        for (i = 0; i < npevs; i++) {
-               ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs,
-                                              pkgs[i].ntevs,
+               ret = __add_probe_trace_events(&pevs[i], pevs[i].tevs,
+                                              pevs[i].ntevs,
                                               probe_conf.force_add);
                if (ret < 0)
                        break;
        }
-end:
+       return ret;
+}
+
+void cleanup_perf_probe_events(struct perf_probe_event *pevs, int npevs)
+{
+       int i, j;
+
        /* Loop 3: cleanup and free trace events  */
        for (i = 0; i < npevs; i++) {
-               for (j = 0; j < pkgs[i].ntevs; j++)
-                       clear_probe_trace_event(&pkgs[i].tevs[j]);
-               zfree(&pkgs[i].tevs);
+               for (j = 0; j < pevs[i].ntevs; j++)
+                       clear_probe_trace_event(&pevs[i].tevs[j]);
+               zfree(&pevs[i].tevs);
+               pevs[i].ntevs = 0;
+               clear_perf_probe_event(&pevs[i]);
        }
-       free(pkgs);
-       exit_symbol_maps();
+}
 
+int add_perf_probe_events(struct perf_probe_event *pevs, int npevs)
+{
+       int ret;
+
+       ret = init_probe_symbol_maps(pevs->uprobes);
+       if (ret < 0)
+               return ret;
+
+       ret = convert_perf_probe_events(pevs, npevs);
+       if (ret == 0)
+               ret = apply_perf_probe_events(pevs, npevs);
+
+       cleanup_perf_probe_events(pevs, npevs);
+
+       exit_probe_symbol_maps();
        return ret;
 }
 
@@ -2827,8 +2843,6 @@ int del_perf_probe_events(struct strfilter *filter)
        if (!str)
                return -EINVAL;
 
-       pr_debug("Delete filter: \'%s\'\n", str);
-
        /* Get current event names */
        ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);
        if (ret < 0)
@@ -2843,9 +2857,6 @@ int del_perf_probe_events(struct strfilter *filter)
                ret = ret2;
                goto error;
        }
-       if (ret == -ENOENT && ret2 == -ENOENT)
-               pr_debug("\"%s\" does not hit any event.\n", str);
-               /* Note that this is silently ignored */
        ret = 0;
 
 error:
@@ -2880,7 +2891,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
        struct map *map;
        int ret;
 
-       ret = init_symbol_maps(user);
+       ret = init_probe_symbol_maps(user);
        if (ret < 0)
                return ret;
 
@@ -2910,7 +2921,7 @@ end:
        if (user) {
                map__put(map);
        }
-       exit_symbol_maps();
+       exit_probe_symbol_maps();
 
        return ret;
 }
index 6e7ec68a4aa84ce0818c52e1b8d2a488c32e4b31..ba926c30f8cdcc3718f75cb51ef57f7f3f5c5c95 100644 (file)
@@ -87,6 +87,8 @@ struct perf_probe_event {
        bool                    uprobes;        /* Uprobe event flag */
        char                    *target;        /* Target binary */
        struct perf_probe_arg   *args;  /* Arguments */
+       struct probe_trace_event *tevs;
+       int                     ntevs;
 };
 
 /* Line range */
@@ -108,6 +110,8 @@ struct variable_list {
 };
 
 struct map;
+int init_probe_symbol_maps(bool user_only);
+void exit_probe_symbol_maps(void);
 
 /* Command string to events */
 extern int parse_perf_probe_command(const char *cmd,
@@ -138,7 +142,14 @@ extern void line_range__clear(struct line_range *lr);
 extern int line_range__init(struct line_range *lr);
 
 extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs);
+extern int convert_perf_probe_events(struct perf_probe_event *pevs, int npevs);
+extern int apply_perf_probe_events(struct perf_probe_event *pevs, int npevs);
+extern void cleanup_perf_probe_events(struct perf_probe_event *pevs, int npevs);
 extern int del_perf_probe_events(struct strfilter *filter);
+
+extern int show_perf_probe_event(const char *group, const char *event,
+                                struct perf_probe_event *pev,
+                                const char *module, bool use_stdout);
 extern int show_perf_probe_events(struct strfilter *filter);
 extern int show_line_range(struct line_range *lr, const char *module,
                           bool user);
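
Exporting the three phases (plus the symbol-map init/exit) lets a caller do its own work between converting probe events and writing them out, instead of being limited to the all-in-one add_perf_probe_events(). A hedged sketch of the calling sequence a consumer is expected to follow, mirroring how add_perf_probe_events() itself is now composed in the .c hunk above (not standalone, it relies on the declarations added here):

    static int add_probes(struct perf_probe_event *pevs, int npevs)
    {
            int ret = init_probe_symbol_maps(pevs->uprobes);

            if (ret < 0)
                    return ret;

            ret = convert_perf_probe_events(pevs, npevs);
            if (ret == 0)           /* inspect/print pevs[i].tevs here if needed */
                    ret = apply_perf_probe_events(pevs, npevs);

            cleanup_perf_probe_events(pevs, npevs);
            exit_probe_symbol_maps();
            return ret;
    }
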
index bbb243717ec834f0548e9d5bf8a3ac0ff48f8c4b..89dbeb92c68e9de1bc001edddd902940700ab4f2 100644 (file)
@@ -22,8 +22,7 @@
 #include "color.h"
 #include "symbol.h"
 #include "thread.h"
-#include <api/fs/debugfs.h>
-#include <api/fs/tracefs.h>
+#include <api/fs/tracing_path.h>
 #include "probe-event.h"
 #include "probe-file.h"
 #include "session.h"
@@ -73,21 +72,11 @@ static void print_both_open_warning(int kerr, int uerr)
 static int open_probe_events(const char *trace_file, bool readwrite)
 {
        char buf[PATH_MAX];
-       const char *__debugfs;
        const char *tracing_dir = "";
        int ret;
 
-       __debugfs = tracefs_find_mountpoint();
-       if (__debugfs == NULL) {
-               tracing_dir = "tracing/";
-
-               __debugfs = debugfs_find_mountpoint();
-               if (__debugfs == NULL)
-                       return -ENOTSUP;
-       }
-
        ret = e_snprintf(buf, PATH_MAX, "%s/%s%s",
-                        __debugfs, tracing_dir, trace_file);
+                        tracing_path, tracing_dir, trace_file);
        if (ret >= 0) {
                pr_debug("Opening %s write=%d\n", buf, readwrite);
                if (readwrite && !probe_event_dry_run)
@@ -267,7 +256,6 @@ static int __del_trace_probe_event(int fd, struct str_node *ent)
                goto error;
        }
 
-       pr_info("Removed event: %s\n", ent->s);
        return 0;
 error:
        pr_warning("Failed to delete event: %s\n",
@@ -275,7 +263,8 @@ error:
        return ret;
 }
 
-int probe_file__del_events(int fd, struct strfilter *filter)
+int probe_file__get_events(int fd, struct strfilter *filter,
+                          struct strlist *plist)
 {
        struct strlist *namelist;
        struct str_node *ent;
@@ -290,12 +279,43 @@ int probe_file__del_events(int fd, struct strfilter *filter)
                p = strchr(ent->s, ':');
                if ((p && strfilter__compare(filter, p + 1)) ||
                    strfilter__compare(filter, ent->s)) {
-                       ret = __del_trace_probe_event(fd, ent);
-                       if (ret < 0)
-                               break;
+                       strlist__add(plist, ent->s);
+                       ret = 0;
                }
        }
        strlist__delete(namelist);
 
        return ret;
 }
+
+int probe_file__del_strlist(int fd, struct strlist *namelist)
+{
+       int ret = 0;
+       struct str_node *ent;
+
+       strlist__for_each(ent, namelist) {
+               ret = __del_trace_probe_event(fd, ent);
+               if (ret < 0)
+                       break;
+       }
+       return ret;
+}
+
+int probe_file__del_events(int fd, struct strfilter *filter)
+{
+       struct strlist *namelist;
+       int ret;
+
+       namelist = strlist__new(NULL, NULL);
+       if (!namelist)
+               return -ENOMEM;
+
+       ret = probe_file__get_events(fd, filter, namelist);
+       if (ret < 0)
+               return ret;
+
+       ret = probe_file__del_strlist(fd, namelist);
+       strlist__delete(namelist);
+
+       return ret;
+}
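
Splitting deletion into "collect matching names" and "delete this list" lets the caller see, and report, exactly which probes were removed, which is why the unconditional "Removed event" print disappeared from __del_trace_probe_event() earlier in this file. A hedged usage sketch built only from the functions declared in this patch plus perf's strlist API (not standalone):

    static int del_and_report(int fd, struct strfilter *filter)
    {
            struct strlist *names = strlist__new(NULL, NULL);
            struct str_node *ent;
            int ret;

            if (!names)
                    return -ENOMEM;

            ret = probe_file__get_events(fd, filter, names);
            if (ret == 0) {
                    strlist__for_each(ent, names)
                            printf("Removed event: %s\n", ent->s);
                    ret = probe_file__del_strlist(fd, names);
            }
            strlist__delete(names);
            return ret;
    }
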
index ada94a242a17c80edb8f53c82f31145b70993653..18ac9cf51c3433438eb3036d340f34e5975161bc 100644 (file)
@@ -14,5 +14,9 @@ struct strlist *probe_file__get_namelist(int fd);
 struct strlist *probe_file__get_rawlist(int fd);
 int probe_file__add_event(int fd, struct probe_trace_event *tev);
 int probe_file__del_events(int fd, struct strfilter *filter);
+int probe_file__get_events(int fd, struct strfilter *filter,
+                                 struct strlist *plist);
+int probe_file__del_strlist(int fd, struct strlist *namelist);
+
 
 #endif
index 29c43c0680a8fc0dbefbd9d35b5e6488d6807f40..bd8f03de5e408293a642478a795c6436143edbcf 100644 (file)
@@ -70,6 +70,7 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *dbg,
        if (!dbg->dwfl)
                goto error;
 
+       dwfl_report_begin(dbg->dwfl);
        dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd);
        if (!dbg->mod)
                goto error;
@@ -78,6 +79,8 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *dbg,
        if (!dbg->dbg)
                goto error;
 
+       dwfl_report_end(dbg->dwfl, NULL, NULL);
+
        return 0;
 error:
        if (dbg->dwfl)
@@ -591,6 +594,7 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
 /* Convert subprogram DIE to trace point */
 static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
                                  Dwarf_Addr paddr, bool retprobe,
+                                 const char *function,
                                  struct probe_trace_point *tp)
 {
        Dwarf_Addr eaddr, highaddr;
@@ -634,8 +638,10 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
        /* Return probe must be on the head of a subprogram */
        if (retprobe) {
                if (eaddr != paddr) {
-                       pr_warning("Return probe must be on the head of"
-                                  " a real function.\n");
+                       pr_warning("Failed to find \"%s%%return\",\n"
+                                  " because %s is an inlined function and"
+                                  " has no return point.\n", function,
+                                  function);
                        return -EINVAL;
                }
                tp->retprobe = true;
@@ -1175,6 +1181,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
 {
        struct trace_event_finder *tf =
                        container_of(pf, struct trace_event_finder, pf);
+       struct perf_probe_point *pp = &pf->pev->point;
        struct probe_trace_event *tev;
        struct perf_probe_arg *args;
        int ret, i;
@@ -1189,7 +1196,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
 
        /* Trace point should be converted from subprogram DIE */
        ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
-                                    pf->pev->point.retprobe, &tev->point);
+                                    pp->retprobe, pp->function, &tev->point);
        if (ret < 0)
                return ret;
 
@@ -1319,6 +1326,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
 {
        struct available_var_finder *af =
                        container_of(pf, struct available_var_finder, pf);
+       struct perf_probe_point *pp = &pf->pev->point;
        struct variable_list *vl;
        Dwarf_Die die_mem;
        int ret;
@@ -1332,7 +1340,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
 
        /* Trace point should be converted from subprogram DIE */
        ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr,
-                                    pf->pev->point.retprobe, &vl->point);
+                                    pp->retprobe, pp->function, &vl->point);
        if (ret < 0)
                return ret;
 
@@ -1399,6 +1407,41 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
        return (ret < 0) ? ret : af.nvls;
 }
 
+/* For kernel modules, we need special code to get a DIE */
+static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
+{
+       int n, i;
+       Elf32_Word shndx;
+       Elf_Scn *scn;
+       Elf *elf;
+       GElf_Shdr mem, *shdr;
+       const char *p;
+
+       elf = dwfl_module_getelf(dbg->mod, &dbg->bias);
+       if (!elf)
+               return -EINVAL;
+
+       /* Get the number of relocations */
+       n = dwfl_module_relocations(dbg->mod);
+       if (n < 0)
+               return -ENOENT;
+       /* Search the relocation related .text section */
+       for (i = 0; i < n; i++) {
+               p = dwfl_module_relocation_info(dbg->mod, i, &shndx);
+               if (strcmp(p, ".text") == 0) {
+                       /* OK, get the section header */
+                       scn = elf_getscn(elf, shndx);
+                       if (!scn)
+                               return -ENOENT;
+                       shdr = gelf_getshdr(scn, &mem);
+                       if (!shdr)
+                               return -ENOENT;
+                       *offs = shdr->sh_addr;
+               }
+       }
+       return 0;
+}
+
 /* Reverse search */
 int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
                                struct perf_probe_point *ppt)
@@ -1407,9 +1450,16 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
        Dwarf_Addr _addr = 0, baseaddr = 0;
        const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
        int baseline = 0, lineno = 0, ret = 0;
+       bool reloc = false;
 
+retry:
        /* Find cu die */
        if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
+               if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
+                       addr += baseaddr;
+                       reloc = true;
+                       goto retry;
+               }
                pr_warning("Failed to find debug information for address %lx\n",
                           addr);
                ret = -EINVAL;
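
For probes in kernel modules, the DWARF information is section-relative, so the reverse lookup above first tries the address as given and, only if no CU is found, adds the .text offset recovered by debuginfo__get_text_offset() and retries once. The control flow in isolation, with the two lookups stubbed out as hypothetical helpers so the sketch is self-contained:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-ins for dwarf_addrdie() and debuginfo__get_text_offset(). */
    static bool addr_has_debuginfo(uint64_t addr) { return addr >= 0x1000; }
    static int text_offset(uint64_t *offs) { *offs = 0x1000; return 0; }

    static int find_probe_point(uint64_t addr)
    {
            bool reloc = false;
            uint64_t offs;

    retry:
            if (addr_has_debuginfo(addr))
                    return 0;
            if (!reloc && text_offset(&offs) == 0) {
                    addr += offs;   /* module DWARF is .text-relative */
                    reloc = true;
                    goto retry;
            }
            return -EINVAL;
    }
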
index 6324fe6b161e8f45a60fdde0573e76e6fc174298..98f127abfa42a2d9b8d06b24b6c17090781c87fb 100644 (file)
@@ -67,6 +67,7 @@ static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
 static PyMemberDef pyrf_mmap_event__members[] = {
        sample_members
        member_def(perf_event_header, type, T_UINT, "event type"),
+       member_def(perf_event_header, misc, T_UINT, "event misc"),
        member_def(mmap_event, pid, T_UINT, "event pid"),
        member_def(mmap_event, tid, T_UINT, "event tid"),
        member_def(mmap_event, start, T_ULONGLONG, "start of the map"),
@@ -297,6 +298,43 @@ static PyTypeObject pyrf_sample_event__type = {
        .tp_repr        = (reprfunc)pyrf_sample_event__repr,
 };
 
+static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
+
+static PyMemberDef pyrf_context_switch_event__members[] = {
+       sample_members
+       member_def(perf_event_header, type, T_UINT, "event type"),
+       member_def(context_switch_event, next_prev_pid, T_UINT, "next/prev pid"),
+       member_def(context_switch_event, next_prev_tid, T_UINT, "next/prev tid"),
+       { .name = NULL, },
+};
+
+static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
+{
+       PyObject *ret;
+       char *s;
+
+       if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
+                    pevent->event.context_switch.next_prev_pid,
+                    pevent->event.context_switch.next_prev_tid,
+                    !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
+               ret = PyErr_NoMemory();
+       } else {
+               ret = PyString_FromString(s);
+               free(s);
+       }
+       return ret;
+}
+
+static PyTypeObject pyrf_context_switch_event__type = {
+       PyVarObject_HEAD_INIT(NULL, 0)
+       .tp_name        = "perf.context_switch_event",
+       .tp_basicsize   = sizeof(struct pyrf_event),
+       .tp_flags       = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+       .tp_doc         = pyrf_context_switch_event__doc,
+       .tp_members     = pyrf_context_switch_event__members,
+       .tp_repr        = (reprfunc)pyrf_context_switch_event__repr,
+};
+
 static int pyrf_event__setup_types(void)
 {
        int err;
@@ -306,6 +344,7 @@ static int pyrf_event__setup_types(void)
        pyrf_lost_event__type.tp_new =
        pyrf_read_event__type.tp_new =
        pyrf_sample_event__type.tp_new =
+       pyrf_context_switch_event__type.tp_new =
        pyrf_throttle_event__type.tp_new = PyType_GenericNew;
        err = PyType_Ready(&pyrf_mmap_event__type);
        if (err < 0)
@@ -328,6 +367,9 @@ static int pyrf_event__setup_types(void)
        err = PyType_Ready(&pyrf_sample_event__type);
        if (err < 0)
                goto out;
+       err = PyType_Ready(&pyrf_context_switch_event__type);
+       if (err < 0)
+               goto out;
 out:
        return err;
 }
@@ -342,6 +384,8 @@ static PyTypeObject *pyrf_event__type[] = {
        [PERF_RECORD_FORK]       = &pyrf_task_event__type,
        [PERF_RECORD_READ]       = &pyrf_read_event__type,
        [PERF_RECORD_SAMPLE]     = &pyrf_sample_event__type,
+       [PERF_RECORD_SWITCH]     = &pyrf_context_switch_event__type,
+       [PERF_RECORD_SWITCH_CPU_WIDE]  = &pyrf_context_switch_event__type,
 };
 
 static PyObject *pyrf_event__new(union perf_event *event)
@@ -349,8 +393,10 @@ static PyObject *pyrf_event__new(union perf_event *event)
        struct pyrf_event *pevent;
        PyTypeObject *ptype;
 
-       if (event->header.type < PERF_RECORD_MMAP ||
-           event->header.type > PERF_RECORD_SAMPLE)
+       if ((event->header.type < PERF_RECORD_MMAP ||
+            event->header.type > PERF_RECORD_SAMPLE) &&
+           !(event->header.type == PERF_RECORD_SWITCH ||
+             event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
                return NULL;
 
        ptype = pyrf_event__type[event->header.type];
@@ -528,6 +574,7 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
                "exclude_hv",
                "exclude_idle",
                "mmap",
+               "context_switch",
                "comm",
                "freq",
                "inherit_stat",
@@ -553,6 +600,7 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
            exclude_hv = 0,
            exclude_idle = 0,
            mmap = 0,
+           context_switch = 0,
            comm = 0,
            freq = 1,
            inherit_stat = 0,
@@ -565,13 +613,13 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
        int idx = 0;
 
        if (!PyArg_ParseTupleAndKeywords(args, kwargs,
-                                        "|iKiKKiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
+                                        "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
                                         &attr.type, &attr.config, &attr.sample_freq,
                                         &sample_period, &attr.sample_type,
                                         &attr.read_format, &disabled, &inherit,
                                         &pinned, &exclusive, &exclude_user,
                                         &exclude_kernel, &exclude_hv, &exclude_idle,
-                                        &mmap, &comm, &freq, &inherit_stat,
+                                        &mmap, &context_switch, &comm, &freq, &inherit_stat,
                                         &enable_on_exec, &task, &watermark,
                                         &precise_ip, &mmap_data, &sample_id_all,
                                         &attr.wakeup_events, &attr.bp_type,
@@ -595,6 +643,7 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
        attr.exclude_hv     = exclude_hv;
        attr.exclude_idle   = exclude_idle;
        attr.mmap           = mmap;
+       attr.context_switch = context_switch;
        attr.comm           = comm;
        attr.freq           = freq;
        attr.inherit_stat   = inherit_stat;
@@ -1019,6 +1068,8 @@ static struct {
        PERF_CONST(RECORD_LOST_SAMPLES),
        PERF_CONST(RECORD_SWITCH),
        PERF_CONST(RECORD_SWITCH_CPU_WIDE),
+
+       PERF_CONST(RECORD_MISC_SWITCH_OUT),
        { .name = NULL, },
 };
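
With PERF_RECORD_SWITCH and PERF_RECORD_SWITCH_CPU_WIDE mapped to the new perf.context_switch_event type, and context_switch accepted as an evsel keyword, the remaining subtlety is that the switch direction is not a field of the record: it is the PERF_RECORD_MISC_SWITCH_OUT bit in header.misc, which is why the misc member is exported above. A minimal sketch of that test; the constant's value is taken from the perf_event UAPI header of this era:

    #include <stdbool.h>
    #include <stdint.h>

    #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)  /* uapi/linux/perf_event.h */

    static bool is_switch_out(uint16_t misc)
    {
            return misc & PERF_RECORD_MISC_SWITCH_OUT;
    }
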
 
index 1bd593bbf7a56c1ddd5a6a9b6466f27f3eeafd53..544509c159cec4111e5865007b513e1197c576c4 100644 (file)
@@ -221,6 +221,7 @@ static void define_event_symbols(struct event_format *event,
                break;
        case PRINT_BSTRING:
        case PRINT_DYNAMIC_ARRAY:
+       case PRINT_DYNAMIC_ARRAY_LEN:
        case PRINT_STRING:
        case PRINT_BITMASK:
                break;
index ace2484985cb45c298529523d791736a25a5c12b..a8e825fca42af9aeab46de300855e5e22ea8ffc0 100644 (file)
@@ -251,6 +251,7 @@ static void define_event_symbols(struct event_format *event,
                /* gcc warns for these? */
        case PRINT_BSTRING:
        case PRINT_DYNAMIC_ARRAY:
+       case PRINT_DYNAMIC_ARRAY_LEN:
        case PRINT_FUNC:
        case PRINT_BITMASK:
                /* we should warn... */
@@ -318,7 +319,7 @@ static PyObject *python_process_callchain(struct perf_sample *sample,
 
        if (thread__resolve_callchain(al->thread, evsel,
                                      sample, NULL, NULL,
-                                     PERF_MAX_STACK_DEPTH) != 0) {
+                                     scripting_max_stack) != 0) {
                pr_err("Failed to resolve callchain. Skipping\n");
                goto exit;
        }
index 8a4537ee9bc374166c31d05f6e48b6ca6943a4b3..428149bc64d23c6a12e9c141fb3070809090ad36 100644 (file)
@@ -138,6 +138,8 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
                        perf_session__set_id_hdr_size(session);
                        perf_session__set_comm_exec(session);
                }
+       } else  {
+               session->machines.host.env = &perf_env;
        }
 
        if (!file || perf_data_file__is_write(file)) {
@@ -170,30 +172,13 @@ static void perf_session__delete_threads(struct perf_session *session)
        machine__delete_threads(&session->machines.host);
 }
 
-static void perf_session_env__exit(struct perf_env *env)
-{
-       zfree(&env->hostname);
-       zfree(&env->os_release);
-       zfree(&env->version);
-       zfree(&env->arch);
-       zfree(&env->cpu_desc);
-       zfree(&env->cpuid);
-
-       zfree(&env->cmdline);
-       zfree(&env->cmdline_argv);
-       zfree(&env->sibling_cores);
-       zfree(&env->sibling_threads);
-       zfree(&env->numa_nodes);
-       zfree(&env->pmu_mappings);
-}
-
 void perf_session__delete(struct perf_session *session)
 {
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
-       perf_session_env__exit(&session->header.env);
+       perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->file)
                perf_data_file__close(session->file);
@@ -1079,11 +1064,11 @@ static int machines__deliver_event(struct machines *machines,
 
        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
-               dump_sample(evsel, event, sample);
                if (evsel == NULL) {
                        ++evlist->stats.nr_unknown_id;
                        return 0;
                }
+               dump_sample(evsel, event, sample);
                if (machine == NULL) {
                        ++evlist->stats.nr_unprocessable_samples;
                        return 0;
@@ -1116,6 +1101,9 @@ static int machines__deliver_event(struct machines *machines,
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        case PERF_RECORD_AUX:
+               if (tool->aux == perf_event__process_aux &&
+                   (event->aux.flags & PERF_AUX_FLAG_TRUNCATED))
+                       evlist->stats.total_aux_lost += 1;
                return tool->aux(tool, event, sample, machine);
        case PERF_RECORD_ITRACE_START:
                return tool->itrace_start(tool, event, sample, machine);
@@ -1323,7 +1311,7 @@ struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
        return machine__findnew_thread(&session->machines.host, -1, pid);
 }
 
-static struct thread *perf_session__register_idle_thread(struct perf_session *session)
+struct thread *perf_session__register_idle_thread(struct perf_session *session)
 {
        struct thread *thread;
 
@@ -1361,6 +1349,13 @@ static void perf_session__warn_about_errors(const struct perf_session *session)
                }
        }
 
+       if (session->tool->aux == perf_event__process_aux &&
+           stats->total_aux_lost != 0) {
+               ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
+                           stats->total_aux_lost,
+                           stats->nr_events[PERF_RECORD_AUX]);
+       }
+
        if (stats->nr_unknown_events != 0) {
                ui__warning("Found %u unknown events!\n\n"
                            "Is this an older tool processing a perf.data "
@@ -1580,7 +1575,10 @@ static int __perf_session__process_events(struct perf_session *session,
        file_offset = page_offset;
        head = data_offset - page_offset;
 
-       if (data_size && (data_offset + data_size < file_size))
+       if (data_size == 0)
+               goto out;
+
+       if (data_offset + data_size < file_size)
                file_size = data_offset + data_size;
 
        ui_progress__init(&prog, file_size, "Processing events...");
@@ -1802,7 +1800,7 @@ void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
 
                if (thread__resolve_callchain(al->thread, evsel,
                                              sample, NULL, NULL,
-                                             PERF_MAX_STACK_DEPTH) != 0) {
+                                             stack_depth) != 0) {
                        if (verbose)
                                error("Failed to resolve callchain. Skipping\n");
                        return;
index b44afc75d1cc51feb05f943973d0813e8556122c..3e900c0efc734a19188ff2e6658e0321f9b6b843 100644 (file)
@@ -89,6 +89,8 @@ struct machine *perf_session__findnew_machine(struct perf_session *session, pid_
 }
 
 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid);
+struct thread *perf_session__register_idle_thread(struct perf_session *session);
+
 size_t perf_session__fprintf(struct perf_session *session, FILE *fp);
 
 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp);
index 7e3871606df3a9911fe150105942d61e4c2a5848..2d8ccd4d9e1b7bd5231d35c2afaaec9078c86573 100644 (file)
@@ -21,6 +21,7 @@ int           sort__need_collapse = 0;
 int            sort__has_parent = 0;
 int            sort__has_sym = 0;
 int            sort__has_dso = 0;
+int            sort__has_socket = 0;
 enum sort_mode sort__mode = SORT_MODE__NORMAL;
 
 
@@ -328,8 +329,8 @@ static char *get_srcfile(struct hist_entry *e)
        char *sf, *p;
        struct map *map = e->ms.map;
 
-       sf = get_srcline(map->dso, map__rip_2objdump(map, e->ip),
-                        e->ms.sym, true);
+       sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
+                        e->ms.sym, false, true);
        if (!strcmp(sf, SRCLINE_UNKNOWN))
                return no_srcfile;
        p = strchr(sf, ':');
@@ -421,6 +422,27 @@ struct sort_entry sort_cpu = {
        .se_width_idx   = HISTC_CPU,
 };
 
+/* --sort socket */
+
+static int64_t
+sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+       return right->socket - left->socket;
+}
+
+static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
+                                   size_t size, unsigned int width)
+{
+       return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
+}
+
+struct sort_entry sort_socket = {
+       .se_header      = "Socket",
+       .se_cmp         = sort__socket_cmp,
+       .se_snprintf    = hist_entry__socket_snprintf,
+       .se_width_idx   = HISTC_SOCKET,
+};
+
 /* sort keys for branch stacks */
 
 static int64_t
@@ -632,6 +654,35 @@ static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
                                         width);
 }
 
+static int64_t
+sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+       uint64_t l = 0, r = 0;
+
+       if (left->mem_info)
+               l = left->mem_info->iaddr.addr;
+       if (right->mem_info)
+               r = right->mem_info->iaddr.addr;
+
+       return (int64_t)(r - l);
+}
+
+static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
+                                   size_t size, unsigned int width)
+{
+       uint64_t addr = 0;
+       struct map *map = NULL;
+       struct symbol *sym = NULL;
+
+       if (he->mem_info) {
+               addr = he->mem_info->iaddr.addr;
+               map  = he->mem_info->iaddr.map;
+               sym  = he->mem_info->iaddr.sym;
+       }
+       return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
+                                        width);
+}
+
 static int64_t
 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
 {
@@ -1055,6 +1106,13 @@ struct sort_entry sort_mem_daddr_sym = {
        .se_width_idx   = HISTC_MEM_DADDR_SYMBOL,
 };
 
+struct sort_entry sort_mem_iaddr_sym = {
+       .se_header      = "Code Symbol",
+       .se_cmp         = sort__iaddr_cmp,
+       .se_snprintf    = hist_entry__iaddr_snprintf,
+       .se_width_idx   = HISTC_MEM_IADDR_SYMBOL,
+};
+
 struct sort_entry sort_mem_daddr_dso = {
        .se_header      = "Data Object",
        .se_cmp         = sort__dso_daddr_cmp,
@@ -1248,6 +1306,7 @@ static struct sort_dimension common_sort_dimensions[] = {
        DIM(SORT_SYM, "symbol", sort_sym),
        DIM(SORT_PARENT, "parent", sort_parent),
        DIM(SORT_CPU, "cpu", sort_cpu),
+       DIM(SORT_SOCKET, "socket", sort_socket),
        DIM(SORT_SRCLINE, "srcline", sort_srcline),
        DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
        DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
@@ -1276,6 +1335,7 @@ static struct sort_dimension bstack_sort_dimensions[] = {
 
 static struct sort_dimension memory_sort_dimensions[] = {
        DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
+       DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
        DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
        DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
        DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
@@ -1517,6 +1577,12 @@ static int __hpp_dimension__add_output(struct hpp_dimension *hd)
        return 0;
 }
 
+int hpp_dimension__add_output(unsigned col)
+{
+       BUG_ON(col >= PERF_HPP__MAX_INDEX);
+       return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
+}
+
 int sort_dimension__add(const char *tok)
 {
        unsigned int i;
@@ -1550,6 +1616,8 @@ int sort_dimension__add(const char *tok)
 
                } else if (sd->entry == &sort_dso) {
                        sort__has_dso = 1;
+               } else if (sd->entry == &sort_socket) {
+                       sort__has_socket = 1;
                }
 
                return __sort_dimension__add(sd);
@@ -1855,8 +1923,6 @@ static int __setup_output_field(void)
        if (field_order == NULL)
                return 0;
 
-       reset_dimensions();
-
        strp = str = strdup(field_order);
        if (str == NULL) {
                error("Not enough memory to setup output fields");
index 3c2a399f8f5b9ca093efe7afac24e9034376310b..31228851e397770d6b7567be00e7feb27435c8c0 100644 (file)
@@ -34,6 +34,7 @@ extern int have_ignore_callees;
 extern int sort__need_collapse;
 extern int sort__has_parent;
 extern int sort__has_sym;
+extern int sort__has_socket;
 extern enum sort_mode sort__mode;
 extern struct sort_entry sort_comm;
 extern struct sort_entry sort_dso;
@@ -90,6 +91,7 @@ struct hist_entry {
        struct comm             *comm;
        u64                     ip;
        u64                     transaction;
+       s32                     socket;
        s32                     cpu;
        u8                      cpumode;
 
@@ -172,6 +174,7 @@ enum sort_type {
        SORT_SYM,
        SORT_PARENT,
        SORT_CPU,
+       SORT_SOCKET,
        SORT_SRCLINE,
        SORT_SRCFILE,
        SORT_LOCAL_WEIGHT,
@@ -198,6 +201,7 @@ enum sort_type {
        SORT_MEM_LVL,
        SORT_MEM_SNOOP,
        SORT_MEM_DCACHELINE,
+       SORT_MEM_IADDR_SYMBOL,
 };
 
 /*
@@ -230,4 +234,6 @@ void perf_hpp__set_elide(int idx, bool elide);
 int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
 
 bool is_strict_order(const char *order);
+
+int hpp_dimension__add_output(unsigned col);
 #endif /* __PERF_SORT_H */
index fc08248f08ca703b0a4330169535264854d5478c..b4db3f48e3b09784f00d610c8cbffea8c26e38c6 100644 (file)
@@ -149,8 +149,11 @@ static void addr2line_cleanup(struct a2l_data *a2l)
        free(a2l);
 }
 
+#define MAX_INLINE_NEST 1024
+
 static int addr2line(const char *dso_name, u64 addr,
-                    char **file, unsigned int *line, struct dso *dso)
+                    char **file, unsigned int *line, struct dso *dso,
+                    bool unwind_inlines)
 {
        int ret = 0;
        struct a2l_data *a2l = dso->a2l;
@@ -170,6 +173,15 @@ static int addr2line(const char *dso_name, u64 addr,
 
        bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
 
+       if (a2l->found && unwind_inlines) {
+               int cnt = 0;
+
+               while (bfd_find_inliner_info(a2l->abfd, &a2l->filename,
+                                            &a2l->funcname, &a2l->line) &&
+                      cnt++ < MAX_INLINE_NEST)
+                       ;
+       }
+
        if (a2l->found && a2l->filename) {
                *file = strdup(a2l->filename);
                *line = a2l->line;
@@ -197,7 +209,8 @@ void dso__free_a2l(struct dso *dso)
 
 static int addr2line(const char *dso_name, u64 addr,
                     char **file, unsigned int *line_nr,
-                    struct dso *dso __maybe_unused)
+                    struct dso *dso __maybe_unused,
+                    bool unwind_inlines __maybe_unused)
 {
        FILE *fp;
        char cmd[PATH_MAX];
@@ -254,8 +267,8 @@ void dso__free_a2l(struct dso *dso __maybe_unused)
  */
 #define A2L_FAIL_LIMIT 123
 
-char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
-                 bool show_sym)
+char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
+                 bool show_sym, bool unwind_inlines)
 {
        char *file = NULL;
        unsigned line = 0;
@@ -276,7 +289,7 @@ char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
        if (!strncmp(dso_name, "/tmp/perf-", 10))
                goto out;
 
-       if (!addr2line(dso_name, addr, &file, &line, dso))
+       if (!addr2line(dso_name, addr, &file, &line, dso, unwind_inlines))
                goto out;
 
        if (asprintf(&srcline, "%s:%u",
@@ -310,3 +323,9 @@ void free_srcline(char *srcline)
        if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0)
                free(srcline);
 }
+
+char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
+                 bool show_sym)
+{
+       return __get_srcline(dso, addr, sym, show_sym, false);
+}
index 415c359de4654be8f68a30effa530dd06696b52b..2d9d8306dbd3f97f3d1f9734ace17b25d9ed583a 100644 (file)
@@ -67,7 +67,7 @@ double rel_stddev_stats(double stddev, double avg)
 bool __perf_evsel_stat__is(struct perf_evsel *evsel,
                           enum perf_stat_evsel_id id)
 {
-       struct perf_stat *ps = evsel->priv;
+       struct perf_stat_evsel *ps = evsel->priv;
 
        return ps->id == id;
 }
@@ -84,7 +84,7 @@ static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
 
 void perf_stat_evsel_id_init(struct perf_evsel *evsel)
 {
-       struct perf_stat *ps = evsel->priv;
+       struct perf_stat_evsel *ps = evsel->priv;
        int i;
 
        /* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */
@@ -100,7 +100,7 @@ void perf_stat_evsel_id_init(struct perf_evsel *evsel)
 void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
 {
        int i;
-       struct perf_stat *ps = evsel->priv;
+       struct perf_stat_evsel *ps = evsel->priv;
 
        for (i = 0; i < 3; i++)
                init_stats(&ps->res_stats[i]);
@@ -110,7 +110,7 @@ void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
 
 int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
 {
-       evsel->priv = zalloc(sizeof(struct perf_stat));
+       evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
        if (evsel->priv == NULL)
                return -ENOMEM;
        perf_evsel__reset_stat_priv(evsel);
@@ -196,7 +196,8 @@ static void zero_per_pkg(struct perf_evsel *counter)
                memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
 }
 
-static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
+static int check_per_pkg(struct perf_evsel *counter,
+                        struct perf_counts_values *vals, int cpu, bool *skip)
 {
        unsigned long *mask = counter->per_pkg_mask;
        struct cpu_map *cpus = perf_evsel__cpus(counter);
@@ -218,7 +219,18 @@ static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
                counter->per_pkg_mask = mask;
        }
 
-       s = cpu_map__get_socket(cpus, cpu);
+       /*
+        * we do not consider an event that has not run as a good
+        * instance to mark a package as used (skip=1). Otherwise
+        * we may run into a situation where the first CPU in a package
+        * is not running anything, yet the second is, and this function
+        * would mark the package as used after the first CPU and would
+        * not read the values from the second CPU.
+        */
+       if (!(vals->run && vals->ena))
+               return 0;
+
+       s = cpu_map__get_socket(cpus, cpu, NULL);
        if (s < 0)
                return -1;
 
@@ -235,7 +247,7 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
        static struct perf_counts_values zero;
        bool skip = false;
 
-       if (check_per_pkg(evsel, cpu, &skip)) {
+       if (check_per_pkg(evsel, count, cpu, &skip)) {
                pr_err("failed to read per-pkg counter\n");
                return -1;
        }
@@ -260,6 +272,7 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
                        aggr->ena += count->ena;
                        aggr->run += count->run;
                }
+       case AGGR_UNSET:
        default:
                break;
        }
@@ -292,7 +305,7 @@ int perf_stat_process_counter(struct perf_stat_config *config,
                              struct perf_evsel *counter)
 {
        struct perf_counts_values *aggr = &counter->counts->aggr;
-       struct perf_stat *ps = counter->priv;
+       struct perf_stat_evsel *ps = counter->priv;
        u64 *count = counter->counts->aggr.values;
        int i, ret;
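
Aside, not part of the commit: the comment added to check_per_pkg() above is the behavioural core of this hunk. Only a counter instance that actually ran (both ena and run non-zero) may claim its package, so a later running CPU on the same socket still gets counted. A toy standalone sketch of that rule, with a made-up socket map and counter values:

#include <stdbool.h>
#include <stdio.h>

struct counts { unsigned long long val, ena, run; };

#define NR_CPUS 4

int main(void)
{
        /* cpu -> socket map and per-cpu counts: illustrative data only */
        int socket_of[NR_CPUS] = { 0, 0, 1, 1 };
        struct counts c[NR_CPUS] = {
                {   0, 0, 0 },  /* cpu0: event never ran here             */
                { 100, 1, 1 },  /* cpu1: first running instance, socket 0 */
                { 200, 1, 1 },  /* cpu2: first running instance, socket 1 */
                { 300, 1, 1 },  /* cpu3: same socket as cpu2              */
        };
        bool socket_used[2] = { false, false };
        unsigned long long total = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                bool skip = false;

                /* only a counter that actually ran may claim its package */
                if (c[cpu].run && c[cpu].ena) {
                        if (socket_used[socket_of[cpu]])
                                skip = true;    /* package already counted */
                        else
                                socket_used[socket_of[cpu]] = true;
                }
                if (!skip)
                        total += c[cpu].val;
        }
        printf("per-pkg total = %llu\n", total); /* 300: cpu1 + cpu2 */
        return 0;
}

If cpu0 were allowed to claim socket 0 despite never running, cpu1's 100 events would be dropped, which is exactly what the new vals->run && vals->ena test prevents.
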
 
index 62448c8175d3de587eb6b9546df7bc4088b2bfa1..da1d11c4f8c193cff476fec76868277b12f7c623 100644 (file)
@@ -20,7 +20,7 @@ enum perf_stat_evsel_id {
        PERF_STAT_EVSEL_ID__MAX,
 };
 
-struct perf_stat {
+struct perf_stat_evsel {
        struct stats            res_stats[3];
        enum perf_stat_evsel_id id;
 };
@@ -31,6 +31,7 @@ enum aggr_mode {
        AGGR_SOCKET,
        AGGR_CORE,
        AGGR_THREAD,
+       AGGR_UNSET,
 };
 
 struct perf_stat_config {
index 4abe23550c73808b16586ac4121bcde244a7fba8..25671fa166188413c66758a978082e89a1ba08d2 100644 (file)
@@ -82,23 +82,22 @@ void strbuf_add(struct strbuf *sb, const void *data, size_t len)
        strbuf_setlen(sb, sb->len + len);
 }
 
-void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
+void strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
 {
        int len;
-       va_list ap;
+       va_list ap_saved;
 
        if (!strbuf_avail(sb))
                strbuf_grow(sb, 64);
-       va_start(ap, fmt);
+
+       va_copy(ap_saved, ap);
        len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
-       va_end(ap);
        if (len < 0)
                die("your vsnprintf is broken");
        if (len > strbuf_avail(sb)) {
                strbuf_grow(sb, len);
-               va_start(ap, fmt);
-               len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
-               va_end(ap);
+               len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
+               va_end(ap_saved);
                if (len > strbuf_avail(sb)) {
                        die("this should not happen, your vsnprintf is broken");
                }
@@ -106,6 +105,15 @@ void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
        strbuf_setlen(sb, sb->len + len);
 }
 
+void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
+{
+       va_list ap;
+
+       va_start(ap, fmt);
+       strbuf_addv(sb, fmt, ap);
+       va_end(ap);
+}
+
 ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint)
 {
        size_t oldlen = sb->len;
index 436ac319f6c76bdf2a2be4b19e60e06dc481f245..529f2f03524915ab9cae7c5608de444fd875812d 100644 (file)
@@ -39,6 +39,7 @@
  */
 
 #include <assert.h>
+#include <stdarg.h>
 
 extern char strbuf_slopbuf[];
 struct strbuf {
@@ -85,6 +86,7 @@ static inline void strbuf_addstr(struct strbuf *sb, const char *s) {
 
 __attribute__((format(printf,2,3)))
 extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...);
+extern void strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap);
 
 /* XXX: if read fails, any partial read is undone */
 extern ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint);
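
Aside, not part of the commit: the strbuf_addv()/strbuf_addf() split above relies on va_copy() so the argument list can be consumed twice, once to measure the formatted length and once more after growing the buffer. A minimal standalone sketch of that pattern, using illustrative names rather than the perf strbuf API:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Append a formatted string to a heap buffer, growing it when needed. */
static int addv(char **buf, size_t *len, size_t *alloc, const char *fmt, va_list ap)
{
        va_list ap_saved;
        int n;

        va_copy(ap_saved, ap);          /* keep a second, unconsumed copy */
        n = vsnprintf(*buf + *len, *alloc - *len, fmt, ap);
        if (n < 0) {
                va_end(ap_saved);
                return -1;
        }
        if ((size_t)n >= *alloc - *len) {       /* truncated: grow and retry */
                char *tmp = realloc(*buf, *len + n + 1);

                if (!tmp) {
                        va_end(ap_saved);
                        return -1;
                }
                *buf = tmp;
                *alloc = *len + n + 1;
                n = vsnprintf(*buf + *len, *alloc - *len, fmt, ap_saved);
        }
        va_end(ap_saved);
        *len += n;
        return 0;
}

static int addf(char **buf, size_t *len, size_t *alloc, const char *fmt, ...)
{
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = addv(buf, len, alloc, fmt, ap);
        va_end(ap);
        return ret;
}

int main(void)
{
        size_t len = 0, alloc = 8;
        char *buf = calloc(1, alloc);

        addf(&buf, &len, &alloc, "pid=%d comm=%s", 1234, "perf");
        puts(buf);              /* pid=1234 comm=perf */
        free(buf);
        return 0;
}

Without the va_copy(), the retry after growing the buffer would reuse a va_list that has already been walked by the first vsnprintf(), which is undefined behaviour.
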
index 53bb5f59ec589c22f7b1cd211cb132a3ca98d702..475d88d0a1c9a772323b3218cfcf5f5900c0e809 100644 (file)
@@ -38,7 +38,7 @@ static inline char *bfd_demangle(void __maybe_unused *v,
 #endif
 
 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
-int elf_getphdrnum(Elf *elf, size_t *dst)
+static int elf_getphdrnum(Elf *elf, size_t *dst)
 {
        GElf_Ehdr gehdr;
        GElf_Ehdr *ehdr;
@@ -1271,8 +1271,6 @@ out_close:
 static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
                       bool temp)
 {
-       GElf_Ehdr *ehdr;
-
        kcore->elfclass = elfclass;
 
        if (temp)
@@ -1289,9 +1287,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
        if (!gelf_newehdr(kcore->elf, elfclass))
                goto out_end;
 
-       ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
-       if (!ehdr)
-               goto out_end;
+       memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
 
        return 0;
 
@@ -1348,23 +1344,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
                           u64 addr, u64 len)
 {
-       GElf_Phdr gphdr;
-       GElf_Phdr *phdr;
-
-       phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
-       if (!phdr)
-               return -1;
-
-       phdr->p_type    = PT_LOAD;
-       phdr->p_flags   = PF_R | PF_W | PF_X;
-       phdr->p_offset  = offset;
-       phdr->p_vaddr   = addr;
-       phdr->p_paddr   = 0;
-       phdr->p_filesz  = len;
-       phdr->p_memsz   = len;
-       phdr->p_align   = page_size;
-
-       if (!gelf_update_phdr(kcore->elf, idx, phdr))
+       GElf_Phdr phdr = {
+               .p_type         = PT_LOAD,
+               .p_flags        = PF_R | PF_W | PF_X,
+               .p_offset       = offset,
+               .p_vaddr        = addr,
+               .p_paddr        = 0,
+               .p_filesz       = len,
+               .p_memsz        = len,
+               .p_align        = page_size,
+       };
+
+       if (!gelf_update_phdr(kcore->elf, idx, &phdr))
                return -1;
 
        return 0;
index fd8477cacf888f2bdac70fbc71a4f0461fbfc9b5..48906333a858c06b41991f33cb0f5f2cdd4f68d9 100644 (file)
@@ -337,7 +337,7 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
                  symbol_filter_t filter __maybe_unused,
                  int kmodule __maybe_unused)
 {
-       unsigned char *build_id[BUILD_ID_SIZE];
+       unsigned char build_id[BUILD_ID_SIZE];
        int ret;
 
        ret = fd__is_64_bit(ss->fd);
index 1f97ffb158a6af43e95cd1428ae0339d5fed3386..b4cc7662677ec858cddf6dd90e906f75c5a14200 100644 (file)
@@ -624,7 +624,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
         * symbols, setting length to 0, and rely on
         * symbols__fixup_end() to fix it up.
         */
-       sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
+       sym = symbol__new(start, 0, kallsyms2elf_binding(type), name);
        if (sym == NULL)
                return -ENOMEM;
        /*
@@ -680,7 +680,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
                        pos->start -= curr_map->start - curr_map->pgoff;
                        if (pos->end)
                                pos->end -= curr_map->start - curr_map->pgoff;
-                       if (curr_map != map) {
+                       if (curr_map->dso != map->dso) {
                                rb_erase_init(&pos->rb_node, root);
                                symbols__insert(
                                        &curr_map->dso->symbols[curr_map->type],
@@ -1406,6 +1406,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
        struct symsrc ss_[2];
        struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
        bool kmod;
+       unsigned char build_id[BUILD_ID_SIZE];
 
        pthread_mutex_lock(&dso->lock);
 
@@ -1461,6 +1462,14 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
                dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
                dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
 
+
+       /*
+        * Read the build id if possible. This is required for
+        * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
+        */
+       if (filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0)
+               dso__set_build_id(dso, build_id);
+
        /*
         * Iterate over candidate debug images.
         * Keep track of "interesting" ones (those which have a symtab, dynsym,
@@ -1607,6 +1616,15 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
        int i, err = 0;
        char *filename = NULL;
 
+       pr_debug("Looking at the vmlinux_path (%d entries long)\n",
+                vmlinux_path__nr_entries + 1);
+
+       for (i = 0; i < vmlinux_path__nr_entries; ++i) {
+               err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
+               if (err > 0)
+                       goto out;
+       }
+
        if (!symbol_conf.ignore_vmlinux_buildid)
                filename = dso__build_id_filename(dso, NULL, 0);
        if (filename != NULL) {
@@ -1615,15 +1633,6 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
                        goto out;
                free(filename);
        }
-
-       pr_debug("Looking at the vmlinux_path (%d entries long)\n",
-                vmlinux_path__nr_entries + 1);
-
-       for (i = 0; i < vmlinux_path__nr_entries; ++i) {
-               err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
-               if (err > 0)
-                       break;
-       }
 out:
        return err;
 }
index 440ba8ae888f0a2fcbc91cb67a85a206bb7ca1c1..40073c60b83d6b7e91fe2e3208f50210cf61f6ce 100644 (file)
@@ -191,6 +191,7 @@ struct addr_location {
        u8            filtered;
        u8            cpumode;
        s32           cpu;
+       s32           socket;
 };
 
 struct symsrc {
index 22245986e59e45632d4998285dbc3b6557ed370d..d995743cb673e77524f0cd539d0cf0042b3fc363 100644 (file)
@@ -38,7 +38,7 @@
 
 #include "../perf.h"
 #include "trace-event.h"
-#include <api/fs/debugfs.h>
+#include <api/fs/tracing_path.h>
 #include "evsel.h"
 #include "debug.h"
 
index b90e646c7a91c399ea23f1f23c49991e5e8511d3..802bb868d446cafa7c5383982193ad13d87b785a 100644 (file)
@@ -7,7 +7,9 @@
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <traceevent/event-parse.h>
+#include <api/fs/tracing_path.h>
 #include "trace-event.h"
 #include "machine.h"
 #include "util.h"
@@ -65,6 +67,9 @@ void trace_event__cleanup(struct trace_event *t)
        pevent_free(t->pevent);
 }
 
+/*
+ * Returns pointer with encoded error via <linux/err.h> interface.
+ */
 static struct event_format*
 tp_format(const char *sys, const char *name)
 {
@@ -73,12 +78,14 @@ tp_format(const char *sys, const char *name)
        char path[PATH_MAX];
        size_t size;
        char *data;
+       int err;
 
        scnprintf(path, PATH_MAX, "%s/%s/%s/format",
                  tracing_events_path, sys, name);
 
-       if (filename__read_str(path, &data, &size))
-               return NULL;
+       err = filename__read_str(path, &data, &size);
+       if (err)
+               return ERR_PTR(err);
 
        pevent_parse_format(pevent, &event, data, size, sys);
 
@@ -86,11 +93,14 @@ tp_format(const char *sys, const char *name)
        return event;
 }
 
+/*
+ * Returns pointer with encoded error via <linux/err.h> interface.
+ */
 struct event_format*
 trace_event__tp_format(const char *sys, const char *name)
 {
        if (!tevent_initialized && trace_event__init2())
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        return tp_format(sys, name);
 }
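
Aside, not part of the commit: tp_format() and trace_event__tp_format() now report failures through the <linux/err.h> pointer-encoding convention instead of returning NULL. A hedged user-space sketch of that convention; the helpers below mirror the kernel idiom but are re-implemented here purely for illustration:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a negative errno value in a pointer, and decode it again. */
static inline void *ERR_PTR(long error)        { return (void *)error; }
static inline long PTR_ERR(const void *ptr)    { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *load_format(const char *path)
{
        FILE *fp = fopen(path, "r");

        if (!fp)
                return ERR_PTR(-errno); /* the error travels inside the pointer */
        return fp;
}

int main(void)
{
        void *f = load_format("/nonexistent/format");

        if (IS_ERR(f))
                fprintf(stderr, "load failed: %ld\n", PTR_ERR(f));
        else
                fclose(f);
        return 0;
}

Callers check IS_ERR() before dereferencing, which lets them distinguish a valid event_format pointer from an encoded errno such as -ENOMEM.
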
index da6cc4cc2a4f48eebfa0e59a3001e9e61d8484ee..b85ee55cca0cc943c75e2ea348b932236ff0618f 100644 (file)
@@ -78,6 +78,8 @@ struct scripting_ops {
        int (*generate_script) (struct pevent *pevent, const char *outfile);
 };
 
+extern unsigned int scripting_max_stack;
+
 int script_spec_register(const char *spec, struct scripting_ops *ops);
 
 void setup_perl_scripting(void);
index 4c00507ee3fd2ad488642def35226711cba821fe..c83832b555e580a771c1a831e1d18b3b5df135ac 100644 (file)
@@ -330,6 +330,7 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
        struct map *map;
        unw_dyn_info_t di;
        u64 table_data, segbase, fde_count;
+       int ret = -EINVAL;
 
        map = find_map(ip, ui);
        if (!map || !map->dso)
@@ -348,29 +349,33 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
                di.u.rti.table_data = map->start + table_data;
                di.u.rti.table_len  = fde_count * sizeof(struct table_entry)
                                      / sizeof(unw_word_t);
-               return dwarf_search_unwind_table(as, ip, &di, pi,
-                                                need_unwind_info, arg);
+               ret = dwarf_search_unwind_table(as, ip, &di, pi,
+                                               need_unwind_info, arg);
        }
 
 #ifndef NO_LIBUNWIND_DEBUG_FRAME
        /* Check the .debug_frame section for unwinding info */
-       if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
+       if (ret < 0 &&
+           !read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
                int fd = dso__data_get_fd(map->dso, ui->machine);
                int is_exec = elf_is_exec(fd, map->dso->name);
                unw_word_t base = is_exec ? 0 : map->start;
+               const char *symfile;
 
                if (fd >= 0)
                        dso__data_put_fd(map->dso);
 
+               symfile = map->dso->symsrc_filename ?: map->dso->name;
+
                memset(&di, 0, sizeof(di));
-               if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name,
+               if (dwarf_find_debug_frame(0, &di, ip, base, symfile,
                                           map->start, map->end))
                        return dwarf_search_unwind_table(as, ip, &di, pi,
                                                         need_unwind_info, arg);
        }
 #endif
 
-       return -EINVAL;
+       return ret;
 }
 
 static int access_fpreg(unw_addr_space_t __maybe_unused as,
@@ -461,7 +466,7 @@ static int access_mem(unw_addr_space_t __maybe_unused as,
                if (ret) {
                        pr_debug("unwind: access_mem %p not inside range"
                                 " 0x%" PRIx64 "-0x%" PRIx64 "\n",
-                                (void *) addr, start, end);
+                                (void *) (uintptr_t) addr, start, end);
                        *valp = 0;
                        return ret;
                }
@@ -471,7 +476,7 @@ static int access_mem(unw_addr_space_t __maybe_unused as,
        offset = addr - start;
        *valp  = *(unw_word_t *)&stack->data[offset];
        pr_debug("unwind: access_mem addr %p val %lx, offset %d\n",
-                (void *) addr, (unsigned long)*valp, offset);
+                (void *) (uintptr_t) addr, (unsigned long)*valp, offset);
        return 0;
 }
 
index 4007aca8e0caa550a3fd551accf66810f2e19be9..6adfa18cdd4e0289a83bfca59a5f2901cdeb63b8 100644 (file)
@@ -50,6 +50,11 @@ void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN)
        die_routine = routine;
 }
 
+void set_warning_routine(void (*routine)(const char *err, va_list params))
+{
+       warn_routine = routine;
+}
+
 void usage(const char *err)
 {
        usage_routine(err);
index 7acafb3c5592d60501561986b1812fb6b121f271..cd12c25e4ea408b1b873d8368f5081d3e2a72113 100644 (file)
@@ -17,7 +17,7 @@
 #include "callchain.h"
 
 struct callchain_param callchain_param = {
-       .mode   = CHAIN_GRAPH_REL,
+       .mode   = CHAIN_GRAPH_ABS,
        .min_percent = 0.5,
        .order  = ORDER_CALLEE,
        .key    = CCKEY_FUNCTION
@@ -34,9 +34,6 @@ bool test_attr__enabled;
 bool perf_host  = true;
 bool perf_guest = false;
 
-char tracing_path[PATH_MAX + 1]        = "/sys/kernel/debug/tracing";
-char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
-
 void event_attr_init(struct perf_event_attr *attr)
 {
        if (!perf_host)
@@ -390,73 +387,6 @@ void set_term_quiet_input(struct termios *old)
        tcsetattr(0, TCSANOW, &tc);
 }
 
-static void set_tracing_events_path(const char *tracing, const char *mountpoint)
-{
-       snprintf(tracing_path, sizeof(tracing_path), "%s/%s",
-                mountpoint, tracing);
-       snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s%s",
-                mountpoint, tracing, "events");
-}
-
-static const char *__perf_tracefs_mount(const char *mountpoint)
-{
-       const char *mnt;
-
-       mnt = tracefs_mount(mountpoint);
-       if (!mnt)
-               return NULL;
-
-       set_tracing_events_path("", mnt);
-
-       return mnt;
-}
-
-static const char *__perf_debugfs_mount(const char *mountpoint)
-{
-       const char *mnt;
-
-       mnt = debugfs_mount(mountpoint);
-       if (!mnt)
-               return NULL;
-
-       set_tracing_events_path("tracing/", mnt);
-
-       return mnt;
-}
-
-const char *perf_debugfs_mount(const char *mountpoint)
-{
-       const char *mnt;
-
-       mnt = __perf_tracefs_mount(mountpoint);
-       if (mnt)
-               return mnt;
-
-       mnt = __perf_debugfs_mount(mountpoint);
-
-       return mnt;
-}
-
-void perf_debugfs_set_path(const char *mntpt)
-{
-       set_tracing_events_path("tracing/", mntpt);
-}
-
-char *get_tracing_file(const char *name)
-{
-       char *file;
-
-       if (asprintf(&file, "%s/%s", tracing_path, name) < 0)
-               return NULL;
-
-       return file;
-}
-
-void put_tracing_file(char *file)
-{
-       free(file);
-}
-
 int parse_nsec_time(const char *str, u64 *ptime)
 {
        u64 time_sec, time_nsec;
@@ -709,7 +639,7 @@ bool find_process(const char *name)
 
        dir = opendir(procfs__mountpoint());
        if (!dir)
-               return -1;
+               return false;
 
        /* Walk through the directory. */
        while (ret && (d = readdir(dir)) != NULL) {
index 291be1d84bc3e679ef1d3c4974309f566bd9a652..4cfb913aa9e04bffd94286ac426bac4c1221e9c4 100644 (file)
@@ -74,8 +74,7 @@
 #include <linux/magic.h>
 #include <linux/types.h>
 #include <sys/ttydefaults.h>
-#include <api/fs/debugfs.h>
-#include <api/fs/tracefs.h>
+#include <api/fs/tracing_path.h>
 #include <termios.h>
 #include <linux/bitops.h>
 #include <termios.h>
 extern const char *graph_line;
 extern const char *graph_dotted_line;
 extern char buildid_dir[];
-extern char tracing_path[];
-extern char tracing_events_path[];
-extern void perf_debugfs_set_path(const char *mountpoint);
-const char *perf_debugfs_mount(const char *mountpoint);
-char *get_tracing_file(const char *name);
-void put_tracing_file(char *file);
 
 /* On most systems <limits.h> would have given us this, but
  * not on some systems (e.g. GNU/Hurd).
@@ -152,6 +145,7 @@ extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)))
 
 
 extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN);
+extern void set_warning_routine(void (*routine)(const char *err, va_list params));
 
 extern int prefixcmp(const char *str, const char *prefix);
 extern void set_buildid_dir(const char *dir);
@@ -321,6 +315,8 @@ struct symbol;
 extern bool srcline_full_filename;
 char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
                  bool show_sym);
+char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
+                 bool show_sym, bool unwind_inlines);
 void free_srcline(char *srcline);
 
 int filename__read_str(const char *filename, char **buf, size_t *sizep);
index 9655cb49c7cb8eb6427b078e0472451b777b9910..bde0ef1a63df4876d5149c85f4083eb92561dbb4 100644 (file)
@@ -71,8 +71,11 @@ unsigned int extra_msr_offset32;
 unsigned int extra_msr_offset64;
 unsigned int extra_delta_offset32;
 unsigned int extra_delta_offset64;
+unsigned int aperf_mperf_multiplier = 1;
 int do_smi;
 double bclk;
+double base_hz;
+double tsc_tweak = 1.0;
 unsigned int show_pkg;
 unsigned int show_core;
 unsigned int show_cpu;
@@ -502,7 +505,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        /* %Busy */
        if (has_aperf) {
                if (!skip_c0)
-                       outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
+                       outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak);
                else
                        outp += sprintf(outp, "********");
        }
@@ -510,7 +513,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        /* Bzy_MHz */
        if (has_aperf)
                outp += sprintf(outp, "%8.0f",
-                       1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
+                       1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float);
 
        /* TSC_MHz */
        outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
@@ -984,6 +987,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                        return -3;
                if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
                        return -4;
+               t->aperf = t->aperf * aperf_mperf_multiplier;
+               t->mperf = t->mperf * aperf_mperf_multiplier;
        }
 
        if (do_smi) {
@@ -1149,6 +1154,19 @@ int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV,
 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 
+
+static void
+calculate_tsc_tweak()
+{
+       unsigned long long msr;
+       unsigned int base_ratio;
+
+       get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+       base_ratio = (msr >> 8) & 0xFF;
+       base_hz = base_ratio * bclk * 1000000;
+       tsc_tweak = base_hz / tsc_hz;
+}
+
 static void
 dump_nhm_platform_info(void)
 {
@@ -1926,8 +1944,6 @@ int has_config_tdp(unsigned int family, unsigned int model)
 
        switch (model) {
        case 0x3A:      /* IVB */
-       case 0x3E:      /* IVB Xeon */
-
        case 0x3C:      /* HSW */
        case 0x3F:      /* HSX */
        case 0x45:      /* HSW */
@@ -2543,6 +2559,13 @@ int is_knl(unsigned int family, unsigned int model)
        return 0;
 }
 
+unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
+{
+       if (is_knl(family, model))
+               return 1024;
+       return 1;
+}
+
 #define SLM_BCLK_FREQS 5
 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
 
@@ -2744,6 +2767,9 @@ void process_cpuid()
                }
        }
 
+       if (has_aperf)
+               aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model);
+
        do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
        do_snb_cstates = has_snb_msrs(family, model);
        do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
@@ -2762,6 +2788,9 @@ void process_cpuid()
        if (debug)
                dump_cstate_pstate_config_info();
 
+       if (has_skl_msrs(family, model))
+               calculate_tsc_tweak();
+
        return;
 }
 
@@ -3090,7 +3119,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(stderr, "turbostat version 4.7 17-June, 2015"
+       fprintf(stderr, "turbostat version 4.8 26-Sep, 2015"
                " - Len Brown <lenb@kernel.org>\n");
 }
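
Aside, not part of the commit: the new tsc_tweak factor compensates for parts whose TSC does not tick at base_ratio * bclk, so %Busy and Bzy_MHz are scaled by base_hz / tsc_hz. A small worked example with assumed (not measured) numbers, applying the same expressions format_counters() uses after this change:

#include <stdio.h>

int main(void)
{
        /* assumed example values, not real MSR readings */
        double bclk = 100.0;                    /* MHz                          */
        unsigned int base_ratio = 24;           /* MSR_NHM_PLATFORM_INFO[15:8]  */
        double tsc_hz = 2500000000.0;           /* measured TSC rate: 2.5 GHz   */

        double base_hz   = base_ratio * bclk * 1000000; /* 2.4 GHz */
        double tsc_tweak = base_hz / tsc_hz;            /* 0.96    */

        double interval = 5.0;                          /* seconds              */
        unsigned long long tsc   = 12500000000ULL;      /* 2.5 GHz over 5 s     */
        unsigned long long mperf =  6000000000ULL;      /* ticks at base clock  */
        unsigned long long aperf =  9000000000ULL;

        /* same expressions format_counters() uses after this change */
        double busy    = 100.0 * mperf / tsc / tsc_tweak;
        double bzy_mhz = 1.0 * tsc * tsc_tweak / 1000000 * aperf / mperf / interval;

        printf("%%Busy   = %.2f\n", busy);      /* 50.00 */
        printf("Bzy_MHz = %.0f\n", bzy_mhz);    /* 3600  */
        return 0;
}

The separate aperf_mperf_multiplier change feeds into the same formulas: on Knights Landing the raw APERF/MPERF readings are scaled by 1024 in get_counters() before being used here.
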
 
index d1b6475095967fb029af77ed69956c28b7828226..6cae06117b55297e031a210129e7f5a3f9d4c053 100644 (file)
 
 #define FIXUP_SECTION ".ex_fixup"
 
+static inline unsigned long __fls(unsigned long x);
+
 #include "word-at-a-time.h"
 
 #include "utils.h"
 
+static inline unsigned long __fls(unsigned long x)
+{
+       int lz;
+
+       asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
+       return sizeof(unsigned long) - 1 - lz;
+}
 
 static int page_size;
 static char *mem_region;
index fbe2dbff1e210c2f4711d2f581823d6b14c85799..f6483609ebc246f38d268dc97a9ddfa4072cbf7b 100755 (executable)
@@ -75,7 +75,7 @@ usage () {
 while test $# -gt 0
 do
        case "$1" in
-       --bootargs)
+       --bootargs|--bootarg)
                checkarg --bootargs "(list of kernel boot arguments)" "$#" "$2" '.*' '^--'
                TORTURE_BOOTARGS="$2"
                shift
@@ -88,7 +88,7 @@ do
        --buildonly)
                TORTURE_BUILDONLY=1
                ;;
-       --configs)
+       --configs|--config)
                checkarg --configs "(list of config files)" "$#" "$2" '^[^/]*$' '^--'
                configs="$2"
                shift
@@ -134,7 +134,7 @@ do
        --no-initrd)
                TORTURE_INITRD=""; export TORTURE_INITRD
                ;;
-       --qemu-args)
+       --qemu-args|--qemu-arg)
                checkarg --qemu-args "-qemu args" $# "$2" '^-' '^error'
                TORTURE_QEMU_ARG="$2"
                shift
index 6910b73707617cc2ddf855be82e488420a35bed1..b9611c523723982e726548742b700d29ed0be7c4 100644 (file)
@@ -1,4 +1,6 @@
 LOCK01
 LOCK02
 LOCK03
-LOCK04
\ No newline at end of file
+LOCK04
+LOCK05
+LOCK06
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK05 b/tools/testing/selftests/rcutorture/configs/lock/LOCK05
new file mode 100644 (file)
index 0000000..1d1da14
--- /dev/null
@@ -0,0 +1,6 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK05.boot b/tools/testing/selftests/rcutorture/configs/lock/LOCK05.boot
new file mode 100644 (file)
index 0000000..8ac3730
--- /dev/null
@@ -0,0 +1 @@
+locktorture.torture_type=rtmutex_lock
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK06 b/tools/testing/selftests/rcutorture/configs/lock/LOCK06
new file mode 100644 (file)
index 0000000..1d1da14
--- /dev/null
@@ -0,0 +1,6 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK06.boot b/tools/testing/selftests/rcutorture/configs/lock/LOCK06.boot
new file mode 100644 (file)
index 0000000..f92219c
--- /dev/null
@@ -0,0 +1 @@
+locktorture.torture_type=percpu_rwsem_lock
index 89a3f44bf355d65f72ad279c298920d4c0dec260..4a1be1b75a7fe6ca66c8788d62740622fd795d42 100644 (file)
@@ -8,7 +8,7 @@ LDFLAGS += -lrt -lpthread
 TEST_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \
             inconsistency-check raw_skew threadtest rtctest
 
-TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex change_skew \
+TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew \
                      skew_consistency clocksource-switch leap-a-day \
                      leapcrash set-tai set-2038
 
@@ -24,6 +24,7 @@ include ../lib.mk
 run_destructive_tests: run_tests
        ./alarmtimer-suspend
        ./valid-adjtimex
+       ./adjtick
        ./change_skew
        ./skew_consistency
        ./clocksource-switch
diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c
new file mode 100644 (file)
index 0000000..9887fd5
--- /dev/null
@@ -0,0 +1,221 @@
+/* adjtimex() tick adjustment test
+ *             by:   John Stultz <john.stultz@linaro.org>
+ *             (C) Copyright Linaro Limited 2015
+ *             Licensed under the GPLv2
+ *
+ *  To build:
+ *     $ gcc adjtick.c -o adjtick -lrt
+ *
+ *   This program is free software: you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <time.h>
+
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+       exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+       exit(1);
+}
+#endif
+
+#define CLOCK_MONOTONIC_RAW    4
+
+#define NSEC_PER_SEC           1000000000LL
+#define USEC_PER_SEC           1000000
+
+#define MILLION                        1000000
+
+long systick;
+
+long long llabs(long long val)
+{
+       if (val < 0)
+               val = -val;
+       return val;
+}
+
+unsigned long long ts_to_nsec(struct timespec ts)
+{
+       return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+}
+
+struct timespec nsec_to_ts(long long ns)
+{
+       struct timespec ts;
+
+       ts.tv_sec = ns/NSEC_PER_SEC;
+       ts.tv_nsec = ns%NSEC_PER_SEC;
+
+       return ts;
+}
+
+long long diff_timespec(struct timespec start, struct timespec end)
+{
+       long long start_ns, end_ns;
+
+       start_ns = ts_to_nsec(start);
+       end_ns = ts_to_nsec(end);
+
+       return end_ns - start_ns;
+}
+
+void get_monotonic_and_raw(struct timespec *mon, struct timespec *raw)
+{
+       struct timespec start, mid, end;
+       long long diff = 0, tmp;
+       int i;
+
+       clock_gettime(CLOCK_MONOTONIC, mon);
+       clock_gettime(CLOCK_MONOTONIC_RAW, raw);
+
+       /* Try to get a more tightly bound pairing */
+       for (i = 0; i < 3; i++) {
+               long long newdiff;
+
+               clock_gettime(CLOCK_MONOTONIC, &start);
+               clock_gettime(CLOCK_MONOTONIC_RAW, &mid);
+               clock_gettime(CLOCK_MONOTONIC, &end);
+
+               newdiff = diff_timespec(start, end);
+               if (diff == 0 || newdiff < diff) {
+                       diff = newdiff;
+                       *raw = mid;
+                       tmp = (ts_to_nsec(start) + ts_to_nsec(end))/2;
+                       *mon = nsec_to_ts(tmp);
+               }
+       }
+}
+
+long long get_ppm_drift(void)
+{
+       struct timespec mon_start, raw_start, mon_end, raw_end;
+       long long delta1, delta2, eppm;
+
+       get_monotonic_and_raw(&mon_start, &raw_start);
+
+       sleep(15);
+
+       get_monotonic_and_raw(&mon_end, &raw_end);
+
+       delta1 = diff_timespec(mon_start, mon_end);
+       delta2 = diff_timespec(raw_start, raw_end);
+
+       eppm = (delta1*MILLION)/delta2 - MILLION;
+
+       return eppm;
+}
+
+int check_tick_adj(long tickval)
+{
+       long long eppm, ppm;
+       struct timex tx1;
+
+       tx1.modes        = ADJ_TICK;
+       tx1.modes       |= ADJ_OFFSET;
+       tx1.modes       |= ADJ_FREQUENCY;
+       tx1.modes       |= ADJ_STATUS;
+
+       tx1.status      = STA_PLL;
+       tx1.offset      = 0;
+       tx1.freq        = 0;
+       tx1.tick        = tickval;
+
+       adjtimex(&tx1);
+
+       sleep(1);
+
+       ppm = ((long long)tickval * MILLION)/systick - MILLION;
+       printf("Estimating tick (act: %ld usec, %lld ppm): ", tickval, ppm);
+
+       eppm = get_ppm_drift();
+       printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm);
+
+       tx1.modes = 0;
+       adjtimex(&tx1);
+
+       if (tx1.offset || tx1.freq || tx1.tick != tickval) {
+               printf("        [ERROR]\n");
+               printf("\tUnexpected adjtimex return values, make sure ntpd is not running.\n");
+               return -1;
+       }
+
+       /*
+        * Here we use 100ppm difference as an error bound.
+        * We likely should see better, but some coarse clocksources
+        * cannot match the HZ tick size accurately, so we have an
+        * internal correction factor that doesn't scale exactly
+        * with the adjustment, resulting in > 10ppm error during
+        * a 10% adjustment. 100ppm also gives us more breathing
+        * room for interruptions during the measurement.
+        */
+       if (llabs(eppm - ppm) > 100) {
+               printf("        [FAILED]\n");
+               return -1;
+       }
+       printf("        [OK]\n");
+
+       return  0;
+}
+
+int main(int argv, char **argc)
+{
+       struct timespec raw;
+       long tick, max, interval, err;
+       struct timex tx1;
+
+       err = 0;
+       setbuf(stdout, NULL);
+
+       if (clock_gettime(CLOCK_MONOTONIC_RAW, &raw)) {
+               printf("ERR: NO CLOCK_MONOTONIC_RAW\n");
+               return -1;
+       }
+
+       printf("Each iteration takes about 15 seconds\n");
+
+       systick = sysconf(_SC_CLK_TCK);
+       systick = USEC_PER_SEC/sysconf(_SC_CLK_TCK);
+       max = systick/10; /* +/- 10% */
+       interval = max/4; /* in 4 steps each side */
+
+       for (tick = (systick - max); tick < (systick + max); tick += interval) {
+               if (check_tick_adj(tick)) {
+                       err = 1;
+                       break;
+               }
+       }
+
+       /* Reset things to zero */
+       tx1.modes        = ADJ_TICK;
+       tx1.modes       |= ADJ_OFFSET;
+       tx1.modes       |= ADJ_FREQUENCY;
+
+       tx1.offset       = 0;
+       tx1.freq         = 0;
+       tx1.tick         = systick;
+
+       adjtimex(&tx1);
+
+       if (err)
+               return ksft_exit_fail();
+
+       return ksft_exit_pass();
+}
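
Aside, not part of the commit: the new adjtick test converts a requested tick length into an expected frequency offset in ppm and compares it with the drift measured between CLOCK_MONOTONIC and CLOCK_MONOTONIC_RAW. A quick check of that arithmetic, assuming HZ=100 so systick is 10000 usec:

#include <stdio.h>

#define MILLION 1000000

int main(void)
{
        long systick = 10000;   /* USEC_PER_SEC / HZ, assuming HZ=100        */
        long tickval = 9000;    /* -10% adjustment, the test's lowest step   */

        /* requested frequency offset, as check_tick_adj() computes it */
        long long ppm = ((long long)tickval * MILLION) / systick - MILLION;

        printf("tick %ld usec -> %lld ppm\n", tickval, ppm);   /* -100000 ppm */
        return 0;
}

A 9000 usec tick is a -10% adjustment, i.e. -100000 ppm, and the test passes only if the measured eppm lands within 100 ppm of that value.
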
index 29089b24d18bca1bd3b4a3b8ab92a65cf6316053..389701f599406bffa76980d98f27a0cb913ac1d5 100644 (file)
@@ -4,8 +4,8 @@ include ../lib.mk
 
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
-TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs ldt_gdt syscall_nt
-TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault sigreturn
+TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs ldt_gdt syscall_nt ptrace_syscall
+TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault sigreturn test_syscall_vdso unwind_vdso
 
 TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
 BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
@@ -60,3 +60,5 @@ endif
 
 # Some tests have additional dependencies.
 sysret_ss_attrs_64: thunks.S
+ptrace_syscall_32: raw_syscall_helper_32.S
+test_syscall_vdso_32: thunks_32.S
index 421c607a8856887d0f9f6048c2ab26bd9367efdd..d075ea0e5ca1ac0a42ce0d48e25b30a1a059de48 100644 (file)
@@ -230,5 +230,9 @@ int main(void)
        }
        clearhandler(SIGSEGV);
 
+       /* Make sure nothing explodes if we fork. */
+       if (fork() > 0)
+               return 0;
+
        return (nerrs == 0 ? 0 : 1);
 }
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
new file mode 100644 (file)
index 0000000..5105b49
--- /dev/null
@@ -0,0 +1,294 @@
+#define _GNU_SOURCE
+
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/syscall.h>
+#include <sys/user.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <err.h>
+#include <string.h>
+#include <asm/ptrace-abi.h>
+#include <sys/auxv.h>
+
+/* Bitness-agnostic defines for user_regs_struct fields. */
+#ifdef __x86_64__
+# define user_syscall_nr       orig_rax
+# define user_arg0             rdi
+# define user_arg1             rsi
+# define user_arg2             rdx
+# define user_arg3             r10
+# define user_arg4             r8
+# define user_arg5             r9
+# define user_ip               rip
+# define user_ax               rax
+#else
+# define user_syscall_nr       orig_eax
+# define user_arg0             ebx
+# define user_arg1             ecx
+# define user_arg2             edx
+# define user_arg3             esi
+# define user_arg4             edi
+# define user_arg5             ebp
+# define user_ip               eip
+# define user_ax               eax
+#endif
+
+static int nerrs = 0;
+
+struct syscall_args32 {
+       uint32_t nr, arg0, arg1, arg2, arg3, arg4, arg5;
+};
+
+#ifdef __i386__
+extern void sys32_helper(struct syscall_args32 *, void *);
+extern void int80_and_ret(void);
+#endif
+
+/*
+ * Helper to invoke int80 with controlled regs and capture the final regs.
+ */
+static void do_full_int80(struct syscall_args32 *args)
+{
+#ifdef __x86_64__
+       register unsigned long bp asm("bp") = args->arg5;
+       asm volatile ("int $0x80"
+                     : "+a" (args->nr),
+                       "+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2),
+                       "+S" (args->arg3), "+D" (args->arg4), "+r" (bp));
+       args->arg5 = bp;
+#else
+       sys32_helper(args, int80_and_ret);
+#endif
+}
+
+#ifdef __i386__
+static void (*vsyscall32)(void);
+
+/*
+ * Nasty helper to invoke AT_SYSINFO (i.e. __kernel_vsyscall) with
+ * controlled regs and capture the final regs.  This is so nasty that it
+ * crashes my copy of gdb :)
+ */
+static void do_full_vsyscall32(struct syscall_args32 *args)
+{
+       sys32_helper(args, vsyscall32);
+}
+#endif
+
+static siginfo_t wait_trap(pid_t chld)
+{
+       siginfo_t si;
+       if (waitid(P_PID, chld, &si, WEXITED|WSTOPPED) != 0)
+               err(1, "waitid");
+       if (si.si_pid != chld)
+               errx(1, "got unexpected pid in event\n");
+       if (si.si_code != CLD_TRAPPED)
+               errx(1, "got unexpected event type %d\n", si.si_code);
+       return si;
+}
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+                      int flags)
+{
+       struct sigaction sa;
+       memset(&sa, 0, sizeof(sa));
+       sa.sa_sigaction = handler;
+       sa.sa_flags = SA_SIGINFO | flags;
+       sigemptyset(&sa.sa_mask);
+       if (sigaction(sig, &sa, 0))
+               err(1, "sigaction");
+}
+
+static void clearhandler(int sig)
+{
+       struct sigaction sa;
+       memset(&sa, 0, sizeof(sa));
+       sa.sa_handler = SIG_DFL;
+       sigemptyset(&sa.sa_mask);
+       if (sigaction(sig, &sa, 0))
+               err(1, "sigaction");
+}
+
+#ifdef __x86_64__
+# define REG_BP REG_RBP
+#else
+# define REG_BP REG_EBP
+#endif
+
+static void empty_handler(int sig, siginfo_t *si, void *ctx_void)
+{
+}
+
+static void test_sys32_regs(void (*do_syscall)(struct syscall_args32 *))
+{
+       struct syscall_args32 args = {
+               .nr = 224,      /* gettid */
+               .arg0 = 10, .arg1 = 11, .arg2 = 12,
+               .arg3 = 13, .arg4 = 14, .arg5 = 15,
+       };
+
+       do_syscall(&args);
+
+       if (args.nr != getpid() ||
+           args.arg0 != 10 || args.arg1 != 11 || args.arg2 != 12 ||
+           args.arg3 != 13 || args.arg4 != 14 || args.arg5 != 15) {
+               printf("[FAIL]\tgetpid() failed to preserve regs\n");
+               nerrs++;
+       } else {
+               printf("[OK]\tgetpid() preserves regs\n");
+       }
+
+       sethandler(SIGUSR1, empty_handler, 0);
+
+       args.nr = 37;   /* kill */
+       args.arg0 = getpid();
+       args.arg1 = SIGUSR1;
+       do_syscall(&args);
+       if (args.nr != 0 ||
+           args.arg0 != getpid() || args.arg1 != SIGUSR1 || args.arg2 != 12 ||
+           args.arg3 != 13 || args.arg4 != 14 || args.arg5 != 15) {
+               printf("[FAIL]\tkill(getpid(), SIGUSR1) failed to preserve regs\n");
+               nerrs++;
+       } else {
+               printf("[OK]\tkill(getpid(), SIGUSR1) preserves regs\n");
+       }
+       clearhandler(SIGUSR1);
+}
+
+static void test_ptrace_syscall_restart(void)
+{
+       printf("[RUN]\tptrace-induced syscall restart\n");
+       pid_t chld = fork();
+       if (chld < 0)
+               err(1, "fork");
+
+       if (chld == 0) {
+               if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
+                       err(1, "PTRACE_TRACEME");
+
+               printf("\tChild will make one syscall\n");
+               raise(SIGSTOP);
+
+               syscall(SYS_gettid, 10, 11, 12, 13, 14, 15);
+               _exit(0);
+       }
+
+       int status;
+
+       /* Wait for SIGSTOP. */
+       if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status))
+               err(1, "waitpid");
+
+       struct user_regs_struct regs;
+
+       printf("[RUN]\tSYSEMU\n");
+       if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
+               err(1, "PTRACE_SYSCALL");
+       wait_trap(chld);
+
+       if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
+               err(1, "PTRACE_GETREGS");
+
+       if (regs.user_syscall_nr != SYS_gettid ||
+           regs.user_arg0 != 10 || regs.user_arg1 != 11 ||
+           regs.user_arg2 != 12 || regs.user_arg3 != 13 ||
+           regs.user_arg4 != 14 || regs.user_arg5 != 15) {
+               printf("[FAIL]\tInitial args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n", (unsigned long)regs.user_syscall_nr, (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1, (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3, (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
+               nerrs++;
+       } else {
+               printf("[OK]\tInitial nr and args are correct\n");
+       }
+
+       printf("[RUN]\tRestart the syscall (ip = 0x%lx)\n",
+              (unsigned long)regs.user_ip);
+
+       /*
+        * This does exactly what it appears to do if syscall is int80 or
+        * SYSCALL64.  For SYSCALL32 or SYSENTER, though, this is highly
+        * magical.  It needs to work so that ptrace and syscall restart
+        * work as expected.
+        */
+       regs.user_ax = regs.user_syscall_nr;
+       regs.user_ip -= 2;
+       if (ptrace(PTRACE_SETREGS, chld, 0, &regs) != 0)
+               err(1, "PTRACE_SETREGS");
+
+       if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
+               err(1, "PTRACE_SYSCALL");
+       wait_trap(chld);
+
+       if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
+               err(1, "PTRACE_GETREGS");
+
+       if (regs.user_syscall_nr != SYS_gettid ||
+           regs.user_arg0 != 10 || regs.user_arg1 != 11 ||
+           regs.user_arg2 != 12 || regs.user_arg3 != 13 ||
+           regs.user_arg4 != 14 || regs.user_arg5 != 15) {
+               printf("[FAIL]\tRestart nr or args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n", (unsigned long)regs.user_syscall_nr, (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1, (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3, (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
+               nerrs++;
+       } else {
+               printf("[OK]\tRestarted nr and args are correct\n");
+       }
+
+       printf("[RUN]\tChange nr and args and restart the syscall (ip = 0x%lx)\n",
+              (unsigned long)regs.user_ip);
+
+       regs.user_ax = SYS_getpid;
+       regs.user_arg0 = 20;
+       regs.user_arg1 = 21;
+       regs.user_arg2 = 22;
+       regs.user_arg3 = 23;
+       regs.user_arg4 = 24;
+       regs.user_arg5 = 25;
+       regs.user_ip -= 2;
+
+       if (ptrace(PTRACE_SETREGS, chld, 0, &regs) != 0)
+               err(1, "PTRACE_SETREGS");
+
+       if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
+               err(1, "PTRACE_SYSCALL");
+       wait_trap(chld);
+
+       if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
+               err(1, "PTRACE_GETREGS");
+
+       if (regs.user_syscall_nr != SYS_getpid ||
+           regs.user_arg0 != 20 || regs.user_arg1 != 21 || regs.user_arg2 != 22 ||
+           regs.user_arg3 != 23 || regs.user_arg4 != 24 || regs.user_arg5 != 25) {
+               printf("[FAIL]\tRestart nr or args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n", (unsigned long)regs.user_syscall_nr, (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1, (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3, (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
+               nerrs++;
+       } else {
+               printf("[OK]\tReplacement nr and args are correct\n");
+       }
+
+       if (ptrace(PTRACE_CONT, chld, 0, 0) != 0)
+               err(1, "PTRACE_CONT");
+       if (waitpid(chld, &status, 0) != chld)
+               err(1, "waitpid");
+       if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+               printf("[FAIL]\tChild failed\n");
+               nerrs++;
+       } else {
+               printf("[OK]\tChild exited cleanly\n");
+       }
+}
+
+int main()
+{
+       printf("[RUN]\tCheck int80 return regs\n");
+       test_sys32_regs(do_full_int80);
+
+#if defined(__i386__) && (!defined(__GLIBC__) || __GLIBC__ > 2 || __GLIBC_MINOR__ >= 16)
+       vsyscall32 = (void *)getauxval(AT_SYSINFO);
+       printf("[RUN]\tCheck AT_SYSINFO return regs\n");
+       test_sys32_regs(do_full_vsyscall32);
+#endif
+
+       test_ptrace_syscall_restart();
+
+       return 0;
+}
diff --git a/tools/testing/selftests/x86/raw_syscall_helper_32.S b/tools/testing/selftests/x86/raw_syscall_helper_32.S
new file mode 100644 (file)
index 0000000..534e71e
--- /dev/null
@@ -0,0 +1,46 @@
+.global sys32_helper
+sys32_helper:
+       /* Args: syscall_args_32*, function pointer */
+       pushl   %ebp
+       pushl   %ebx
+       pushl   %esi
+       pushl   %edi
+       movl    5*4(%esp), %eax /* pointer to args struct */
+
+       movl    1*4(%eax), %ebx
+       movl    2*4(%eax), %ecx
+       movl    3*4(%eax), %edx
+       movl    4*4(%eax), %esi
+       movl    5*4(%eax), %edi
+       movl    6*4(%eax), %ebp
+       movl    0*4(%eax), %eax
+
+       call    *(6*4)(%esp)    /* Do the syscall */
+
+       /* Now we need to recover without losing any reg values */
+       pushl   %eax
+       movl    6*4(%esp), %eax
+       popl    0*4(%eax)
+       movl    %ebx, 1*4(%eax)
+       movl    %ecx, 2*4(%eax)
+       movl    %edx, 3*4(%eax)
+       movl    %esi, 4*4(%eax)
+       movl    %edi, 5*4(%eax)
+       movl    %ebp, 6*4(%eax)
+
+       popl    %edi
+       popl    %esi
+       popl    %ebx
+       popl    %ebp
+       ret
+
+       .type sys32_helper, @function
+       .size sys32_helper, .-sys32_helper
+
+.global int80_and_ret
+int80_and_ret:
+       int     $0x80
+       ret
+
+       .type int80_and_ret, @function
+       .size int80_and_ret, .-int80_and_ret
diff --git a/tools/testing/selftests/x86/test_syscall_vdso.c b/tools/testing/selftests/x86/test_syscall_vdso.c
new file mode 100644 (file)
index 0000000..4037035
--- /dev/null
@@ -0,0 +1,401 @@
+/*
+ * 32-bit syscall ABI conformance test.
+ *
+ * Copyright (c) 2015 Denys Vlasenko
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Can be built statically:
+ * gcc -Os -Wall -static -m32 test_syscall_vdso.c thunks_32.S
+ */
+#undef _GNU_SOURCE
+#define _GNU_SOURCE 1
+#undef __USE_GNU
+#define __USE_GNU 1
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/select.h>
+#include <sys/time.h>
+#include <elf.h>
+#include <sys/ptrace.h>
+#include <sys/wait.h>
+
+#if !defined(__i386__)
+int main(int argc, char **argv, char **envp)
+{
+       printf("[SKIP]\tNot a 32-bit x86 userspace\n");
+       return 0;
+}
+#else
+
+long syscall_addr;
+long get_syscall(char **envp)
+{
+       Elf32_auxv_t *auxv;
+       while (*envp++ != NULL)
+               continue;
+       for (auxv = (void *)envp; auxv->a_type != AT_NULL; auxv++)
+               if (auxv->a_type == AT_SYSINFO)
+                       return auxv->a_un.a_val;
+       printf("[WARN]\tAT_SYSINFO not supplied\n");
+       return 0;
+}
+
+asm (
+       "       .pushsection .text\n"
+       "       .global int80\n"
+       "int80:\n"
+       "       int     $0x80\n"
+       "       ret\n"
+       "       .popsection\n"
+);
+extern char int80;
+
+struct regs64 {
+       uint64_t rax, rbx, rcx, rdx;
+       uint64_t rsi, rdi, rbp, rsp;
+       uint64_t r8,  r9,  r10, r11;
+       uint64_t r12, r13, r14, r15;
+};
+struct regs64 regs64;
+int kernel_is_64bit;
+
+asm (
+       "       .pushsection .text\n"
+       "       .code64\n"
+       "get_regs64:\n"
+       "       push    %rax\n"
+       "       mov     $regs64, %eax\n"
+       "       pop     0*8(%rax)\n"
+       "       movq    %rbx, 1*8(%rax)\n"
+       "       movq    %rcx, 2*8(%rax)\n"
+       "       movq    %rdx, 3*8(%rax)\n"
+       "       movq    %rsi, 4*8(%rax)\n"
+       "       movq    %rdi, 5*8(%rax)\n"
+       "       movq    %rbp, 6*8(%rax)\n"
+       "       movq    %rsp, 7*8(%rax)\n"
+       "       movq    %r8,  8*8(%rax)\n"
+       "       movq    %r9,  9*8(%rax)\n"
+       "       movq    %r10, 10*8(%rax)\n"
+       "       movq    %r11, 11*8(%rax)\n"
+       "       movq    %r12, 12*8(%rax)\n"
+       "       movq    %r13, 13*8(%rax)\n"
+       "       movq    %r14, 14*8(%rax)\n"
+       "       movq    %r15, 15*8(%rax)\n"
+       "       ret\n"
+       "poison_regs64:\n"
+       "       movq    $0x7f7f7f7f, %r8\n"
+       "       shl     $32, %r8\n"
+       "       orq     $0x7f7f7f7f, %r8\n"
+       "       movq    %r8, %r9\n"
+       "       movq    %r8, %r10\n"
+       "       movq    %r8, %r11\n"
+       "       movq    %r8, %r12\n"
+       "       movq    %r8, %r13\n"
+       "       movq    %r8, %r14\n"
+       "       movq    %r8, %r15\n"
+       "       ret\n"
+       "       .code32\n"
+       "       .popsection\n"
+);
+extern void get_regs64(void);
+extern void poison_regs64(void);
+extern unsigned long call64_from_32(void (*function)(void));
+void print_regs64(void)
+{
+       if (!kernel_is_64bit)
+               return;
+       printf("ax:%016llx bx:%016llx cx:%016llx dx:%016llx\n", regs64.rax,  regs64.rbx,  regs64.rcx,  regs64.rdx);
+       printf("si:%016llx di:%016llx bp:%016llx sp:%016llx\n", regs64.rsi,  regs64.rdi,  regs64.rbp,  regs64.rsp);
+       printf(" 8:%016llx  9:%016llx 10:%016llx 11:%016llx\n", regs64.r8 ,  regs64.r9 ,  regs64.r10,  regs64.r11);
+       printf("12:%016llx 13:%016llx 14:%016llx 15:%016llx\n", regs64.r12,  regs64.r13,  regs64.r14,  regs64.r15);
+}
+
+int check_regs64(void)
+{
+       int err = 0;
+       int num = 8;
+       uint64_t *r64 = &regs64.r8;
+
+       if (!kernel_is_64bit)
+               return 0;
+
+       do {
+               if (*r64 == 0x7f7f7f7f7f7f7f7fULL)
+                       continue; /* register did not change */
+               if (syscall_addr != (long)&int80) {
+                       /*
+                        * Non-INT80 syscall entrypoints are allowed to clobber R8+ regs:
+                        * either clear them to 0, or for R11, load EFLAGS.
+                        */
+                       if (*r64 == 0)
+                               continue;
+                       if (num == 11) {
+                               printf("[NOTE]\tR11 has changed:%016llx - assuming clobbered by SYSRET insn\n", *r64);
+                               continue;
+                       }
+               } else {
+                       /* INT80 syscall entrypoint can be used by
+                        * 64-bit programs too, unlike SYSCALL/SYSENTER.
+                        * Therefore it must preserve R12+
+                        * (they are callee-saved registers in 64-bit C ABI).
+                        *
+                        * This was probably historically not intended,
+                        * but R8..11 are clobbered (cleared to 0).
+                        * IOW: they are the only registers which aren't
+                        * preserved across INT80 syscall.
+                        */
+                       if (*r64 == 0 && num <= 11)
+                               continue;
+               }
+               printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64);
+               err++;
+       } while (r64++, ++num < 16);
+
+       if (!err)
+               printf("[OK]\tR8..R15 did not leak kernel data\n");
+       return err;
+}
+
+int nfds;
+fd_set rfds;
+fd_set wfds;
+fd_set efds;
+struct timespec timeout;
+sigset_t sigmask;
+struct {
+       sigset_t *sp;
+       int sz;
+} sigmask_desc;
+
+void prep_args()
+{
+       nfds = 42;
+       FD_ZERO(&rfds);
+       FD_ZERO(&wfds);
+       FD_ZERO(&efds);
+       FD_SET(0, &rfds);
+       FD_SET(1, &wfds);
+       FD_SET(2, &efds);
+       timeout.tv_sec = 0;
+       timeout.tv_nsec = 123;
+       sigemptyset(&sigmask);
+       sigaddset(&sigmask, SIGINT);
+       sigaddset(&sigmask, SIGUSR2);
+       sigaddset(&sigmask, SIGRTMAX);
+       sigmask_desc.sp = &sigmask;
+       sigmask_desc.sz = 8; /* bytes */
+}
+
+static void print_flags(const char *name, unsigned long r)
+{
+       static const char *bitarray[] = {
+       "\n" ,"c\n" ,/* Carry Flag */
+       "0 " ,"1 "  ,/* Bit 1 - always on */
+       ""   ,"p "  ,/* Parity Flag */
+       "0 " ,"3? " ,
+       ""   ,"a "  ,/* Auxiliary carry Flag */
+       "0 " ,"5? " ,
+       ""   ,"z "  ,/* Zero Flag */
+       ""   ,"s "  ,/* Sign Flag */
+       ""   ,"t "  ,/* Trap Flag */
+       ""   ,"i "  ,/* Interrupt Flag */
+       ""   ,"d "  ,/* Direction Flag */
+       ""   ,"o "  ,/* Overflow Flag */
+       "0 " ,"1 "  ,/* I/O Privilege Level (2 bits) */
+       "0"  ,"1"   ,/* I/O Privilege Level (2 bits) */
+       ""   ,"n "  ,/* Nested Task */
+       "0 " ,"15? ",
+       ""   ,"r "  ,/* Resume Flag */
+       ""   ,"v "  ,/* Virtual Mode */
+       ""   ,"ac " ,/* Alignment Check/Access Control */
+       ""   ,"vif ",/* Virtual Interrupt Flag */
+       ""   ,"vip ",/* Virtual Interrupt Pending */
+       ""   ,"id " ,/* CPUID detection */
+       NULL
+       };
+       const char **bitstr;
+       int bit;
+
+       printf("%s=%016lx ", name, r);
+       bitstr = bitarray + 42;
+       bit = 21;
+       if ((r >> 22) != 0)
+               printf("(extra bits are set) ");
+       do {
+               if (bitstr[(r >> bit) & 1][0])
+                       fputs(bitstr[(r >> bit) & 1], stdout);
+               bitstr -= 2;
+               bit--;
+       } while (bit >= 0);
+}
+
+int run_syscall(void)
+{
+       long flags, bad_arg;
+
+       prep_args();
+
+       if (kernel_is_64bit)
+               call64_from_32(poison_regs64);
+       /*print_regs64();*/
+
+       asm("\n"
+       /* Try 6-arg syscall: pselect. It should return quickly */
+       "       push    %%ebp\n"
+       "       mov     $308, %%eax\n"     /* PSELECT */
+       "       mov     nfds, %%ebx\n"     /* ebx  arg1 */
+       "       mov     $rfds, %%ecx\n"    /* ecx  arg2 */
+       "       mov     $wfds, %%edx\n"    /* edx  arg3 */
+       "       mov     $efds, %%esi\n"    /* esi  arg4 */
+       "       mov     $timeout, %%edi\n" /* edi  arg5 */
+       "       mov     $sigmask_desc, %%ebp\n" /* %ebp arg6 */
+       "       push    $0x200ed7\n"      /* set almost all flags */
+       "       popf\n"         /* except TF, IOPL, NT, RF, VM, AC, VIF, VIP */
+       "       call    *syscall_addr\n"
+       /* Check that registers are not clobbered */
+       "       pushf\n"
+       "       pop     %%eax\n"
+       "       cld\n"
+       "       cmp     nfds, %%ebx\n"     /* ebx  arg1 */
+       "       mov     $1, %%ebx\n"
+       "       jne     1f\n"
+       "       cmp     $rfds, %%ecx\n"    /* ecx  arg2 */
+       "       mov     $2, %%ebx\n"
+       "       jne     1f\n"
+       "       cmp     $wfds, %%edx\n"    /* edx  arg3 */
+       "       mov     $3, %%ebx\n"
+       "       jne     1f\n"
+       "       cmp     $efds, %%esi\n"    /* esi  arg4 */
+       "       mov     $4, %%ebx\n"
+       "       jne     1f\n"
+       "       cmp     $timeout, %%edi\n" /* edi  arg5 */
+       "       mov     $5, %%ebx\n"
+       "       jne     1f\n"
+       "       cmpl    $sigmask_desc, %%ebp\n" /* %ebp arg6 */
+       "       mov     $6, %%ebx\n"
+       "       jne     1f\n"
+       "       mov     $0, %%ebx\n"
+       "1:\n"
+       "       pop     %%ebp\n"
+       : "=a" (flags), "=b" (bad_arg)
+       :
+       : "cx", "dx", "si", "di"
+       );
+
+       if (kernel_is_64bit) {
+               memset(&regs64, 0x77, sizeof(regs64));
+               call64_from_32(get_regs64);
+               /*print_regs64();*/
+       }
+
+       /*
+        * On paravirt kernels, flags are not preserved across syscalls.
+        * Thus, we do not consider it a bug if some are changed.
+        * We just report the flags that changed.
+        */
+       if ((0x200ed7 ^ flags) != 0) {
+               print_flags("[WARN]\tFlags before", 0x200ed7);
+               print_flags("[WARN]\tFlags  after", flags);
+               print_flags("[WARN]\tFlags change", (0x200ed7 ^ flags));
+       }
+
+       if (bad_arg) {
+               printf("[FAIL]\targ#%ld clobbered\n", bad_arg);
+               return 1;
+       }
+       printf("[OK]\tArguments are preserved across syscall\n");
+
+       return check_regs64();
+}
+
+int run_syscall_twice()
+{
+       int exitcode = 0;
+       long sv;
+
+       if (syscall_addr) {
+               printf("[RUN]\tExecuting 6-argument 32-bit syscall via VDSO\n");
+               exitcode = run_syscall();
+       }
+       sv = syscall_addr;
+       syscall_addr = (long)&int80;
+       printf("[RUN]\tExecuting 6-argument 32-bit syscall via INT 80\n");
+       exitcode += run_syscall();
+       syscall_addr = sv;
+       return exitcode;
+}
+
+void ptrace_me()
+{
+       pid_t pid;
+
+       fflush(NULL);
+       pid = fork();
+       if (pid < 0)
+               exit(1);
+       if (pid == 0) {
+               /* child */
+               if (ptrace(PTRACE_TRACEME, 0L, 0L, 0L) != 0)
+                       exit(0);
+               raise(SIGSTOP);
+               return;
+       }
+       /* parent */
+       printf("[RUN]\tRunning tests under ptrace\n");
+       while (1) {
+               int status;
+               pid = waitpid(-1, &status, __WALL);
+               if (WIFEXITED(status))
+                       exit(WEXITSTATUS(status));
+               if (WIFSIGNALED(status))
+                       exit(WTERMSIG(status));
+               if (pid <= 0 || !WIFSTOPPED(status)) /* paranoia */
+                       exit(255);
+               /*
+                * Note: we do not inject sig = WSTOPSIG(status).
+                * We probably should, but careful: do not inject SIGTRAP
+                * generated by syscall entry/exit stops.
+                * That kills the child.
+                */
+               ptrace(PTRACE_SYSCALL, pid, 0L, 0L /*sig*/);
+       }
+}
+
+int main(int argc, char **argv, char **envp)
+{
+       int exitcode = 0;
+       int cs;
+
+       asm("\n"
+       "       movl    %%cs, %%eax\n"
+       : "=a" (cs)
+       );
+       kernel_is_64bit = (cs == 0x23);
+       if (!kernel_is_64bit)
+               printf("[NOTE]\tNot a 64-bit kernel, won't test R8..R15 leaks\n");
+
+       /* This only works for non-static builds:
+        * syscall_addr = dlsym(dlopen("linux-gate.so.1", RTLD_NOW), "__kernel_vsyscall");
+        */
+       syscall_addr = get_syscall(envp);
+
+       exitcode += run_syscall_twice();
+       ptrace_me();
+       exitcode += run_syscall_twice();
+
+       return exitcode;
+}
+#endif
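
The sixth pselect6 argument passed above in %ebp is not a plain sigset_t pointer but a two-word descriptor holding the sigset pointer and its size in bytes (8 == _NSIG / 8 for the kernel sigset). The same call can be made from plain C through syscall(2); a minimal sketch (the argpack struct name is illustrative):

	#include <signal.h>
	#include <stddef.h>
	#include <time.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	struct sigset_argpack {
		const sigset_t *ss;
		size_t ss_len;			/* kernel sigset size in bytes */
	};

	void pselect6_example(void)
	{
		sigset_t mask;
		struct timespec ts = { 0, 123 };
		struct sigset_argpack pack = { &mask, 8 };

		sigemptyset(&mask);
		syscall(SYS_pselect6, 0, NULL, NULL, NULL, &ts, &pack);
	}
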
diff --git a/tools/testing/selftests/x86/thunks_32.S b/tools/testing/selftests/x86/thunks_32.S
new file mode 100644 (file)
index 0000000..29b644b
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * thunks_32.S - assembly helpers for mixed-bitness code
+ * Copyright (c) 2015 Denys Vlasenko
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * These are little helpers that make it easier to switch bitness on
+ * the fly.
+ */
+
+       .text
+       .code32
+
+       .global call64_from_32
+       .type call64_from_32, @function
+
+       // 4(%esp): function to call
+call64_from_32:
+       // Fetch function address
+       mov     4(%esp), %eax
+
+       // Save registers which are callee-clobbered by 64-bit ABI
+       push    %ecx
+       push    %edx
+       push    %esi
+       push    %edi
+
+       // Switch to long mode
+       jmp     $0x33,$1f
+1:     .code64
+
+       // Call the function
+       call    *%rax
+
+       // Switch to compatibility mode
+       push    $0x23  /* USER32_CS */
+       .code32; push $1f; .code64 /* hack: can't have X86_64_32S relocation in 32-bit ELF */
+       lretq
+1:     .code32
+
+       pop     %edi
+       pop     %esi
+       pop     %edx
+       pop     %ecx
+
+       ret
+
+.size call64_from_32, .-call64_from_32
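
call64_from_32 far-jumps into the 64-bit code segment (selector 0x33), calls the supplied function, and returns through USER32_CS (0x23), so the 32-bit test can run short 64-bit fragments. test_syscall_vdso.c above uses it roughly like this (a sketch; poison_regs64, get_regs64 and check_regs64 are the helpers defined in that test):

	extern void poison_regs64(void);
	extern void get_regs64(void);
	extern unsigned long call64_from_32(void (*function)(void));
	extern int check_regs64(void);

	static int syscall_preserves_r8_r15(void)
	{
		call64_from_32(poison_regs64);	/* fill r8..r15 with 0x7f bytes */
		/* ... issue the 32-bit syscall under test here ... */
		call64_from_32(get_regs64);	/* snapshot r8..r15 into regs64 */
		return check_regs64() == 0;
	}
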
diff --git a/tools/testing/selftests/x86/unwind_vdso.c b/tools/testing/selftests/x86/unwind_vdso.c
new file mode 100644 (file)
index 0000000..00a26a8
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * unwind_vdso.c - tests unwind info for AT_SYSINFO in the vDSO
+ * Copyright (c) 2014-2015 Andrew Lutomirski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * This tests __kernel_vsyscall's unwind info.
+ */
+
+#define _GNU_SOURCE
+
+#include <features.h>
+#include <stdio.h>
+
+#if defined(__GLIBC__) && __GLIBC__ == 2 && __GLIBC_MINOR__ < 16
+
+int main()
+{
+       /* We need getauxval(). */
+       printf("[SKIP]\tGLIBC before 2.16 cannot compile this test\n");
+       return 0;
+}
+
+#else
+
+#include <sys/time.h>
+#include <stdlib.h>
+#include <syscall.h>
+#include <unistd.h>
+#include <string.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <signal.h>
+#include <sys/ucontext.h>
+#include <err.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <sys/ptrace.h>
+#include <sys/user.h>
+#include <sys/ucontext.h>
+#include <link.h>
+#include <sys/auxv.h>
+#include <dlfcn.h>
+#include <unwind.h>
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+                      int flags)
+{
+       struct sigaction sa;
+       memset(&sa, 0, sizeof(sa));
+       sa.sa_sigaction = handler;
+       sa.sa_flags = SA_SIGINFO | flags;
+       sigemptyset(&sa.sa_mask);
+       if (sigaction(sig, &sa, 0))
+               err(1, "sigaction");
+}
+
+#ifdef __x86_64__
+# define WIDTH "q"
+#else
+# define WIDTH "l"
+#endif
+
+static unsigned long get_eflags(void)
+{
+       unsigned long eflags;
+       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
+       return eflags;
+}
+
+static void set_eflags(unsigned long eflags)
+{
+       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
+                     : : "rm" (eflags) : "flags");
+}
+
+#define X86_EFLAGS_TF (1UL << 8)
+
+static volatile sig_atomic_t nerrs;
+static unsigned long sysinfo;
+static bool got_sysinfo = false;
+static unsigned long return_address;
+
+struct unwind_state {
+       unsigned long ip;       /* trap source */
+       int depth;              /* -1 until we hit the trap source */
+};
+
+_Unwind_Reason_Code trace_fn(struct _Unwind_Context * ctx, void *opaque)
+{
+       struct unwind_state *state = opaque;
+       unsigned long ip = _Unwind_GetIP(ctx);
+
+       if (state->depth == -1) {
+               if (ip == state->ip)
+                       state->depth = 0;
+               else
+                       return _URC_NO_REASON;  /* Not there yet */
+       }
+       printf("\t  0x%lx\n", ip);
+
+       if (ip == return_address) {
+               /* Here we are. */
+               unsigned long eax = _Unwind_GetGR(ctx, 0);
+               unsigned long ecx = _Unwind_GetGR(ctx, 1);
+               unsigned long edx = _Unwind_GetGR(ctx, 2);
+               unsigned long ebx = _Unwind_GetGR(ctx, 3);
+               unsigned long ebp = _Unwind_GetGR(ctx, 5);
+               unsigned long esi = _Unwind_GetGR(ctx, 6);
+               unsigned long edi = _Unwind_GetGR(ctx, 7);
+               bool ok = (eax == SYS_getpid || eax == getpid()) &&
+                       ebx == 1 && ecx == 2 && edx == 3 &&
+                       esi == 4 && edi == 5 && ebp == 6;
+
+               if (!ok)
+                       nerrs++;
+               printf("[%s]\t  NR = %ld, args = %ld, %ld, %ld, %ld, %ld, %ld\n",
+                      (ok ? "OK" : "FAIL"),
+                      eax, ebx, ecx, edx, esi, edi, ebp);
+
+               return _URC_NORMAL_STOP;
+       } else {
+               state->depth++;
+               return _URC_NO_REASON;
+       }
+}
+
+static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
+{
+       ucontext_t *ctx = (ucontext_t *)ctx_void;
+       struct unwind_state state;
+       unsigned long ip = ctx->uc_mcontext.gregs[REG_EIP];
+
+       if (!got_sysinfo && ip == sysinfo) {
+               got_sysinfo = true;
+
+               /* Find the return address. */
+               return_address = *(unsigned long *)(unsigned long)ctx->uc_mcontext.gregs[REG_ESP];
+
+               printf("\tIn vsyscall at 0x%lx, returning to 0x%lx\n",
+                      ip, return_address);
+       }
+
+       if (!got_sysinfo)
+               return;         /* Not there yet */
+
+       if (ip == return_address) {
+               ctx->uc_mcontext.gregs[REG_EFL] &= ~X86_EFLAGS_TF;
+               printf("\tVsyscall is done\n");
+               return;
+       }
+
+       printf("\tSIGTRAP at 0x%lx\n", ip);
+
+       state.ip = ip;
+       state.depth = -1;
+       _Unwind_Backtrace(trace_fn, &state);
+}
+
+int main()
+{
+       sysinfo = getauxval(AT_SYSINFO);
+       printf("\tAT_SYSINFO is 0x%lx\n", sysinfo);
+
+       Dl_info info;
+       if (!dladdr((void *)sysinfo, &info)) {
+               printf("[WARN]\tdladdr failed on AT_SYSINFO\n");
+       } else {
+               printf("[OK]\tAT_SYSINFO maps to %s, loaded at %p\n",
+                      info.dli_fname, info.dli_fbase);
+       }
+
+       sethandler(SIGTRAP, sigtrap, 0);
+
+       syscall(SYS_getpid);  /* Force symbol binding without TF set. */
+       printf("[RUN]\tSet TF and check a fast syscall\n");
+       set_eflags(get_eflags() | X86_EFLAGS_TF);
+       syscall(SYS_getpid, 1, 2, 3, 4, 5, 6);
+       if (!got_sysinfo) {
+               set_eflags(get_eflags() & ~X86_EFLAGS_TF);
+
+               /*
+                * The most likely cause of this is that you're on Debian or
+                * a Debian-based distro, you're missing libc6-i686, and you're
+                * affected by libc/19006 (https://sourceware.org/PR19006).
+                */
+               printf("[WARN]\tsyscall(2) didn't enter AT_SYSINFO\n");
+       }
+
+       if (get_eflags() & X86_EFLAGS_TF) {
+               printf("[FAIL]\tTF is still set\n");
+               nerrs++;
+       }
+
+       if (nerrs) {
+               printf("[FAIL]\tThere were errors\n");
+               return 1;
+       } else {
+               printf("[OK]\tAll is well\n");
+               return 0;
+       }
+}
+
+#endif /* New enough libc */
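
unwind_vdso.c combines two mechanisms: hardware single-stepping (EFLAGS.TF raises SIGTRAP after every instruction) and libgcc's unwinder, driven through _Unwind_Backtrace()/_Unwind_GetIP(). Taken on its own, the unwinder side reduces to a sketch like this, which walks the current stack and prints each frame's IP:

	#include <stdio.h>
	#include <unwind.h>

	static _Unwind_Reason_Code dump_frame(struct _Unwind_Context *ctx, void *arg)
	{
		(void)arg;
		printf("\t0x%lx\n", (unsigned long)_Unwind_GetIP(ctx));
		return _URC_NO_REASON;		/* keep walking outward */
	}

	void dump_stack_here(void)
	{
		_Unwind_Backtrace(dump_frame, NULL);
	}
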
index 7f73fa32a590b5f5177affb4b2e6df93dc3e2f2f..bcf5ec760eb9287f5a0b5f99e95518eaed78f6a0 100644 (file)
@@ -42,7 +42,7 @@
 #include <sys/mman.h>
 #include "../../include/uapi/linux/magic.h"
 #include "../../include/uapi/linux/kernel-page-flags.h"
-#include <api/fs/debugfs.h>
+#include <api/fs/fs.h>
 
 #ifndef MAX_PATH
 # define MAX_PATH 256
@@ -188,7 +188,7 @@ static int          kpageflags_fd;
 static int             opt_hwpoison;
 static int             opt_unpoison;
 
-static char            *hwpoison_debug_fs;
+static const char      *hwpoison_debug_fs;
 static int             hwpoison_inject_fd;
 static int             hwpoison_forget_fd;
 
@@ -487,7 +487,7 @@ static void prepare_hwpoison_fd(void)
 {
        char buf[MAX_PATH + 1];
 
-       hwpoison_debug_fs = debugfs_mount(NULL);
+       hwpoison_debug_fs = debugfs__mount();
        if (!hwpoison_debug_fs) {
                perror("mount debugfs");
                exit(EXIT_FAILURE);
index 48c6e1ac6827f14be7eaede56bfd1ae3d185d217..b9d3a32cbc048ddb7c1c885fb3af523f97944fbc 100644 (file)
@@ -137,6 +137,8 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       bool phys_active;
+       int ret;
 
        /*
         * We're about to run this vcpu again, so there is no need to
@@ -151,6 +153,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
         */
        if (kvm_timer_should_fire(vcpu))
                kvm_timer_inject_irq(vcpu);
+
+       /*
+        * We keep track of whether the edge-triggered interrupt has been
+        * signalled to the vgic/guest, and if so, we mask the interrupt on
+        * the physical distributor to prevent the timer from raising a
+        * physical interrupt whenever we run a guest, which would prevent
+        * forward VCPU progress.
+        */
+       if (kvm_vgic_get_phys_irq_active(timer->map))
+               phys_active = true;
+       else
+               phys_active = false;
+
+       ret = irq_set_irqchip_state(timer->map->irq,
+                                   IRQCHIP_STATE_ACTIVE,
+                                   phys_active);
+       WARN_ON(ret);
 }
 
 /**
index 6bd1c9bf7ae71504d042f455bce8ad9ca6abf6fc..30489181922d28d9749feefb552e7b8f0fa97f52 100644 (file)
@@ -531,6 +531,34 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm,
        return false;
 }
 
+/*
+ * If a mapped interrupt's state has been modified by the guest such that it
+ * is no longer active or pending, without it having gone through the sync path,
+ * then the map->active field must be cleared so the interrupt can be taken
+ * again.
+ */
+static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       struct list_head *root;
+       struct irq_phys_map_entry *entry;
+       struct irq_phys_map *map;
+
+       rcu_read_lock();
+
+       /* Check for PPIs */
+       root = &vgic_cpu->irq_phys_map_list;
+       list_for_each_entry_rcu(entry, root, entry) {
+               map = &entry->map;
+
+               if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) &&
+                   !vgic_irq_is_active(vcpu, map->virt_irq))
+                       map->active = false;
+       }
+
+       rcu_read_unlock();
+}
+
 bool vgic_handle_clear_pending_reg(struct kvm *kvm,
                                   struct kvm_exit_mmio *mmio,
                                   phys_addr_t offset, int vcpu_id)
@@ -561,6 +589,7 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
                                          vcpu_id, offset);
                vgic_reg_access(mmio, reg, offset, mode);
 
+               vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
                vgic_update_state(kvm);
                return true;
        }
@@ -598,6 +627,7 @@ bool vgic_handle_clear_active_reg(struct kvm *kvm,
                        ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
 
        if (mmio->is_write) {
+               vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
                vgic_update_state(kvm);
                return true;
        }
@@ -982,6 +1012,12 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
        pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
        pend_shared = vcpu->arch.vgic_cpu.pending_shared;
 
+       if (!dist->enabled) {
+               bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
+               bitmap_zero(pend_shared, nr_shared);
+               return 0;
+       }
+
        pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
        enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
        bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
@@ -1009,11 +1045,6 @@ void vgic_update_state(struct kvm *kvm)
        struct kvm_vcpu *vcpu;
        int c;
 
-       if (!dist->enabled) {
-               set_bit(0, dist->irq_pending_on_cpu);
-               return;
-       }
-
        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (compute_pending_for_cpu(vcpu))
                        set_bit(c, dist->irq_pending_on_cpu);
@@ -1092,6 +1123,15 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
 
+       /*
+        * We must transfer the pending state back to the distributor before
+        * retiring the LR, otherwise we may lose edge-triggered interrupts.
+        */
+       if (vlr.state & LR_STATE_PENDING) {
+               vgic_dist_irq_set_pending(vcpu, irq);
+               vlr.hwirq = 0;
+       }
+
        vlr.state = 0;
        vgic_set_lr(vcpu, lr_nr, vlr);
        clear_bit(lr_nr, vgic_cpu->lr_used);
@@ -1132,7 +1172,8 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
                kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
                vgic_irq_clear_active(vcpu, irq);
                vgic_update_state(vcpu->kvm);
-       } else if (vgic_dist_irq_is_pending(vcpu, irq)) {
+       } else {
+               WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
                vlr.state |= LR_STATE_PENDING;
                kvm_debug("Set pending: 0x%x\n", vlr.state);
        }
@@ -1240,7 +1281,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long *pa_percpu, *pa_shared;
-       int i, vcpu_id, lr, ret;
+       int i, vcpu_id;
        int overflow = 0;
        int nr_shared = vgic_nr_shared_irqs(dist);
 
@@ -1295,31 +1336,6 @@ epilog:
                 */
                clear_bit(vcpu_id, dist->irq_pending_on_cpu);
        }
-
-       for (lr = 0; lr < vgic->nr_lr; lr++) {
-               struct vgic_lr vlr;
-
-               if (!test_bit(lr, vgic_cpu->lr_used))
-                       continue;
-
-               vlr = vgic_get_lr(vcpu, lr);
-
-               /*
-                * If we have a mapping, and the virtual interrupt is
-                * presented to the guest (as pending or active), then we must
-                * set the state to active in the physical world. See
-                * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt.
-                */
-               if (vlr.state & LR_HW) {
-                       struct irq_phys_map *map;
-                       map = vgic_irq_map_search(vcpu, vlr.irq);
-
-                       ret = irq_set_irqchip_state(map->irq,
-                                                   IRQCHIP_STATE_ACTIVE,
-                                                   true);
-                       WARN_ON(ret);
-               }
-       }
 }
 
 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
@@ -1421,7 +1437,7 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
                return 0;
 
        map = vgic_irq_map_search(vcpu, vlr.irq);
-       BUG_ON(!map || !map->active);
+       BUG_ON(!map);
 
        ret = irq_get_irqchip_state(map->irq,
                                    IRQCHIP_STATE_ACTIVE,
@@ -1429,13 +1445,8 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
 
        WARN_ON(ret);
 
-       if (map->active) {
-               ret = irq_set_irqchip_state(map->irq,
-                                           IRQCHIP_STATE_ACTIVE,
-                                           false);
-               WARN_ON(ret);
+       if (map->active)
                return 0;
-       }
 
        return 1;
 }
@@ -1607,8 +1618,12 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
        } else {
                if (level_triggered) {
                        vgic_dist_irq_clear_level(vcpu, irq_num);
-                       if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
+                       if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
                                vgic_dist_irq_clear_pending(vcpu, irq_num);
+                               vgic_cpu_irq_clear(vcpu, irq_num);
+                               if (!compute_pending_for_cpu(vcpu))
+                                       clear_bit(cpuid, dist->irq_pending_on_cpu);
+                       }
                }
 
                ret = false;
@@ -2122,7 +2137,7 @@ static int init_vgic_model(struct kvm *kvm, int type)
        case KVM_DEV_TYPE_ARM_VGIC_V2:
                vgic_v2_init_emulation(kvm);
                break;
-#ifdef CONFIG_ARM_GIC_V3
+#ifdef CONFIG_KVM_ARM_VGIC_V3
        case KVM_DEV_TYPE_ARM_VGIC_V3:
                vgic_v3_init_emulation(kvm);
                break;
@@ -2284,7 +2299,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
                block_size = KVM_VGIC_V2_CPU_SIZE;
                alignment = SZ_4K;
                break;
-#ifdef CONFIG_ARM_GIC_V3
+#ifdef CONFIG_KVM_ARM_VGIC_V3
        case KVM_VGIC_V3_ADDR_TYPE_DIST:
                type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
                addr_ptr = &vgic->vgic_dist_base;