]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branches 'iommu/fixes', 'arm/omap', 'arm/smmu', 'arm/shmobile', 'x86/amd',...
authorJoerg Roedel <jroedel@suse.de>
Fri, 30 May 2014 18:22:10 +0000 (20:22 +0200)
committerJoerg Roedel <jroedel@suse.de>
Fri, 30 May 2014 18:22:10 +0000 (20:22 +0200)
448 files changed:
Documentation/ABI/testing/sysfs-bus-pci
Documentation/DocBook/drm.tmpl
Documentation/DocBook/media/Makefile
Documentation/devicetree/bindings/clock/at91-clock.txt
Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
Documentation/devicetree/bindings/dma/ti-edma.txt
Documentation/devicetree/bindings/net/mdio-gpio.txt
Documentation/email-clients.txt
Documentation/filesystems/proc.txt
Documentation/hwmon/sysfs-interface
Documentation/java.txt
Documentation/kernel-parameters.txt
Documentation/networking/filter.txt
Documentation/networking/packet_mmap.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am3517.dtsi
arch/arm/boot/dts/am437x-gp-evm.dts
arch/arm/boot/dts/armada-370-db.dts
arch/arm/boot/dts/armada-375-db.dts
arch/arm/boot/dts/armada-xp-db.dts
arch/arm/boot/dts/armada-xp-gp.dts
arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
arch/arm/boot/dts/at91-sama5d3_xplained.dts
arch/arm/boot/dts/at91sam9261.dtsi
arch/arm/boot/dts/at91sam9rl.dtsi
arch/arm/boot/dts/imx53-mba53.dts
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
arch/arm/boot/dts/kirkwood-t5325.dts
arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
arch/arm/boot/dts/omap2.dtsi
arch/arm/boot/dts/omap2420.dtsi
arch/arm/boot/dts/omap2430.dtsi
arch/arm/boot/dts/omap3-cm-t3x30.dtsi
arch/arm/boot/dts/omap3-igep.dtsi
arch/arm/boot/dts/omap3-igep0020.dts
arch/arm/boot/dts/omap3-sb-t35.dtsi
arch/arm/boot/dts/omap3-sbc-t3517.dts
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/sama5d3.dtsi
arch/arm/boot/dts/sama5d3_mci2.dtsi
arch/arm/boot/dts/sama5d3_tcb1.dtsi
arch/arm/boot/dts/sama5d3_uart.dtsi
arch/arm/boot/dts/ste-ccu8540.dts
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/common/edma.c
arch/arm/configs/sunxi_defconfig
arch/arm/include/asm/xen/page.h
arch/arm/mach-omap2/omap-headsmp.S
arch/arm/mach-orion5x/common.h
arch/arm64/include/asm/memory.h
arch/arm64/kernel/irq.c
arch/arm64/mm/hugetlbpage.c
arch/ia64/include/asm/unistd.h
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/kernel/entry.S
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/metag/include/asm/barrier.h
arch/metag/include/asm/processor.h
arch/metag/include/uapi/asm/Kbuild
arch/metag/include/uapi/asm/resource.h [deleted file]
arch/mips/dec/ecc-berr.c
arch/mips/dec/kn02xa-berr.c
arch/mips/dec/prom/Makefile
arch/mips/dec/prom/call_o32.S [deleted file]
arch/mips/fw/lib/call_o32.S
arch/mips/fw/sni/sniprom.c
arch/mips/include/asm/dec/prom.h
arch/mips/include/asm/rm9k-ocd.h [deleted file]
arch/mips/include/asm/syscall.h
arch/mips/include/uapi/asm/inst.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/proc.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/lantiq/dts/easy50712.dts
arch/mips/lib/csum_partial.S
arch/mips/lib/delay.c
arch/mips/lib/strncpy_user.S
arch/mips/loongson/Kconfig
arch/mips/loongson/lemote-2f/clock.c
arch/mips/mm/tlb-funcs.S
arch/mips/mm/tlbex.c
arch/mips/ralink/dts/mt7620a_eval.dts
arch/mips/ralink/dts/rt2880_eval.dts
arch/mips/ralink/dts/rt3052_eval.dts
arch/mips/ralink/dts/rt3883_eval.dts
arch/parisc/Kconfig
arch/parisc/include/asm/processor.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/sys_parisc.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/syscall_table.S
arch/parisc/kernel/traps.c
arch/parisc/mm/fault.c
arch/powerpc/kernel/time.c
arch/powerpc/platforms/powernv/eeh-ioda.c
arch/s390/crypto/aes_s390.c
arch/s390/crypto/des_s390.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/include/asm/pgtable_64.h
arch/sparc/kernel/sysfs.c
arch/sparc/lib/NG2memcpy.S
arch/sparc/mm/fault_64.c
arch/sparc/mm/tsb.c
arch/x86/include/asm/hugetlb.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/rdrand.c
arch/x86/kernel/ldt.c
arch/x86/net/bpf_jit_comp.c
arch/x86/vdso/vdso32-setup.c
block/blk-cgroup.c
drivers/Makefile
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/ac.c
drivers/acpi/acpi_platform.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/tbutils.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/cm_sbs.c [new file with mode: 0644]
drivers/acpi/video.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_imx.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/bus/mvebu-mbus.c
drivers/char/random.c
drivers/char/tpm/tpm_ppi.c
drivers/clk/bcm/clk-kona-setup.c
drivers/clk/bcm/clk-kona.c
drivers/clk/bcm/clk-kona.h
drivers/clk/clk-divider.c
drivers/clk/clk.c
drivers/clk/shmobile/clk-mstp.c
drivers/clk/socfpga/clk-pll.c
drivers/clk/socfpga/clk.c
drivers/clk/tegra/clk-pll.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/crypto/caam/error.c
drivers/dma/dmaengine.c
drivers/dma/mv_xor.c
drivers/firmware/iscsi_ibft.c
drivers/gpio/gpio-ich.c
drivers/gpio/gpio-mcp23s08.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_vce.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/sid.h
drivers/hwmon/Kconfig
drivers/hwmon/emc1403.c
drivers/hwmon/ntc_thermistor.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-nomadik.c
drivers/i2c/busses/i2c-qup.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_v2.c
drivers/iommu/arm-smmu.c
drivers/iommu/fsl_pamu.c
drivers/iommu/ipmmu-vmsa.c [new file with mode: 0644]
drivers/iommu/msm_iommu_dev.c
drivers/iommu/omap-iommu.c
drivers/iommu/omap-iopgtable.h
drivers/iommu/shmobile-ipmmu.c
drivers/md/dm-crypt.c
drivers/md/dm-mpath.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/raid10.c
drivers/media/i2c/ov7670.c
drivers/media/i2c/s5c73m3/s5c73m3-core.c
drivers/media/media-device.c
drivers/media/platform/davinci/vpbe_display.c
drivers/media/platform/davinci/vpfe_capture.c
drivers/media/platform/davinci/vpif_capture.c
drivers/media/platform/davinci/vpif_display.c
drivers/media/platform/exynos4-is/fimc-core.c
drivers/media/tuners/fc2580.c
drivers/media/tuners/fc2580_priv.h
drivers/media/usb/dvb-usb-v2/Makefile
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/media/usb/gspca/sonixb.c
drivers/media/v4l2-core/v4l2-compat-ioctl32.c
drivers/memory/mvebu-devbus.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bonding.h
drivers/net/can/c_can/Kconfig
drivers/net/can/c_can/c_can.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/altera/Makefile
drivers/net/ethernet/altera/altera_msgdma.c
drivers/net/ethernet/altera/altera_msgdmahw.h
drivers/net/ethernet/altera/altera_sgdma.c
drivers/net/ethernet/altera/altera_sgdmahw.h
drivers/net/ethernet/altera/altera_tse.h
drivers/net/ethernet/altera/altera_tse_ethtool.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/altera/altera_utils.c
drivers/net/ethernet/altera/altera_utils.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/ec_bhf.c [new file with mode: 0644]
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/macvlan.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/usb/cdc_mbim.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/of/base.c
drivers/pci/host/pci-mvebu.c
drivers/pci/hotplug/shpchp_ctrl.c
drivers/pci/pci.c
drivers/ptp/Kconfig
drivers/rtc/rtc-hym8563.c
drivers/scsi/scsi_transport_sas.c
drivers/sh/Makefile
drivers/sh/pm_runtime.c
drivers/spi/spi-pxa2xx-dma.c
drivers/spi/spi-qup.c
drivers/spi/spi.c
drivers/staging/imx-drm/imx-drm-core.c
drivers/staging/imx-drm/imx-tve.c
drivers/staging/media/davinci_vpfe/vpfe_video.c
drivers/staging/media/sn9c102/sn9c102_devtable.h
drivers/staging/rtl8723au/os_dep/os_intfs.c
drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/target_core_device.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/xen/events/events_fifo.c
fs/afs/cmservice.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/btrfs/ioctl.c
fs/btrfs/send.c
fs/cifs/inode.c
fs/exec.c
fs/kernfs/file.c
fs/locks.c
fs/nfsd/nfs4acl.c
fs/nfsd/nfs4state.c
fs/ocfs2/dlm/dlmmaster.c
fs/sysfs/file.c
fs/sysfs/mount.c
fs/xfs/xfs_export.c
fs/xfs/xfs_file.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_super.c
include/asm-generic/resource.h
include/dt-bindings/clock/at91.h [moved from include/dt-bindings/clk/at91.h with 100% similarity]
include/linux/cgroup.h
include/linux/dmaengine.h
include/linux/if_macvlan.h
include/linux/if_vlan.h
include/linux/interrupt.h
include/linux/kernfs.h
include/linux/mlx4/qp.h
include/linux/net.h
include/linux/netdevice.h
include/linux/of.h
include/linux/of_mdio.h
include/linux/perf_event.h
include/linux/platform_data/ipmmu-vmsa.h [new file with mode: 0644]
include/linux/rtnetlink.h
include/linux/sched.h
include/net/cfg80211.h
include/net/ip6_route.h
include/net/netns/ipv4.h
include/uapi/asm-generic/resource.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/audit.h
include/uapi/linux/nl80211.h
kernel/cgroup.c
kernel/cgroup_freezer.c
kernel/events/core.c
kernel/hrtimer.c
kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/cpupri.c
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/workqueue.c
mm/Kconfig
mm/filemap.c
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
mm/mremap.c
mm/percpu.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/fragmentation.c
net/batman-adv/gateway_client.c
net/batman-adv/hard-interface.c
net/batman-adv/originator.c
net/bridge/br_netfilter.c
net/ceph/messenger.c
net/ceph/osdmap.c
net/core/dev.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/utils.c
net/dsa/dsa.c
net/ipv4/af_inet.c
net/ipv4/fib_semantics.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/xfrm4_output.c
net/ipv4/xfrm4_protocol.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ndisc.c
net/ipv6/netfilter.c
net/ipv6/route.c
net/ipv6/tcpv6_offload.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_protocol.c
net/iucv/af_iucv.c
net/mac80211/ieee80211_i.h
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/util.c
net/mac80211/vht.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink.c
net/rxrpc/ar-key.c
net/sched/cls_tcindex.c
net/wireless/scan.c
net/wireless/sme.c
scripts/checksyscalls.sh
security/device_cgroup.c
sound/isa/sb/sb_mixer.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/tlv320aic31xx.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8962.h
sound/soc/fsl/fsl_esai.c
sound/soc/fsl/imx-audmux.c
sound/soc/intel/sst-acpi.c
sound/soc/intel/sst-baytrail-dsp.c
sound/soc/intel/sst-baytrail-ipc.c
sound/soc/intel/sst-dsp-priv.h
sound/soc/intel/sst-dsp.c
sound/soc/intel/sst-dsp.h
sound/soc/intel/sst-firmware.c
sound/soc/intel/sst-haswell-dsp.c
sound/soc/intel/sst-haswell-ipc.c
sound/soc/intel/sst-haswell-ipc.h
sound/soc/intel/sst-haswell-pcm.c
sound/soc/sh/rcar/core.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
tools/Makefile
tools/lib/lockdep/Makefile

index a3c5a6685036103e7ec677272acdc4620d0302f6..ab8d76dfaa8096bbdf42bacf31336b856c07959b 100644 (file)
@@ -117,7 +117,7 @@ Description:
 
 What:          /sys/bus/pci/devices/.../vpd
 Date:          February 2008
-Contact:       Ben Hutchings <bhutchings@solarflare.com>
+Contact:       Ben Hutchings <bwh@kernel.org>
 Description:
                A file named vpd in a device directory will be a
                binary file containing the Vital Product Data for the
index 677a02553ec0cd772c1a86c73b7d314f48376190..ba60d93c18551af17db0e1bcbca7670662c025f9 100644 (file)
@@ -79,7 +79,7 @@
   <partintro>
     <para>
       This first part of the DRM Developer's Guide documents core DRM code,
-      helper libraries for writting drivers and generic userspace interfaces
+      helper libraries for writing drivers and generic userspace interfaces
       exposed by DRM drivers.
     </para>
   </partintro>
@@ -459,7 +459,7 @@ char *date;</synopsis>
       providing a solution to every graphics memory-related problems, GEM
       identified common code between drivers and created a support library to
       share it. GEM has simpler initialization and execution requirements than
-      TTM, but has no video RAM management capabitilies and is thus limited to
+      TTM, but has no video RAM management capabilities and is thus limited to
       UMA devices.
     </para>
     <sect2>
@@ -889,7 +889,7 @@ int (*prime_fd_to_handle)(struct drm_device *dev,
            vice versa. Drivers must use the kernel dma-buf buffer sharing framework
            to manage the PRIME file descriptors. Similar to the mode setting
            API PRIME is agnostic to the underlying buffer object manager, as
-           long as handles are 32bit unsinged integers.
+           long as handles are 32bit unsigned integers.
          </para>
          <para>
            While non-GEM drivers must implement the operations themselves, GEM
@@ -2356,7 +2356,7 @@ void intel_crt_init(struct drm_device *dev)
       first create properties and then create and associate individual instances
       of those properties to objects. A property can be instantiated multiple
       times and associated with different objects. Values are stored in property
-      instances, and all other property information are stored in the propery
+      instances, and all other property information are stored in the property
       and shared between all instances of the property.
     </para>
     <para>
@@ -2697,10 +2697,10 @@ int num_ioctls;</synopsis>
   <sect1>
     <title>Legacy Support Code</title>
     <para>
-      The section very brievely covers some of the old legacy support code which
+      The section very briefly covers some of the old legacy support code which
       is only used by old DRM drivers which have done a so-called shadow-attach
       to the underlying device instead of registering as a real driver. This
-      also includes some of the old generic buffer mangement and command
+      also includes some of the old generic buffer management and command
       submission code. Do not use any of this in new and modern drivers.
     </para>
 
index f9fd615427fbd4c0a718d66ed1b7de006ab796e6..1d27f0a1abd1e1872b0e05693ab35d6ecd64b0f2 100644 (file)
@@ -195,7 +195,7 @@ DVB_DOCUMENTED = \
 #
 
 install_media_images = \
-       $(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
+       $(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
 
 $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
        $(Q)base64 -d $< >$@
index cd5e23912888cf4350750fe0adc908fb895e3d08..6794cdc96d8fdf44513a4ad2310fe67a2a93712d 100644 (file)
@@ -62,7 +62,7 @@ Required properties for PMC node:
 - interrupt-controller : tell that the PMC is an interrupt controller.
 - #interrupt-cells : must be set to 1. The first cell encodes the interrupt id,
        and reflect the bit position in the PMC_ER/DR/SR registers.
-       You can use the dt macros defined in dt-bindings/clk/at91.h.
+       You can use the dt macros defined in dt-bindings/clock/at91.h.
        0 (AT91_PMC_MOSCS) -> main oscillator ready
        1 (AT91_PMC_LOCKA) -> PLL A ready
        2 (AT91_PMC_LOCKB) -> PLL B ready
index 5992dceec7af7d1e9d1ac8f329b831d48e409d87..02a25d99ca61bfb33a46a2ce8e547b7f22d3c73a 100644 (file)
@@ -43,7 +43,7 @@ Example
                clock-output-names =
                        "tpu0", "mmcif1", "sdhi3", "sdhi2",
                         "sdhi1", "sdhi0", "mmcif0";
-               renesas,clock-indices = <
+               clock-indices = <
                        R8A7790_CLK_TPU0 R8A7790_CLK_MMCIF1 R8A7790_CLK_SDHI3
                        R8A7790_CLK_SDHI2 R8A7790_CLK_SDHI1 R8A7790_CLK_SDHI0
                        R8A7790_CLK_MMCIF0
index 9fbbdb783a72d50600755a074cbc11cfba97f713..68ff2137bae7261e84409a472fbf432989fca0d6 100644 (file)
@@ -29,6 +29,6 @@ edma: edma@49000000 {
        dma-channels = <64>;
        ti,edma-regions = <4>;
        ti,edma-slots = <256>;
-       ti,edma-xbar-event-map = <1 12
-                                 2 13>;
+       ti,edma-xbar-event-map = /bits/ 16 <1 12
+                                           2 13>;
 };
index c79bab025369af4bb6320ba0e90f7fb386942cc1..8dbcf8295c6c9ceaa4eb7518694acf3b09095dc1 100644 (file)
@@ -14,7 +14,7 @@ node.
 Example:
 
 aliases {
-       mdio-gpio0 = <&mdio0>;
+       mdio-gpio0 = &mdio0;
 };
 
 mdio0: mdio {
index e9f5daccbd02e2d8e97b489faf372dbb307529d8..4e30ebaa9e5b2652950b2383817e852d2e2239e0 100644 (file)
@@ -201,20 +201,15 @@ To beat some sense out of the internal editor, do this:
 
 - Edit your Thunderbird config settings so that it won't use format=flowed.
   Go to "edit->preferences->advanced->config editor" to bring up the
-  thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
-  "false".
+  thunderbird's registry editor.
 
-- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false".
+- Set "mailnews.send_plaintext_flowed" to "false"
 
-- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true".
+- Set "mailnews.wraplength" from "72" to "0"
 
-- Enable UTF8: Set "prefs.converted-to-utf8" to "true".
+- "View" > "Message Body As" > "Plain Text"
 
-- Install the "toggle wordwrap" extension.  Download the file from:
-    https://addons.mozilla.org/thunderbird/addon/2351/
-  Then go to "tools->add ons", select "install" at the bottom of the screen,
-  and browse to where you saved the .xul file.  This adds an "Enable
-  Wordwrap" entry under the Options menu of the message composer.
+- "View" > "Character Encoding" > "Unicode (UTF-8)"
 
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 TkRat (GUI)
index 8b9cd8eb3f917e63074ba0ffae0e83b767a87176..264bcde0c51c55629066e9809af3bab99fe05c8c 100644 (file)
@@ -1245,8 +1245,9 @@ second).  The meanings of the columns are as follows, from left to right:
 
 The "intr" line gives counts of interrupts  serviced since boot time, for each
 of the  possible system interrupts.   The first  column  is the  total of  all
-interrupts serviced; each  subsequent column is the  total for that particular
-interrupt.
+interrupts serviced  including  unnumbered  architecture specific  interrupts;
+each  subsequent column is the  total for that particular numbered interrupt.
+Unnumbered interrupts are not shown, only summed into the total.
 
 The "ctxt" line gives the total number of context switches across all CPUs.
 
index 79f8257dd790703f8a9285e7b498f585df87e97b..2cc95ad466047b055d45f748619cf2d7231d5e93 100644 (file)
@@ -327,6 +327,13 @@ temp[1-*]_max_hyst
                from the max value.
                RW
 
+temp[1-*]_min_hyst
+               Temperature hysteresis value for min limit.
+               Unit: millidegree Celsius
+               Must be reported as an absolute temperature, NOT a delta
+               from the min value.
+               RW
+
 temp[1-*]_input Temperature input value.
                Unit: millidegree Celsius
                RO
@@ -362,6 +369,13 @@ temp[1-*]_lcrit    Temperature critical min value, typically lower than
                Unit: millidegree Celsius
                RW
 
+temp[1-*]_lcrit_hyst
+               Temperature hysteresis value for critical min limit.
+               Unit: millidegree Celsius
+               Must be reported as an absolute temperature, NOT a delta
+               from the critical min value.
+               RW
+
 temp[1-*]_offset
                Temperature offset which is added to the temperature reading
                by the chip.
index e6a72328154783fc6e983904658eb8eb8c0c4ee8..418020584ccc171b8ff079e496e73383f0f55c29 100644 (file)
@@ -188,6 +188,9 @@ shift
 #define CP_METHODREF 10
 #define CP_INTERFACEMETHODREF 11
 #define CP_NAMEANDTYPE 12
+#define CP_METHODHANDLE 15
+#define CP_METHODTYPE 16
+#define CP_INVOKEDYNAMIC 18
 
 /* Define some commonly used error messages */
 
@@ -242,14 +245,19 @@ void skip_constant(FILE *classfile, u_int16_t *cur)
                break;
        case CP_CLASS:
        case CP_STRING:
+       case CP_METHODTYPE:
                seekerr = fseek(classfile, 2, SEEK_CUR);
                break;
+       case CP_METHODHANDLE:
+               seekerr = fseek(classfile, 3, SEEK_CUR);
+               break;
        case CP_INTEGER:
        case CP_FLOAT:
        case CP_FIELDREF:
        case CP_METHODREF:
        case CP_INTERFACEMETHODREF:
        case CP_NAMEANDTYPE:
+       case CP_INVOKEDYNAMIC:
                seekerr = fseek(classfile, 4, SEEK_CUR);
                break;
        case CP_LONG:
index 43842177b771d72e67e90361f79b28966435787f..30a8ad0dae535cf1670a77d7f4c869e5ffc4c6dc 100644 (file)
@@ -2218,10 +2218,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        noreplace-smp   [X86-32,SMP] Don't replace SMP instructions
                        with UP alternatives
 
-       nordrand        [X86] Disable the direct use of the RDRAND
-                       instruction even if it is supported by the
-                       processor.  RDRAND is still available to user
-                       space applications.
+       nordrand        [X86] Disable kernel use of the RDRAND and
+                       RDSEED instructions even if they are supported
+                       by the processor.  RDRAND and RDSEED are still
+                       available to user space applications.
 
        noresume        [SWSUSP] Disables resume and restores original swap
                        space.
index 81f940f4e88480d48c35fd7707d679d646ef0af8..e3ba753cb714949c4e26f898da31b6d0e6b03737 100644 (file)
@@ -277,7 +277,7 @@ Possible BPF extensions are shown in the following table:
   mark                                  skb->mark
   queue                                 skb->queue_mapping
   hatype                                skb->dev->type
-  rxhash                                skb->rxhash
+  rxhash                                skb->hash
   cpu                                   raw_smp_processor_id()
   vlan_tci                              vlan_tx_tag_get(skb)
   vlan_pr                               vlan_tx_tag_present(skb)
index 6fea79efb4cbfd31cc1d0155aef320b94b89da9e..38112d512f47db9ec9a70edae7c7df83b7d13119 100644 (file)
@@ -578,7 +578,7 @@ processes. This also works in combination with mmap(2) on packet sockets.
 
 Currently implemented fanout policies are:
 
-  - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash
+  - PACKET_FANOUT_HASH: schedule to socket by skb's packet hash
   - PACKET_FANOUT_LB: schedule to socket by round-robin
   - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
   - PACKET_FANOUT_RND: schedule to socket by random selection
index 51ebb779c5f33ee1693e740066bfbd1c004fa819..cc4511177949e3539946f441166586e50212ac2c 100644 (file)
@@ -537,7 +537,7 @@ L:  linux-alpha@vger.kernel.org
 F:     arch/alpha/
 
 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M:     Vince Bridgers <vbridgers2013@gmail.com
+M:     Vince Bridgers <vbridgers2013@gmail.com>
 L:     netdev@vger.kernel.org
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:     Maintained
@@ -1893,14 +1893,15 @@ L:      netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/bnx2x/
 
-BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
+BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:     Christian Daudt <bcm@fixthebug.org>
 M:     Matt Porter <mporter@linaro.org>
 L:     bcm-kernel-feedback-list@broadcom.com
-T:     git git://git.github.com/broadcom/bcm11351
+T:     git git://github.com/broadcom/mach-bcm
 S:     Maintained
 F:     arch/arm/mach-bcm/
 F:     arch/arm/boot/dts/bcm113*
+F:     arch/arm/boot/dts/bcm216*
 F:     arch/arm/boot/dts/bcm281*
 F:     arch/arm/configs/bcm_defconfig
 F:     drivers/mmc/host/sdhci_bcm_kona.c
@@ -2245,12 +2246,6 @@ L:       linux-usb@vger.kernel.org
 S:     Maintained
 F:     drivers/usb/host/ohci-ep93xx.c
 
-CIRRUS LOGIC CS4270 SOUND DRIVER
-M:     Timur Tabi <timur@tabi.org>
-L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:     Odd Fixes
-F:     sound/soc/codecs/cs4270*
-
 CIRRUS LOGIC AUDIO CODEC DRIVERS
 M:     Brian Austin <brian.austin@cirrus.com>
 M:     Paul Handrigan <Paul.Handrigan@cirrus.com>
@@ -4818,6 +4813,14 @@ L:       linux-kernel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:     kernel/irq/
+
+IRQCHIP DRIVERS
+M:     Thomas Gleixner <tglx@linutronix.de>
+M:     Jason Cooper <jason@lakedaemon.net>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
+T:     git git://git.infradead.org/users/jcooper/linux.git irqchip/core
 F:     drivers/irqchip/
 
 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
@@ -5490,15 +5493,15 @@ F:      Documentation/hwmon/ltc4261
 F:     drivers/hwmon/ltc4261.c
 
 LTP (Linux Test Project)
-M:     Shubham Goyal <shubham@linux.vnet.ibm.com>
 M:     Mike Frysinger <vapier@gentoo.org>
 M:     Cyril Hrubis <chrubis@suse.cz>
-M:     Caspar Zhang <caspar@casparzhang.com>
 M:     Wanlong Gao <gaowanlong@cn.fujitsu.com>
+M:     Jan Stancek <jstancek@redhat.com>
+M:     Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
+M:     Alexey Kodanev <alexey.kodanev@oracle.com>
 L:     ltp-list@lists.sourceforge.net (subscribers-only)
-W:     http://ltp.sourceforge.net/
+W:     http://linux-test-project.github.io/
 T:     git git://github.com/linux-test-project/ltp.git
-T:     git git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev
 S:     Maintained
 
 M32R ARCHITECTURE
@@ -6511,10 +6514,10 @@ T:      git git://openrisc.net/~jonas/linux
 F:     arch/openrisc/
 
 OPENVSWITCH
-M:     Jesse Gross <jesse@nicira.com>
+M:     Pravin Shelar <pshelar@nicira.com>
 L:     dev@openvswitch.org
 W:     http://openvswitch.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
 S:     Maintained
 F:     net/openvswitch/
 
@@ -9107,6 +9110,9 @@ F:        arch/um/os-Linux/drivers/
 
 TURBOCHANNEL SUBSYSTEM
 M:     "Maciej W. Rozycki" <macro@linux-mips.org>
+M:     Ralf Baechle <ralf@linux-mips.org>
+L:     linux-mips@linux-mips.org
+Q:     http://patchwork.linux-mips.org/project/linux-mips/list/
 S:     Maintained
 F:     drivers/tc/
 F:     include/linux/tc.h
index 8a8440a3578e1d3b2fde55625fbc506a5d2979e4..cf3412d78ff139c99458bf923872e7a57440853e 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index cb6811e5ae5a9a148321d4e667dc34cde9bc9846..7ad75b4e066358f5229d93d0d017c754a711ca32 100644 (file)
                        compatible = "ti,edma3";
                        ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
                        reg =   <0x49000000 0x10000>,
-                               <0x44e10f90 0x10>;
+                               <0x44e10f90 0x40>;
                        interrupts = <12 13 14>;
                        #dma-cells = <1>;
                        dma-channels = <64>;
index 788391f916844130d8d6cdb2722479163863a707..5a452fdd7c5d9711cec9f6d0196f90b0726fefc3 100644 (file)
        };
 };
 
+&iva {
+       status = "disabled";
+};
+
+&mailbox {
+       status = "disabled";
+};
+
+&mmu_isp {
+       status = "disabled";
+};
+
+&smartreflex_mpu_iva {
+       status = "disabled";
+};
+
 /include/ "am35xx-clocks.dtsi"
 /include/ "omap36xx-am35xx-omap3430es2plus-clocks.dtsi"
index df8798e8bd255bbf789da0b44f5229b9f3dc80dd..a055f7f0f14ae5a3e080489c9c424bd37be0e1c0 100644 (file)
        status = "okay";
 };
 
+&gpio5 {
+       status = "okay";
+       ti,no-reset-on-init;
+};
+
 &mmc1 {
        status = "okay";
        vmmc-supply = <&vmmcsd_fixed>;
index 82f238a9063ffe47d10dbe083f136e1876f8efd2..3383c4b668035737e1812777fed34a03f5f9ef7c 100644 (file)
@@ -67,6 +67,7 @@
                        i2c@11000 {
                                pinctrl-0 = <&i2c0_pins>;
                                pinctrl-names = "default";
+                               clock-frequency = <100000>;
                                status = "okay";
                                audio_codec: audio-codec@4a {
                                        compatible = "cirrus,cs42l51";
index 9378d3136b41d7b37f11abdf01186c019758f7f1..0451124e8ebf49af45b34072ae69a02a6fa133a9 100644 (file)
                                };
                        };
 
+                       sata@a0000 {
+                               status = "okay";
+                               nr-ports = <2>;
+                       };
+
                        nand: nand@d0000 {
                                pinctrl-0 = <&nand_pins>;
                                pinctrl-names = "default";
index 448373c4b0e534c1d2ce08592981f5d38bad39a8..90f0bf6f92715c7335072bda5b508e7800fa89a4 100644 (file)
@@ -49,7 +49,7 @@
                        /* Device Bus parameters are required */
 
                        /* Read parameters */
-                       devbus,bus-width    = <8>;
+                       devbus,bus-width    = <16>;
                        devbus,turn-off-ps  = <60000>;
                        devbus,badr-skew-ps = <0>;
                        devbus,acc-first-ps = <124000>;
index 61bda687f782f65485f958adb8d8ec2822ebde35..0c756421ae6aa5f504b72898c16875bdaab5f8d3 100644 (file)
@@ -59,7 +59,7 @@
                        /* Device Bus parameters are required */
 
                        /* Read parameters */
-                       devbus,bus-width    = <8>;
+                       devbus,bus-width    = <16>;
                        devbus,turn-off-ps  = <60000>;
                        devbus,badr-skew-ps = <0>;
                        devbus,acc-first-ps = <124000>;
                        ethernet@70000 {
                                status = "okay";
                                phy = <&phy0>;
-                               phy-mode = "rgmii-id";
+                               phy-mode = "qsgmii";
                        };
                        ethernet@74000 {
                                status = "okay";
                                phy = <&phy1>;
-                               phy-mode = "rgmii-id";
+                               phy-mode = "qsgmii";
                        };
                        ethernet@30000 {
                                status = "okay";
                                phy = <&phy2>;
-                               phy-mode = "rgmii-id";
+                               phy-mode = "qsgmii";
                        };
                        ethernet@34000 {
                                status = "okay";
                                phy = <&phy3>;
-                               phy-mode = "rgmii-id";
+                               phy-mode = "qsgmii";
                        };
 
                        /* Front-side USB slot */
index 985948ce67b3271a65c9b4a413a99e7d255b12e7..5d42feb3104983a2ee9b21be33b59cc2e66a1bcd 100644 (file)
@@ -39,7 +39,7 @@
                        /* Device Bus parameters are required */
 
                        /* Read parameters */
-                       devbus,bus-width    = <8>;
+                       devbus,bus-width    = <16>;
                        devbus,turn-off-ps  = <60000>;
                        devbus,badr-skew-ps = <0>;
                        devbus,acc-first-ps = <124000>;
index ce1375595e5f27117c37881bcafa5fc5805b1455..4537259ce5299baf8cf68978d7c2d106c5f67c32 100644 (file)
@@ -34,7 +34,7 @@
                        };
 
                        spi0: spi@f0004000 {
-                               cs-gpios = <&pioD 13 0>;
+                               cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>;
                                status = "okay";
                        };
 
@@ -79,7 +79,7 @@
                        };
 
                        spi1: spi@f8008000 {
-                               cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>;
+                               cs-gpios = <&pioC 25 0>;
                                status = "okay";
                        };
 
index e21dda0e8986574b2531c1675eca1b4de164822d..3be973e9889a2b0ef29cc45657ad6eeb056e7f5f 100644 (file)
@@ -10,7 +10,7 @@
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        model = "Atmel AT91SAM9261 family SoC";
index 63e1784d272c556974023fcfd7d46ba7a0510e3d..92a52faebef77cd8eda572d11504c0f83362a428 100644 (file)
@@ -8,7 +8,7 @@
 
 #include "skeleton.dtsi"
 #include <dt-bindings/pinctrl/at91.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/gpio/gpio.h>
 
index 7c8c129698929151512f282c5baad14c69f51309..a3431d7848709aac0463a27e4471c22dd17a4a46 100644 (file)
 &tve {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_vga_sync_1>;
-       i2c-ddc-bus = <&i2c3>;
+       ddc-i2c-bus = <&i2c3>;
        fsl,tve-mode = "vga";
        fsl,hsync-pin = <4>;
        fsl,vsync-pin = <6>;
index 9c2bff2252d0d078514348ab2bc92aad7ea5a6fb..6a1bf4ff83d5516dac0016ebc2bdd21d05565631 100644 (file)
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "fsl,imx53-ipu";
-                       reg = <0x18000000 0x080000000>;
+                       reg = <0x18000000 0x08000000>;
                        interrupts = <11 10>;
                        clocks = <&clks IMX5_CLK_IPU_GATE>,
                                 <&clks IMX5_CLK_IPU_DI0_GATE>,
index 32c6fb4a11624c05756e4e2fe24b05c62c799667..b939f4f52d16a7c0edb677a86558bacd94fec831 100644 (file)
                bootargs = "console=ttyS0,115200n8 earlyprintk";
        };
 
+       mbus {
+               pcie-controller {
+                       status = "okay";
+
+                       pcie@1,0 {
+                               status = "okay";
+                       };
+               };
+        };
+
        ocp@f1000000 {
                pinctrl@10000 {
                        pmx_usb_led: pmx-usb-led {
                ehci@50000 {
                        status = "okay";
                };
-
-               pcie-controller {
-                       status = "okay";
-
-                       pcie@1,0 {
-                               status = "okay";
-                       };
-               };
        };
 
        gpio-leds {
index aa78c2d11fe738fc843f716bd1a6f98ff1054a5b..e2cc85cc3b87e805a113489d3d95cf912d682224 100644 (file)
@@ -4,6 +4,16 @@
 / {
        model = "ZyXEL NSA310";
 
+       mbus {
+               pcie-controller {
+                       status = "okay";
+
+                       pcie@1,0 {
+                               status = "okay";
+                       };
+               };
+       };
+
        ocp@f1000000 {
                pinctrl: pinctrl@10000 {
 
                        status = "okay";
                        nr-ports = <2>;
                };
-
-               pcie-controller {
-                       status = "okay";
-
-                       pcie@1,0 {
-                               status = "okay";
-                       };
-               };
        };
 
        gpio_poweroff {
index 7d1c7677a18f18c3fb1cc00463420545a5b6cb78..0bd70d928c69ba7f7798d1492f6ce3844af9d6ad 100644 (file)
 
                i2c@11000 {
                        status = "okay";
-
-                       alc5621: alc5621@1a {
-                               compatible = "realtek,alc5621";
-                               reg = <0x1a>;
-                       };
                };
 
                serial@12000 {
index f577b7df9a29e4f5f4e74ca86aef4b4ba61ceb87..521c587acaee9f679ab6f9200c5f8be8eee240e9 100644 (file)
                compatible = "smsc,lan9221", "smsc,lan9115";
                bank-width = <2>;
                gpmc,mux-add-data;
-               gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <186>;
-               gpmc,cs-wr-off-ns = <186>;
-               gpmc,adv-on-ns = <12>;
-               gpmc,adv-rd-off-ns = <48>;
+               gpmc,cs-on-ns = <1>;
+               gpmc,cs-rd-off-ns = <180>;
+               gpmc,cs-wr-off-ns = <180>;
+               gpmc,adv-rd-off-ns = <18>;
                gpmc,adv-wr-off-ns = <48>;
                gpmc,oe-on-ns = <54>;
                gpmc,oe-off-ns = <168>;
                gpmc,we-off-ns = <168>;
                gpmc,rd-cycle-ns = <186>;
                gpmc,wr-cycle-ns = <186>;
-               gpmc,access-ns = <114>;
-               gpmc,page-burst-access-ns = <6>;
-               gpmc,bus-turnaround-ns = <12>;
-               gpmc,cycle2cycle-delay-ns = <18>;
-               gpmc,wr-data-mux-bus-ns = <90>;
-               gpmc,wr-access-ns = <186>;
+               gpmc,access-ns = <144>;
+               gpmc,page-burst-access-ns = <24>;
+               gpmc,bus-turnaround-ns = <90>;
+               gpmc,cycle2cycle-delay-ns = <90>;
                gpmc,cycle2cycle-samecsen;
                gpmc,cycle2cycle-diffcsen;
                vddvario-supply = <&vddvario>;
index 22f35ea142c199082afdd8ba626bd7c5e0b6cd54..8f8c07da4ac148d550ae45cd3501d4a992f4e7c8 100644 (file)
                        interrupts = <58>;
                };
 
-               mailbox: mailbox@48094000 {
-                       compatible = "ti,omap2-mailbox";
-                       ti,hwmods = "mailbox";
-                       reg = <0x48094000 0x200>;
-                       interrupts = <26>;
-               };
-
                intc: interrupt-controller@1 {
                        compatible = "ti,omap2-intc";
                        interrupt-controller;
index 85b1fb014c4314efe82eca9fcb7dfa7e482cb8d2..2d9979835f241f2153cd3d0b52c5c8b0b6c49559 100644 (file)
                        dma-names = "tx", "rx";
                };
 
+               mailbox: mailbox@48094000 {
+                       compatible = "ti,omap2-mailbox";
+                       reg = <0x48094000 0x200>;
+                       interrupts = <26>, <34>;
+                       interrupt-names = "dsp", "iva";
+                       ti,hwmods = "mailbox";
+               };
+
                timer1: timer@48028000 {
                        compatible = "ti,omap2420-timer";
                        reg = <0x48028000 0x400>;
index d09697dab55e80063a737361c65138822bbdf828..42d2c61c9e2d7dc1851054a45ec305820cc8a9f4 100644 (file)
                        dma-names = "tx", "rx";
                };
 
+               mailbox: mailbox@48094000 {
+                       compatible = "ti,omap2-mailbox";
+                       reg = <0x48094000 0x200>;
+                       interrupts = <26>;
+                       ti,hwmods = "mailbox";
+               };
+
                timer1: timer@49018000 {
                        compatible = "ti,omap2420-timer";
                        reg = <0x49018000 0x400>;
index d00055809e31d79b9d1730c06efe02c7e2e6f747..25ba08331d8852e6701c96a4b9e8e54def19f0d8 100644 (file)
                        cpu0-supply = <&vcc>;
                };
        };
-
-       vddvario: regulator-vddvario {
-               compatible = "regulator-fixed";
-               regulator-name = "vddvario";
-               regulator-always-on;
-       };
-
-       vdd33a: regulator-vdd33a {
-               compatible = "regulator-fixed";
-               regulator-name = "vdd33a";
-               regulator-always-on;
-       };
 };
 
 &omap3_pmx_core {
 
        hsusb0_pins: pinmux_hsusb0_pins {
                pinctrl-single,pins = <
-                       OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0)               /* hsusb0_clk.hsusb0_clk */
-                       OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0)               /* hsusb0_stp.hsusb0_stp */
-                       OMAP3_CORE1_IOPAD(0x21a4, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_dir.hsusb0_dir */
-                       OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_nxt.hsusb0_nxt */
-                       OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data0.hsusb2_data0 */
-                       OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data1.hsusb0_data1 */
-                       OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data2.hsusb0_data2 */
-                       OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data3 */
-                       OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data4 */
-                       OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data5 */
-                       OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data6 */
-                       OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data7 */
+                       OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0)               /* hsusb0_clk.hsusb0_clk */
+                       OMAP3_CORE1_IOPAD(0x21a4, PIN_OUTPUT | MUX_MODE0)               /* hsusb0_stp.hsusb0_stp */
+                       OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_dir.hsusb0_dir */
+                       OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_nxt.hsusb0_nxt */
+                       OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data0.hsusb2_data0 */
+                       OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data1.hsusb0_data1 */
+                       OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data2.hsusb0_data2 */
+                       OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data3 */
+                       OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data4 */
+                       OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data5 */
+                       OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data6 */
+                       OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data7 */
                >;
        };
 };
 
+#include "omap-gpmc-smsc911x.dtsi"
+
 &gpmc {
        ranges = <5 0 0x2c000000 0x01000000>;
 
-       smsc1: ethernet@5,0 {
+       smsc1: ethernet@gpmc {
                compatible = "smsc,lan9221", "smsc,lan9115";
                pinctrl-names = "default";
                pinctrl-0 = <&smsc1_pins>;
                interrupt-parent = <&gpio6>;
                interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
                reg = <5 0 0xff>;
-               bank-width = <2>;
-               gpmc,mux-add-data;
-               gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <186>;
-               gpmc,cs-wr-off-ns = <186>;
-               gpmc,adv-on-ns = <12>;
-               gpmc,adv-rd-off-ns = <48>;
-               gpmc,adv-wr-off-ns = <48>;
-               gpmc,oe-on-ns = <54>;
-               gpmc,oe-off-ns = <168>;
-               gpmc,we-on-ns = <54>;
-               gpmc,we-off-ns = <168>;
-               gpmc,rd-cycle-ns = <186>;
-               gpmc,wr-cycle-ns = <186>;
-               gpmc,access-ns = <114>;
-               gpmc,page-burst-access-ns = <6>;
-               gpmc,bus-turnaround-ns = <12>;
-               gpmc,cycle2cycle-delay-ns = <18>;
-               gpmc,wr-data-mux-bus-ns = <90>;
-               gpmc,wr-access-ns = <186>;
-               gpmc,cycle2cycle-samecsen;
-               gpmc,cycle2cycle-diffcsen;
-               vddvario-supply = <&vddvario>;
-               vdd33a-supply = <&vdd33a>;
-               reg-io-width = <4>;
-               smsc,save-mac-address;
        };
 };
 
index b97736d98a6427f087c11bd510909b1007f8c152..e2d163bf061975bff9ac5a6e83ecf6bf4ef98ed3 100644 (file)
                >;
        };
 
-       smsc911x_pins: pinmux_smsc911x_pins {
+       smsc9221_pins: pinmux_smsc9221_pins {
                pinctrl-single,pins = <
                        0x1a2 (PIN_INPUT | MUX_MODE4)           /* mcspi1_cs2.gpio_176 */
                >;
index 7abd64f6ae21465c9ac74b22563f8f828a5d684b..b22caaaf774ba710461fcabf395c121c0ccc0482 100644 (file)
@@ -10,7 +10,7 @@
  */
 
 #include "omap3-igep.dtsi"
-#include "omap-gpmc-smsc911x.dtsi"
+#include "omap-gpmc-smsc9221.dtsi"
 
 / {
        model = "IGEPv2 (TI OMAP AM/DM37x)";
 
        ethernet@gpmc {
                pinctrl-names = "default";
-               pinctrl-0 = <&smsc911x_pins>;
+               pinctrl-0 = <&smsc9221_pins>;
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio6>;
                interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
index 7909c51b05a5643563b4ed74405066e1a222a995..d59e3de1441e2f7dc5e0fb9b4bc29f49ee04cbc1 100644 (file)
@@ -2,20 +2,6 @@
  * Common support for CompuLab SB-T35 used on SBC-T3530, SBC-T3517 and SBC-T3730
  */
 
-/ {
-       vddvario_sb_t35: regulator-vddvario-sb-t35 {
-               compatible = "regulator-fixed";
-               regulator-name = "vddvario";
-               regulator-always-on;
-       };
-
-       vdd33a_sb_t35: regulator-vdd33a-sb-t35 {
-               compatible = "regulator-fixed";
-               regulator-name = "vdd33a";
-               regulator-always-on;
-       };
-};
-
 &omap3_pmx_core {
        smsc2_pins: pinmux_smsc2_pins {
                pinctrl-single,pins = <
                reg = <4 0 0xff>;
                bank-width = <2>;
                gpmc,mux-add-data;
-               gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <186>;
-               gpmc,cs-wr-off-ns = <186>;
-               gpmc,adv-on-ns = <12>;
-               gpmc,adv-rd-off-ns = <48>;
+               gpmc,cs-on-ns = <1>;
+               gpmc,cs-rd-off-ns = <180>;
+               gpmc,cs-wr-off-ns = <180>;
+               gpmc,adv-rd-off-ns = <18>;
                gpmc,adv-wr-off-ns = <48>;
                gpmc,oe-on-ns = <54>;
                gpmc,oe-off-ns = <168>;
                gpmc,we-off-ns = <168>;
                gpmc,rd-cycle-ns = <186>;
                gpmc,wr-cycle-ns = <186>;
-               gpmc,access-ns = <114>;
-               gpmc,page-burst-access-ns = <6>;
-               gpmc,bus-turnaround-ns = <12>;
-               gpmc,cycle2cycle-delay-ns = <18>;
-               gpmc,wr-data-mux-bus-ns = <90>;
-               gpmc,wr-access-ns = <186>;
+               gpmc,access-ns = <144>;
+               gpmc,page-burst-access-ns = <24>;
+               gpmc,bus-turnaround-ns = <90>;
+               gpmc,cycle2cycle-delay-ns = <90>;
                gpmc,cycle2cycle-samecsen;
                gpmc,cycle2cycle-diffcsen;
-               vddvario-supply = <&vddvario_sb_t35>;
-               vdd33a-supply = <&vdd33a_sb_t35>;
+               vddvario-supply = <&vddvario>;
+               vdd33a-supply = <&vdd33a>;
                reg-io-width = <4>;
                smsc,save-mac-address;
        };
index 024c9c6c682d7eb4b421a559c7c4b5f95e0511f9..42189b65d393d29d2cf467ddbc5df4eb0661a471 100644 (file)
@@ -8,6 +8,19 @@
 / {
        model = "CompuLab SBC-T3517 with CM-T3517";
        compatible = "compulab,omap3-sbc-t3517", "compulab,omap3-cm-t3517", "ti,am3517", "ti,omap3";
+
+       /* Only one GPMC smsc9220 on SBC-T3517, CM-T3517 uses am35x Ethernet */
+       vddvario: regulator-vddvario-sb-t35 {
+               compatible = "regulator-fixed";
+               regulator-name = "vddvario";
+               regulator-always-on;
+       };
+
+       vdd33a: regulator-vdd33a-sb-t35 {
+               compatible = "regulator-fixed";
+               regulator-name = "vdd33a";
+               regulator-always-on;
+       };
 };
 
 &omap3_pmx_core {
index acb9019dc437b66321ec7995dc16456678a2e42c..4231191ade06acf8c7a02938a0fcfcbdeb318fbc 100644 (file)
@@ -61,7 +61,7 @@
                        ti,hwmods = "mpu";
                };
 
-               iva {
+               iva: iva {
                        compatible = "ti,iva2.2";
                        ti,hwmods = "iva";
 
index f8c9855ce587c15790f79a2f04e0ff02933f463f..36b4312a5e0d82fb20fb6a7eb9b1d59942b0ec18 100644 (file)
                        status = "disabled";
                };
 
+               mailbox: mailbox@4a0f4000 {
+                       compatible = "ti,omap4-mailbox";
+                       reg = <0x4a0f4000 0x200>;
+                       interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+                       ti,hwmods = "mailbox";
+               };
+
                timer1: timer@4ae18000 {
                        compatible = "ti,omap5430-timer";
                        reg = <0x4ae18000 0x80>;
index eabcfdbb403acc7ff40b409617a53c9831414c04..a106b0872910da874f1000f417a1aab1793237de 100644 (file)
@@ -13,7 +13,7 @@
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        model = "Atmel SAMA5D3 family SoC";
index b029fe7ef17a657946de4d2fe71168b4b03210d8..1b02208ea6ff2b70aab43ddd416b37ef0b9a2a1f 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        ahb {
index 382b04431f66b621e01a9f2fe7a9e488ac4c171b..02848453ca0cf5447de27aaca6b233611173da4e 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        aliases {
index a9fa75e4165205f9a1259f9a0821519faee54070..7a8d4c6115f72fdab533980a0f0d96eb6cb1469a 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        aliases {
index 7f3baf51a3a9e933d3a47dce18d2ca102c104a9b..32dd55e5f4e6b8567a8a2d207b69311959c9f9a1 100644 (file)
@@ -18,6 +18,7 @@
        compatible = "st-ericsson,ccu8540", "st-ericsson,u8540";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>;
        };
 
index 32efc105df834de3c3143938bf0cfdebbf5d1b55..aba1c8a3f3883320a50b31dcac05341568e0a335 100644 (file)
@@ -87,7 +87,7 @@
 
                pll4: clk@01c20018 {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-pll1-clk";
+                       compatible = "allwinner,sun7i-a20-pll4-clk";
                        reg = <0x01c20018 0x4>;
                        clocks = <&osc24M>;
                        clock-output-names = "pll4";
                        clock-output-names = "pll6_sata", "pll6_other", "pll6";
                };
 
+               pll8: clk@01c20040 {
+                       #clock-cells = <0>;
+                       compatible = "allwinner,sun7i-a20-pll4-clk";
+                       reg = <0x01c20040 0x4>;
+                       clocks = <&osc24M>;
+                       clock-output-names = "pll8";
+               };
+
                cpu: cpu@01c20054 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-cpu-clk";
                        status = "disabled";
                };
 
-               i2c4: i2c@01c2bc00 {
+               i2c4: i2c@01c2c000 {
                        compatible = "allwinner,sun4i-i2c";
-                       reg = <0x01c2bc00 0x400>;
+                       reg = <0x01c2c000 0x400>;
                        interrupts = <0 89 4>;
                        clocks = <&apb1_gates 15>;
                        clock-frequency = <100000>;
index 41bca32409fce81358c3b5c35bc081bcc28e7c76..5339009b3c0ce648df92244b5d9274e78273290b 100644 (file)
@@ -1423,55 +1423,38 @@ EXPORT_SYMBOL(edma_clear_event);
 
 #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)
 
-static int edma_of_read_u32_to_s16_array(const struct device_node *np,
-                                        const char *propname, s16 *out_values,
-                                        size_t sz)
+static int edma_xbar_event_map(struct device *dev, struct device_node *node,
+                              struct edma_soc_info *pdata, size_t sz)
 {
-       int ret;
-
-       ret = of_property_read_u16_array(np, propname, out_values, sz);
-       if (ret)
-               return ret;
-
-       /* Terminate it */
-       *out_values++ = -1;
-       *out_values++ = -1;
-
-       return 0;
-}
-
-static int edma_xbar_event_map(struct device *dev,
-                              struct device_node *node,
-                              struct edma_soc_info *pdata, int len)
-{
-       int ret, i;
+       const char pname[] = "ti,edma-xbar-event-map";
        struct resource res;
        void __iomem *xbar;
-       const s16 (*xbar_chans)[2];
+       s16 (*xbar_chans)[2];
+       size_t nelm = sz / sizeof(s16);
        u32 shift, offset, mux;
+       int ret, i;
 
-       xbar_chans = devm_kzalloc(dev,
-                                 len/sizeof(s16) + 2*sizeof(s16),
-                                 GFP_KERNEL);
+       xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL);
        if (!xbar_chans)
                return -ENOMEM;
 
        ret = of_address_to_resource(node, 1, &res);
        if (ret)
-               return -EIO;
+               return -ENOMEM;
 
        xbar = devm_ioremap(dev, res.start, resource_size(&res));
        if (!xbar)
                return -ENOMEM;
 
-       ret = edma_of_read_u32_to_s16_array(node,
-                                           "ti,edma-xbar-event-map",
-                                           (s16 *)xbar_chans,
-                                           len/sizeof(u32));
+       ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm);
        if (ret)
                return -EIO;
 
-       for (i = 0; xbar_chans[i][0] != -1; i++) {
+       /* Invalidate last entry for the other user of this mess */
+       nelm >>= 1;
+       xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1;
+
+       for (i = 0; i < nelm; i++) {
                shift = (xbar_chans[i][1] & 0x03) << 3;
                offset = xbar_chans[i][1] & 0xfffffffc;
                mux = readl(xbar + offset);
@@ -1480,8 +1463,7 @@ static int edma_xbar_event_map(struct device *dev,
                writel(mux, (xbar + offset));
        }
 
-       pdata->xbar_chans = xbar_chans;
-
+       pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
        return 0;
 }
 
index b5df4a511b0acdfea6df30b7dd4b9753d2b00f9a..81ba78eaf54adb02840dbc2d41cc4af8acc7c08a 100644 (file)
@@ -37,7 +37,7 @@ CONFIG_SUN4I_EMAC=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_STMMAC_ETH=y
 # CONFIG_NET_VENDOR_WIZNET is not set
 # CONFIG_WLAN is not set
 CONFIG_SERIAL_8250=y
index cf4f3e867395ef0261b16a4d721d8cff740c1de2..ded062f9b358038c05fa074816706c40619e6408 100644 (file)
@@ -77,7 +77,6 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
 }
 /* VIRT <-> MACHINE conversion */
 #define virt_to_machine(v)     (phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_pfn(v)          (PFN_DOWN(__pa(v)))
 #define virt_to_mfn(v)         (pfn_to_mfn(virt_to_pfn(v)))
 #define mfn_to_virt(m)         (__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
index 75e92952c18efe3e597791e6798c2ee2ce5969a1..40c5d5f1451cd34b19ebed5e2d58f150f6f4a29d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Secondary CPU startup routine source file.
  *
- * Copyright (C) 2009 Texas Instruments, Inc.
+ * Copyright (C) 2009-2014 Texas Instruments, Inc.
  *
  * Author:
  *      Santosh Shilimkar <santosh.shilimkar@ti.com>
  * code.  This routine also provides a holding flag into which
  * secondary core is held until we're ready for it to initialise.
  * The primary core will update this flag using a hardware
-+ * register AuxCoreBoot0.
+ * register AuxCoreBoot0.
  */
 ENTRY(omap5_secondary_startup)
+.arm
+THUMB( adr     r9, BSYM(wait)  )       @ CPU may be entered in ARM mode.
+THUMB( bx      r9              )       @ If this is a Thumb-2 kernel,
+THUMB( .thumb                  )       @ switch to Thumb now.
 wait:  ldr     r2, =AUX_CORE_BOOT0_PA  @ read from AuxCoreBoot0
        ldr     r0, [r2]
        mov     r0, r0, lsr #5
index f565f9944af2ee45b795b180391853ffa8810a12..7548db2bfb8a7e595d7672c5cfbba3868651d689 100644 (file)
@@ -21,7 +21,7 @@ struct mv_sata_platform_data;
 #define ORION_MBUS_DEVBUS_BOOT_ATTR   0x0f
 #define ORION_MBUS_DEVBUS_TARGET(cs)  0x01
 #define ORION_MBUS_DEVBUS_ATTR(cs)    (~(1 << cs))
-#define ORION_MBUS_SRAM_TARGET        0x00
+#define ORION_MBUS_SRAM_TARGET        0x09
 #define ORION_MBUS_SRAM_ATTR          0x00
 
 /*
index e94f9458aa6faa3630d5d2b7cebf9e522b56901d..993bce527b8552d379c62b6082703b1438e80436 100644 (file)
@@ -138,6 +138,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __pa(x)                        __virt_to_phys((unsigned long)(x))
 #define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
+#define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys(x))
 
 /*
  *  virt_to_page(k)    convert a _valid_ virtual address to struct page *
index 473e5dbf8f39a39e8eaa7a0740e54ee4d6bacb59..0f08dfd69ebc73ea99b7b7f6d68c4b2f320eb2d1 100644 (file)
@@ -97,11 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
        if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
                return false;
 
-       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-               affinity = cpu_online_mask;
+       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
                ret = true;
-       }
 
+       /*
+        * when using forced irq_set_affinity we must ensure that the cpu
+        * being offlined is not present in the affinity mask, it may be
+        * selected as the target CPU otherwise
+        */
+       affinity = cpu_online_mask;
        c = irq_data_get_irq_chip(d);
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
index 5e9aec358306f0c13bdd0bb70758dde88be9e961..31eb959e9aa81d05f16269e3bb500a965cb7cd48 100644 (file)
@@ -51,7 +51,11 @@ int pmd_huge(pmd_t pmd)
 
 int pud_huge(pud_t pud)
 {
+#ifndef __PAGETABLE_PMD_FOLDED
        return !(pud_val(pud) & PUD_TABLE_BIT);
+#else
+       return 0;
+#endif
 }
 
 int pmd_huge_support(void)
index ae763d8bf55acff94a8c611893e2a103f3156f33..fb13dc5e8f8c7c34079664761f6fab1bf802a1c7 100644 (file)
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls                    314 /* length of syscall table */
+#define NR_syscalls                    315 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
index 715e85f858de5ea34e7f581b38bc5d9a60ec1304..7de0a2d65da42a09b6d8b34f75f175c326b329e9 100644 (file)
 #define __NR_finit_module              1335
 #define __NR_sched_setattr             1336
 #define __NR_sched_getattr             1337
+#define __NR_renameat2                 1338
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
index fa8d61a312a7ee818a300522d9f029c9534d78f6..ba3d03503e84fde7f714fda777acf254356b78e8 100644 (file)
@@ -1775,6 +1775,7 @@ sys_call_table:
        data8 sys_finit_module                  // 1335
        data8 sys_sched_setattr
        data8 sys_sched_getattr
+       data8 sys_renameat2
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
index 9d38b73989eb597d677acd95ea53cf0ddb99b624..33afa56ad47aecd6620bc150eea042705cc85d3e 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            351
+#define NR_syscalls            352
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index b932dd470041c2c5130dfcc44448c80033525259..9cd82fbc7817f716d589368bda3c1d274687a607 100644 (file)
 #define __NR_finit_module      348
 #define __NR_sched_setattr     349
 #define __NR_sched_getattr     350
+#define __NR_renameat2         351
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index b6223dc41d82870953be64b35fc409c4b5634070..501e102127899c6afaa893bd99d4d555f5bab8ec 100644 (file)
@@ -371,4 +371,5 @@ ENTRY(sys_call_table)
        .long sys_finit_module
        .long sys_sched_setattr
        .long sys_sched_getattr         /* 350 */
+       .long sys_renameat2
 
index 5d6b4b407ddab29b677a7aa5328715127dfdad56..2d6f0de7732529212bf4a9256bb3720386e39e71 100644 (file)
@@ -15,6 +15,7 @@ static inline void wr_fence(void)
        volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
        barrier();
        *flushptr = 0;
+       barrier();
 }
 
 #else /* CONFIG_METAG_META21 */
@@ -35,6 +36,7 @@ static inline void wr_fence(void)
        *flushptr = 0;
        *flushptr = 0;
        *flushptr = 0;
+       barrier();
 }
 
 #endif /* !CONFIG_METAG_META21 */
@@ -68,6 +70,7 @@ static inline void fence(void)
        volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
        barrier();
        *flushptr = 0;
+       barrier();
 }
 #define smp_mb()        fence()
 #define smp_rmb()       fence()
index f16477d1f571cb134a29e3afd13c480401f340ea..a8a37477c66e22a8e3c29ba2402ff6559a35bf27 100644 (file)
@@ -22,6 +22,8 @@
 /* Add an extra page of padding at the top of the stack for the guard page. */
 #define STACK_TOP      (TASK_SIZE - PAGE_SIZE)
 #define STACK_TOP_MAX  STACK_TOP
+/* Maximum virtual space for stack */
+#define STACK_SIZE_MAX (CONFIG_MAX_STACK_SIZE_MB*1024*1024)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
index 84e09feb4d546fef7004746a8d5550bddb3f8d58..ab78be2b6eb0503f939cc85409d532a3b037b491 100644 (file)
@@ -4,11 +4,11 @@ include include/uapi/asm-generic/Kbuild.asm
 header-y += byteorder.h
 header-y += ech.h
 header-y += ptrace.h
-header-y += resource.h
 header-y += sigcontext.h
 header-y += siginfo.h
 header-y += swab.h
 header-y += unistd.h
 
 generic-y += mman.h
+generic-y += resource.h
 generic-y += setup.h
diff --git a/arch/metag/include/uapi/asm/resource.h b/arch/metag/include/uapi/asm/resource.h
deleted file mode 100644 (file)
index 526d23c..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _UAPI_METAG_RESOURCE_H
-#define _UAPI_METAG_RESOURCE_H
-
-#define _STK_LIM_MAX    (1 << 28)
-#include <asm-generic/resource.h>
-
-#endif /* _UAPI_METAG_RESOURCE_H */
index 5abf4e894216ac4b683772f6b187e4e9337eb2d5..2a66e908f6a9d9a276cad042ac793f692630255e 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/addrspace.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
+#include <asm/cpu-type.h>
 #include <asm/irq_regs.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
index f434b759e3b9aaa0f6eb9ab07f85fb783de4a6ad..ec606363b80677fd464b92c13d5a905871b21466 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/types.h>
 
 #include <asm/addrspace.h>
+#include <asm/cpu-type.h>
 #include <asm/irq_regs.h>
 #include <asm/ptrace.h>
 #include <asm/traps.h>
index 064ae7a76bdc204c28e0c8e92ccd63400fbbf403..ae73e42ac20b163331a77439a75270320a77dfa8 100644 (file)
@@ -6,4 +6,3 @@
 lib-y                  += init.o memory.o cmdline.o identify.o console.o
 
 lib-$(CONFIG_32BIT)    += locore.o
-lib-$(CONFIG_64BIT)    += call_o32.o
diff --git a/arch/mips/dec/prom/call_o32.S b/arch/mips/dec/prom/call_o32.S
deleted file mode 100644 (file)
index 8c84981..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- *     O32 interface for the 64 (or N32) ABI.
- *
- *     Copyright (C) 2002  Maciej W. Rozycki
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- */
-
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-/* Maximum number of arguments supported.  Must be even!  */
-#define O32_ARGC       32
-/* Number of static registers we save.  */
-#define O32_STATC      11
-/* Frame size for both of the above.  */
-#define O32_FRAMESZ    (4 * O32_ARGC + SZREG * O32_STATC)
-
-               .text
-
-/*
- * O32 function call dispatcher, for interfacing 32-bit ROM routines.
- *
- * The standard 64 (N32) calling sequence is supported, with a0
- * holding a function pointer, a1-a7 -- its first seven arguments
- * and the stack -- remaining ones (up to O32_ARGC, including a1-a7).
- * Static registers, gp and fp are preserved, v0 holds a result.
- * This code relies on the called o32 function for sp and ra
- * restoration and thus both this dispatcher and the current stack
- * have to be placed in a KSEGx (or KUSEG) address space.  Any
- * pointers passed have to point to addresses within one of these
- * spaces as well.
- */
-NESTED(call_o32, O32_FRAMESZ, ra)
-               REG_SUBU        sp,O32_FRAMESZ
-
-               REG_S           ra,O32_FRAMESZ-1*SZREG(sp)
-               REG_S           fp,O32_FRAMESZ-2*SZREG(sp)
-               REG_S           gp,O32_FRAMESZ-3*SZREG(sp)
-               REG_S           s7,O32_FRAMESZ-4*SZREG(sp)
-               REG_S           s6,O32_FRAMESZ-5*SZREG(sp)
-               REG_S           s5,O32_FRAMESZ-6*SZREG(sp)
-               REG_S           s4,O32_FRAMESZ-7*SZREG(sp)
-               REG_S           s3,O32_FRAMESZ-8*SZREG(sp)
-               REG_S           s2,O32_FRAMESZ-9*SZREG(sp)
-               REG_S           s1,O32_FRAMESZ-10*SZREG(sp)
-               REG_S           s0,O32_FRAMESZ-11*SZREG(sp)
-
-               move            jp,a0
-
-               sll             a0,a1,zero
-               sll             a1,a2,zero
-               sll             a2,a3,zero
-               sll             a3,a4,zero
-               sw              a5,0x10(sp)
-               sw              a6,0x14(sp)
-               sw              a7,0x18(sp)
-
-               PTR_LA          t0,O32_FRAMESZ(sp)
-               PTR_LA          t1,0x1c(sp)
-               li              t2,O32_ARGC-7
-1:
-               lw              t3,(t0)
-               REG_ADDU        t0,SZREG
-               sw              t3,(t1)
-               REG_SUBU        t2,1
-               REG_ADDU        t1,4
-               bnez            t2,1b
-
-               jalr            jp
-
-               REG_L           s0,O32_FRAMESZ-11*SZREG(sp)
-               REG_L           s1,O32_FRAMESZ-10*SZREG(sp)
-               REG_L           s2,O32_FRAMESZ-9*SZREG(sp)
-               REG_L           s3,O32_FRAMESZ-8*SZREG(sp)
-               REG_L           s4,O32_FRAMESZ-7*SZREG(sp)
-               REG_L           s5,O32_FRAMESZ-6*SZREG(sp)
-               REG_L           s6,O32_FRAMESZ-5*SZREG(sp)
-               REG_L           s7,O32_FRAMESZ-4*SZREG(sp)
-               REG_L           gp,O32_FRAMESZ-3*SZREG(sp)
-               REG_L           fp,O32_FRAMESZ-2*SZREG(sp)
-               REG_L           ra,O32_FRAMESZ-1*SZREG(sp)
-
-               REG_ADDU        sp,O32_FRAMESZ
-               jr              ra
-END(call_o32)
index b308b2a0613e210c1f7b068dfc6a7589f1ad2afb..4703fe4dbd9a7b6c192ce3e86d0c5bceada460f1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     O32 interface for the 64 (or N32) ABI.
  *
- *     Copyright (C) 2002  Maciej W. Rozycki
+ *     Copyright (C) 2002, 2014  Maciej W. Rozycki
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License
 #include <asm/asm.h>
 #include <asm/regdef.h>
 
+/* O32 register size.  */
+#define O32_SZREG      4
 /* Maximum number of arguments supported.  Must be even!  */
 #define O32_ARGC       32
-/* Number of static registers we save. */
+/* Number of static registers we save.  */
 #define O32_STATC      11
-/* Frame size for static register  */
-#define O32_FRAMESZ    (SZREG * O32_STATC)
-/* Frame size on new stack */
-#define O32_FRAMESZ_NEW (SZREG + 4 * O32_ARGC)
+/* Argument area frame size.  */
+#define O32_ARGSZ      (O32_SZREG * O32_ARGC)
+/* Static register save area frame size.  */
+#define O32_STATSZ     (SZREG * O32_STATC)
+/* Stack pointer register save area frame size.  */
+#define O32_SPSZ       SZREG
+/* Combined area frame size.  */
+#define O32_FRAMESZ    (O32_ARGSZ + O32_SPSZ + O32_STATSZ)
+/* Switched stack frame size.  */
+#define O32_NFRAMESZ   (O32_ARGSZ + O32_SPSZ)
 
                .text
 
 /*
  * O32 function call dispatcher, for interfacing 32-bit ROM routines.
  *
- * The standard 64 (N32) calling sequence is supported, with a0
- * holding a function pointer, a1 a new stack pointer, a2-a7 -- its
- * first six arguments and the stack -- remaining ones (up to O32_ARGC,
- * including a2-a7). Static registers, gp and fp are preserved, v0 holds
- * a result. This code relies on the called o32 function for sp and ra
- * restoration and this dispatcher has to be placed in a KSEGx (or KUSEG)
- * address space.  Any pointers passed have to point to addresses within
- * one of these spaces as well.
+ * The standard 64 (N32) calling sequence is supported, with a0 holding
+ * a function pointer, a1 a pointer to the new stack to call the
+ * function with or 0 if no stack switching is requested, a2-a7 -- the
+ * function call's first six arguments, and the stack -- the remaining
+ * arguments (up to O32_ARGC, including a2-a7).  Static registers, gp
+ * and fp are preserved, v0 holds the result.  This code relies on the
+ * called o32 function for sp and ra restoration and this dispatcher has
+ * to be placed in a KSEGx (or KUSEG) address space.  Any pointers
+ * passed have to point to addresses within one of these spaces as well.
  */
 NESTED(call_o32, O32_FRAMESZ, ra)
                REG_SUBU        sp,O32_FRAMESZ
@@ -51,32 +60,36 @@ NESTED(call_o32, O32_FRAMESZ, ra)
                REG_S           s0,O32_FRAMESZ-11*SZREG(sp)
 
                move            jp,a0
-               REG_SUBU        s0,a1,O32_FRAMESZ_NEW
-               REG_S           sp,O32_FRAMESZ_NEW-1*SZREG(s0)
+
+               move            fp,sp
+               beqz            a1,0f
+               REG_SUBU        fp,a1,O32_NFRAMESZ
+0:
+               REG_S           sp,O32_NFRAMESZ-1*SZREG(fp)
 
                sll             a0,a2,zero
                sll             a1,a3,zero
                sll             a2,a4,zero
                sll             a3,a5,zero
-               sw              a6,0x10(s0)
-               sw              a7,0x14(s0)
+               sw              a6,4*O32_SZREG(fp)
+               sw              a7,5*O32_SZREG(fp)
 
                PTR_LA          t0,O32_FRAMESZ(sp)
-               PTR_LA          t1,0x18(s0)
+               PTR_LA          t1,6*O32_SZREG(fp)
                li              t2,O32_ARGC-6
 1:
                lw              t3,(t0)
                REG_ADDU        t0,SZREG
                sw              t3,(t1)
                REG_SUBU        t2,1
-               REG_ADDU        t1,4
+               REG_ADDU        t1,O32_SZREG
                bnez            t2,1b
 
-               move            sp,s0
+               move            sp,fp
 
                jalr            jp
 
-               REG_L           sp,O32_FRAMESZ_NEW-1*SZREG(sp)
+               REG_L           sp,O32_NFRAMESZ-1*SZREG(sp)
 
                REG_L           s0,O32_FRAMESZ-11*SZREG(sp)
                REG_L           s1,O32_FRAMESZ-10*SZREG(sp)
index 2c2cb182af4edd8673dc54c00c39aded85a6ed30..6aa264b9856ac99b71b88d54fae19cea27d782de 100644 (file)
@@ -40,7 +40,8 @@
 
 #ifdef CONFIG_64BIT
 
-static u8 o32_stk[16384];
+/* O32 stack has to be 8-byte aligned. */
+static u64 o32_stk[4096];
 #define O32_STK          &o32_stk[sizeof(o32_stk)]
 
 #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
index c0ead63138453c04d3b20918fbcfb1e1c02c5c3c..b59a2103b61a3f64efd75b29f034e8208c155012 100644 (file)
@@ -113,31 +113,31 @@ extern int (*__pmax_close)(int);
 #define __DEC_PROM_O32(fun, arg) fun arg __asm__(#fun); \
                                 __asm__(#fun " = call_o32")
 
-int __DEC_PROM_O32(_rex_bootinit, (int (*)(void)));
-int __DEC_PROM_O32(_rex_bootread, (int (*)(void)));
-int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), memmap *));
+int __DEC_PROM_O32(_rex_bootinit, (int (*)(void), void *));
+int __DEC_PROM_O32(_rex_bootread, (int (*)(void), void *));
+int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), void *, memmap *));
 unsigned long *__DEC_PROM_O32(_rex_slot_address,
-                            (unsigned long *(*)(int), int));
-void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void)));
-int __DEC_PROM_O32(_rex_getsysid, (int (*)(void)));
-void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void)));
-
-int __DEC_PROM_O32(_prom_getchar, (int (*)(void)));
-char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), char *));
-int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), char *, ...));
-
-
-#define rex_bootinit()         _rex_bootinit(__rex_bootinit)
-#define rex_bootread()         _rex_bootread(__rex_bootread)
-#define rex_getbitmap(x)       _rex_getbitmap(__rex_getbitmap, x)
-#define rex_slot_address(x)    _rex_slot_address(__rex_slot_address, x)
-#define rex_gettcinfo()                _rex_gettcinfo(__rex_gettcinfo)
-#define rex_getsysid()         _rex_getsysid(__rex_getsysid)
-#define rex_clear_cache()      _rex_clear_cache(__rex_clear_cache)
-
-#define prom_getchar()         _prom_getchar(__prom_getchar)
-#define prom_getenv(x)         _prom_getenv(__prom_getenv, x)
-#define prom_printf(x...)      _prom_printf(__prom_printf, x)
+                            (unsigned long *(*)(int), void *, int));
+void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void), void *));
+int __DEC_PROM_O32(_rex_getsysid, (int (*)(void), void *));
+void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void), void *));
+
+int __DEC_PROM_O32(_prom_getchar, (int (*)(void), void *));
+char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), void *, char *));
+int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), void *, char *, ...));
+
+
+#define rex_bootinit()         _rex_bootinit(__rex_bootinit, NULL)
+#define rex_bootread()         _rex_bootread(__rex_bootread, NULL)
+#define rex_getbitmap(x)       _rex_getbitmap(__rex_getbitmap, NULL, x)
+#define rex_slot_address(x)    _rex_slot_address(__rex_slot_address, NULL, x)
+#define rex_gettcinfo()                _rex_gettcinfo(__rex_gettcinfo, NULL)
+#define rex_getsysid()         _rex_getsysid(__rex_getsysid, NULL)
+#define rex_clear_cache()      _rex_clear_cache(__rex_clear_cache, NULL)
+
+#define prom_getchar()         _prom_getchar(__prom_getchar, NULL)
+#define prom_getenv(x)         _prom_getenv(__prom_getenv, NULL, x)
+#define prom_printf(x...)      _prom_printf(__prom_printf, NULL, x)
 
 #else /* !CONFIG_64BIT */
 
diff --git a/arch/mips/include/asm/rm9k-ocd.h b/arch/mips/include/asm/rm9k-ocd.h
deleted file mode 100644 (file)
index b0b80d9..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  Copyright (C) 2004 by Basler Vision Technologies AG
- *  Author: Thomas Koeller <thomas.koeller@baslerweb.com>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#if !defined(_ASM_RM9K_OCD_H)
-#define _ASM_RM9K_OCD_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <asm/io.h>
-
-extern volatile void __iomem * const ocd_base;
-extern volatile void __iomem * const titan_base;
-
-#define ocd_addr(__x__)                (ocd_base + (__x__))
-#define titan_addr(__x__)      (titan_base + (__x__))
-#define scram_addr(__x__)      (scram_base + (__x__))
-
-/* OCD register access */
-#define ocd_readl(__offs__) __raw_readl(ocd_addr(__offs__))
-#define ocd_readw(__offs__) __raw_readw(ocd_addr(__offs__))
-#define ocd_readb(__offs__) __raw_readb(ocd_addr(__offs__))
-#define ocd_writel(__val__, __offs__) \
-       __raw_writel((__val__), ocd_addr(__offs__))
-#define ocd_writew(__val__, __offs__) \
-       __raw_writew((__val__), ocd_addr(__offs__))
-#define ocd_writeb(__val__, __offs__) \
-       __raw_writeb((__val__), ocd_addr(__offs__))
-
-/* TITAN register access - 32 bit-wide only */
-#define titan_readl(__offs__) __raw_readl(titan_addr(__offs__))
-#define titan_writel(__val__, __offs__) \
-       __raw_writel((__val__), titan_addr(__offs__))
-
-/* Protect access to shared TITAN registers */
-extern spinlock_t titan_lock;
-extern int titan_irqflags;
-#define lock_titan_regs() spin_lock_irqsave(&titan_lock, titan_irqflags)
-#define unlock_titan_regs() spin_unlock_irqrestore(&titan_lock, titan_irqflags)
-
-#endif /* !defined(_ASM_RM9K_OCD_H) */
index c6e9cd2bca8dbf7de5512c9be8de0f2b92db7bd4..17960fe7a8ce4ef21b7a94cca10c7b5af61cec17 100644 (file)
@@ -133,6 +133,8 @@ static inline int syscall_get_arch(void)
 #ifdef CONFIG_64BIT
        if (!test_thread_flag(TIF_32BIT_REGS))
                arch |= __AUDIT_ARCH_64BIT;
+       if (test_thread_flag(TIF_32BIT_ADDR))
+               arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
 #endif
 #if defined(__LITTLE_ENDIAN)
        arch |=  __AUDIT_ARCH_LE;
index df6e775f3fef524e8d049c24433eff06dade0fd7..3125797f2a88a6bc3e44cdc215d416d604e7070d 100644 (file)
@@ -484,13 +484,13 @@ enum MIPS6e_i8_func {
  * Damn ...  bitfields depend from byteorder :-(
  */
 #ifdef __MIPSEB__
-#define BITFIELD_FIELD(field, more)                                    \
+#define __BITFIELD_FIELD(field, more)                                  \
        field;                                                          \
        more
 
 #elif defined(__MIPSEL__)
 
-#define BITFIELD_FIELD(field, more)                                    \
+#define __BITFIELD_FIELD(field, more)                                  \
        more                                                            \
        field;
 
@@ -499,112 +499,112 @@ enum MIPS6e_i8_func {
 #endif
 
 struct j_format {
-       BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
-       BITFIELD_FIELD(unsigned int target : 26,
+       __BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
+       __BITFIELD_FIELD(unsigned int target : 26,
        ;))
 };
 
 struct i_format {                      /* signed immediate format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(signed int simmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(signed int simmediate : 16,
        ;))))
 };
 
 struct u_format {                      /* unsigned immediate format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int uimmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int uimmediate : 16,
        ;))))
 };
 
 struct c_format {                      /* Cache (>= R6000) format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int c_op : 3,
-       BITFIELD_FIELD(unsigned int cache : 2,
-       BITFIELD_FIELD(unsigned int simmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int c_op : 3,
+       __BITFIELD_FIELD(unsigned int cache : 2,
+       __BITFIELD_FIELD(unsigned int simmediate : 16,
        ;)))))
 };
 
 struct r_format {                      /* Register format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int re : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int re : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct p_format {              /* Performance counter format (R10000) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int re : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int re : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct f_format {                      /* FPU register format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int : 1,
-       BITFIELD_FIELD(unsigned int fmt : 4,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int re : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int : 1,
+       __BITFIELD_FIELD(unsigned int fmt : 4,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int re : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))))))
 };
 
 struct ma_format {             /* FPU multiply and add format (MIPS IV) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int fr : 5,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 4,
-       BITFIELD_FIELD(unsigned int fmt : 2,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int fr : 5,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 4,
+       __BITFIELD_FIELD(unsigned int fmt : 2,
        ;)))))))
 };
 
 struct b_format {                      /* BREAK and SYSCALL */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int code : 20,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int code : 20,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))
 };
 
 struct ps_format {                     /* MIPS-3D / paired single format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct v_format {                              /* MDMX vector format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int sel : 4,
-       BITFIELD_FIELD(unsigned int fmt : 1,
-       BITFIELD_FIELD(unsigned int vt : 5,
-       BITFIELD_FIELD(unsigned int vs : 5,
-       BITFIELD_FIELD(unsigned int vd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int sel : 4,
+       __BITFIELD_FIELD(unsigned int fmt : 1,
+       __BITFIELD_FIELD(unsigned int vt : 5,
+       __BITFIELD_FIELD(unsigned int vs : 5,
+       __BITFIELD_FIELD(unsigned int vd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))))))
 };
 
 struct spec3_format {   /* SPEC3 */
-       BITFIELD_FIELD(unsigned int opcode:6,
-       BITFIELD_FIELD(unsigned int rs:5,
-       BITFIELD_FIELD(unsigned int rt:5,
-       BITFIELD_FIELD(signed int simmediate:9,
-       BITFIELD_FIELD(unsigned int func:7,
+       __BITFIELD_FIELD(unsigned int opcode:6,
+       __BITFIELD_FIELD(unsigned int rs:5,
+       __BITFIELD_FIELD(unsigned int rt:5,
+       __BITFIELD_FIELD(signed int simmediate:9,
+       __BITFIELD_FIELD(unsigned int func:7,
        ;)))))
 };
 
@@ -616,141 +616,141 @@ struct spec3_format {   /* SPEC3 */
  *     if it is MIPS32 instruction re-encoded for use in the microMIPS ASE.
  */
 struct fb_format {             /* FPU branch format (MIPS32) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int bc : 5,
-       BITFIELD_FIELD(unsigned int cc : 3,
-       BITFIELD_FIELD(unsigned int flag : 2,
-       BITFIELD_FIELD(signed int simmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int bc : 5,
+       __BITFIELD_FIELD(unsigned int cc : 3,
+       __BITFIELD_FIELD(unsigned int flag : 2,
+       __BITFIELD_FIELD(signed int simmediate : 16,
        ;)))))
 };
 
 struct fp0_format {            /* FPU multiply and add format (MIPS32) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int fmt : 5,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int fmt : 5,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp0_format {         /* FPU multipy and add format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int fmt : 3,
-       BITFIELD_FIELD(unsigned int op : 2,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int fmt : 3,
+       __BITFIELD_FIELD(unsigned int op : 2,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))))))
 };
 
 struct fp1_format {            /* FPU mfc1 and cfc1 format (MIPS32) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int op : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int op : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp1_format {         /* FPU mfc1 and cfc1 format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fmt : 2,
-       BITFIELD_FIELD(unsigned int op : 8,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fmt : 2,
+       __BITFIELD_FIELD(unsigned int op : 8,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp2_format {         /* FPU movt and movf format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int cc : 3,
-       BITFIELD_FIELD(unsigned int zero : 2,
-       BITFIELD_FIELD(unsigned int fmt : 2,
-       BITFIELD_FIELD(unsigned int op : 3,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int cc : 3,
+       __BITFIELD_FIELD(unsigned int zero : 2,
+       __BITFIELD_FIELD(unsigned int fmt : 2,
+       __BITFIELD_FIELD(unsigned int op : 3,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))))
 };
 
 struct mm_fp3_format {         /* FPU abs and neg format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fmt : 3,
-       BITFIELD_FIELD(unsigned int op : 7,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fmt : 3,
+       __BITFIELD_FIELD(unsigned int op : 7,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp4_format {         /* FPU c.cond format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int cc : 3,
-       BITFIELD_FIELD(unsigned int fmt : 3,
-       BITFIELD_FIELD(unsigned int cond : 4,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int cc : 3,
+       __BITFIELD_FIELD(unsigned int fmt : 3,
+       __BITFIELD_FIELD(unsigned int cond : 4,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))))))
 };
 
 struct mm_fp5_format {         /* FPU lwxc1 and swxc1 format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int index : 5,
-       BITFIELD_FIELD(unsigned int base : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int op : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int index : 5,
+       __BITFIELD_FIELD(unsigned int base : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int op : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct fp6_format {            /* FPU madd and msub format (MIPS IV) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int fr : 5,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int fr : 5,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp6_format {         /* FPU madd and msub format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int fr : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int fr : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_i_format {           /* Immediate format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(signed int simmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(signed int simmediate : 16,
        ;))))
 };
 
 struct mm_m_format {           /* Multi-word load/store format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int base : 5,
-       BITFIELD_FIELD(unsigned int func : 4,
-       BITFIELD_FIELD(signed int simmediate : 12,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int base : 5,
+       __BITFIELD_FIELD(unsigned int func : 4,
+       __BITFIELD_FIELD(signed int simmediate : 12,
        ;)))))
 };
 
 struct mm_x_format {           /* Scaled indexed load format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int index : 5,
-       BITFIELD_FIELD(unsigned int base : 5,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int func : 11,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int index : 5,
+       __BITFIELD_FIELD(unsigned int base : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int func : 11,
        ;)))))
 };
 
@@ -758,51 +758,51 @@ struct mm_x_format {              /* Scaled indexed load format (microMIPS) */
  * microMIPS instruction formats (16-bit length)
  */
 struct mm_b0_format {          /* Unconditional branch format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(signed int simmediate : 10,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(signed int simmediate : 10,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;)))
 };
 
 struct mm_b1_format {          /* Conditional branch format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 3,
-       BITFIELD_FIELD(signed int simmediate : 7,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 3,
+       __BITFIELD_FIELD(signed int simmediate : 7,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;))))
 };
 
 struct mm16_m_format {         /* Multi-word load/store format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int func : 4,
-       BITFIELD_FIELD(unsigned int rlist : 2,
-       BITFIELD_FIELD(unsigned int imm : 4,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int func : 4,
+       __BITFIELD_FIELD(unsigned int rlist : 2,
+       __BITFIELD_FIELD(unsigned int imm : 4,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;)))))
 };
 
 struct mm16_rb_format {                /* Signed immediate format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 3,
-       BITFIELD_FIELD(unsigned int base : 3,
-       BITFIELD_FIELD(signed int simmediate : 4,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 3,
+       __BITFIELD_FIELD(unsigned int base : 3,
+       __BITFIELD_FIELD(signed int simmediate : 4,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;)))))
 };
 
 struct mm16_r3_format {                /* Load from global pointer format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 3,
-       BITFIELD_FIELD(signed int simmediate : 7,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 3,
+       __BITFIELD_FIELD(signed int simmediate : 7,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;))))
 };
 
 struct mm16_r5_format {                /* Load/store from stack pointer format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(signed int simmediate : 5,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(signed int simmediate : 5,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;))))
 };
 
@@ -810,57 +810,57 @@ struct mm16_r5_format {           /* Load/store from stack pointer format */
  * MIPS16e instruction formats (16-bit length)
  */
 struct m16e_rr {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int rx : 3,
-       BITFIELD_FIELD(unsigned int nd : 1,
-       BITFIELD_FIELD(unsigned int l : 1,
-       BITFIELD_FIELD(unsigned int ra : 1,
-       BITFIELD_FIELD(unsigned int func : 5,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int rx : 3,
+       __BITFIELD_FIELD(unsigned int nd : 1,
+       __BITFIELD_FIELD(unsigned int l : 1,
+       __BITFIELD_FIELD(unsigned int ra : 1,
+       __BITFIELD_FIELD(unsigned int func : 5,
        ;))))))
 };
 
 struct m16e_jal {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int x : 1,
-       BITFIELD_FIELD(unsigned int imm20_16 : 5,
-       BITFIELD_FIELD(signed int imm25_21 : 5,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int x : 1,
+       __BITFIELD_FIELD(unsigned int imm20_16 : 5,
+       __BITFIELD_FIELD(signed int imm25_21 : 5,
        ;))))
 };
 
 struct m16e_i64 {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int func : 3,
-       BITFIELD_FIELD(unsigned int imm : 8,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int func : 3,
+       __BITFIELD_FIELD(unsigned int imm : 8,
        ;)))
 };
 
 struct m16e_ri64 {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int func : 3,
-       BITFIELD_FIELD(unsigned int ry : 3,
-       BITFIELD_FIELD(unsigned int imm : 5,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int func : 3,
+       __BITFIELD_FIELD(unsigned int ry : 3,
+       __BITFIELD_FIELD(unsigned int imm : 5,
        ;))))
 };
 
 struct m16e_ri {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int rx : 3,
-       BITFIELD_FIELD(unsigned int imm : 8,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int rx : 3,
+       __BITFIELD_FIELD(unsigned int imm : 8,
        ;)))
 };
 
 struct m16e_rri {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int rx : 3,
-       BITFIELD_FIELD(unsigned int ry : 3,
-       BITFIELD_FIELD(unsigned int imm : 5,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int rx : 3,
+       __BITFIELD_FIELD(unsigned int ry : 3,
+       __BITFIELD_FIELD(unsigned int imm : 5,
        ;))))
 };
 
 struct m16e_i8 {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int func : 3,
-       BITFIELD_FIELD(unsigned int imm : 8,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int func : 3,
+       __BITFIELD_FIELD(unsigned int imm : 8,
        ;)))
 };
 
index d6e154a9e6a55ef98d964f71629a129f6fd04d27..2692abb28e3637db7705217de88239fb968b2935 100644 (file)
 #define __NR_finit_module              (__NR_Linux + 348)
 #define __NR_sched_setattr             (__NR_Linux + 349)
 #define __NR_sched_getattr             (__NR_Linux + 350)
+#define __NR_renameat2                 (__NR_Linux + 351)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            350
+#define __NR_Linux_syscalls            351
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_getdents64                        (__NR_Linux + 308)
 #define __NR_sched_setattr             (__NR_Linux + 309)
 #define __NR_sched_getattr             (__NR_Linux + 310)
+#define __NR_renameat2                 (__NR_Linux + 311)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            310
+#define __NR_Linux_syscalls            311
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_finit_module              (__NR_Linux + 312)
 #define __NR_sched_setattr             (__NR_Linux + 313)
 #define __NR_sched_getattr             (__NR_Linux + 314)
+#define __NR_renameat2                 (__NR_Linux + 315)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            314
+#define __NR_Linux_syscalls            315
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
index e40971b51d2f0bf47e3eb217f43c652c732e7a09..037a44d962f37e1b94251f13b054103e6cbb1ff5 100644 (file)
@@ -124,14 +124,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        seq_printf(m, "kscratch registers\t: %d\n",
                      hweight8(cpu_data[n].kscratch_mask));
        seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
-       if (cpu_has_mipsmt) {
-               seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
-#if defined(CONFIG_MIPS_MT_SMTC)
-               seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
-#endif
-       }
-#endif
+
        sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
                      cpu_has_vce ? "%u" : "not available");
        seq_printf(m, fmt, 'D', vced_count);
index fdc70b40044265fb2107df186eb0e593a96b9571..3245474f19d5cdec5563a3841ecdcd3760f98e17 100644 (file)
@@ -577,3 +577,4 @@ EXPORT(sys_call_table)
        PTR     sys_finit_module
        PTR     sys_sched_setattr
        PTR     sys_sched_getattr               /* 4350 */
+       PTR     sys_renameat2
index dd99c3285aeae75f65ae982de46a755ae953f140..be2fedd4ae33193937010b376e62c7d9e327022d 100644 (file)
@@ -430,4 +430,5 @@ EXPORT(sys_call_table)
        PTR     sys_getdents64
        PTR     sys_sched_setattr
        PTR     sys_sched_getattr               /* 5310 */
+       PTR     sys_renameat2
        .size   sys_call_table,.-sys_call_table
index f68d2f4f009021de3ed784e9733aee5d3580d8e0..c1dbcda4b816844cc64821aa8d64a79d5c9a38f3 100644 (file)
@@ -423,4 +423,5 @@ EXPORT(sysn32_call_table)
        PTR     sys_finit_module
        PTR     sys_sched_setattr
        PTR     sys_sched_getattr
+       PTR     sys_renameat2                   /* 6315 */
        .size   sysn32_call_table,.-sysn32_call_table
index 70f6acecd928896c54b586b3470c27eb703721be..f1343ccd7ed7e58d14563c4413ede6517ff60853 100644 (file)
@@ -556,4 +556,5 @@ EXPORT(sys32_call_table)
        PTR     sys_finit_module
        PTR     sys_sched_setattr
        PTR     sys_sched_getattr               /* 4350 */
+       PTR     sys_renameat2
        .size   sys32_call_table,.-sys32_call_table
index fac1f5b178ebe3c558ad369a9e1243340da04c80..143b8a37b5e41358f3faa814375869b058c9fee7 100644 (file)
@@ -8,6 +8,7 @@
        };
 
        memory@0 {
+               device_type = "memory";
                reg = <0x0 0x2000000>;
        };
 
index 2e4825e483882b217046caf4a847a99c3db56afe..9901237563c58922d213b9af10b9ed3c504b18e5 100644 (file)
 #define UNIT(unit)  ((unit)*NBYTES)
 
 #define ADDC(sum,reg)                                          \
+       .set    push;                                           \
+       .set    noat;                                           \
        ADD     sum, reg;                                       \
        sltu    v1, sum, reg;                                   \
        ADD     sum, v1;                                        \
+       .set    pop
 
 #define ADDC32(sum,reg)                                                \
+       .set    push;                                           \
+       .set    noat;                                           \
        addu    sum, reg;                                       \
        sltu    v1, sum, reg;                                   \
        addu    sum, v1;                                        \
+       .set    pop
 
 #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)   \
        LOAD    _t0, (offset + UNIT(0))(src);                   \
@@ -710,6 +716,8 @@ LEAF(csum_partial)
        ADDC(sum, t2)
 .Ldone\@:
        /* fold checksum */
+       .set    push
+       .set    noat
 #ifdef USE_DOUBLE
        dsll32  v1, sum, 0
        daddu   sum, v1
@@ -732,6 +740,7 @@ LEAF(csum_partial)
        or      sum, sum, t0
 1:
 #endif
+       .set    pop
        .set reorder
        ADDC32(sum, psum)
        jr      ra
index 44713af15a62bc60ebc53ffcbc9e73ad10d87380..705cfb7c1a74e0843e40567219024882ab932ac9 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1994 by Waldorf Electronics
  * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2007, 2014 Maciej W. Rozycki
  */
 #include <linux/module.h>
 #include <linux/param.h>
 #include <asm/compiler.h>
 #include <asm/war.h>
 
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+#define GCC_DADDI_IMM_ASM() "I"
+#else
+#define GCC_DADDI_IMM_ASM() "r"
+#endif
+
 void __delay(unsigned long loops)
 {
        __asm__ __volatile__ (
@@ -22,13 +28,13 @@ void __delay(unsigned long loops)
        "       .align  3                                       \n"
        "1:     bnez    %0, 1b                                  \n"
 #if BITS_PER_LONG == 32
-       "       subu    %0,                                   \n"
+       "       subu    %0, %1                                  \n"
 #else
-       "       dsubu   %0,                                   \n"
+       "       dsubu   %0, %1                                  \n"
 #endif
        "       .set    reorder                                 \n"
        : "=r" (loops)
-       : "0" (loops));
+       : GCC_DADDI_IMM_ASM() (1), "0" (loops));
 }
 EXPORT_SYMBOL(__delay);
 
index d3301cd1e9a51b4c387a7f8c3d4a46e2632761e2..3c32baf8b49447a591e6e552e634eafe5be8d09e 100644 (file)
@@ -35,7 +35,6 @@ LEAF(__strncpy_from_\func\()_asm)
        bnez            v0, .Lfault\@
 
 FEXPORT(__strncpy_from_\func\()_nocheck_asm)
-       .set            noreorder
        move            t0, zero
        move            v1, a1
 .ifeqs "\func","kernel"
@@ -45,21 +44,21 @@ FEXPORT(__strncpy_from_\func\()_nocheck_asm)
 .endif
        PTR_ADDIU       v1, 1
        R10KCBARRIER(0(ra))
+       sb              v0, (a0)
        beqz            v0, 2f
-        sb             v0, (a0)
        PTR_ADDIU       t0, 1
+       PTR_ADDIU       a0, 1
        bne             t0, a2, 1b
-        PTR_ADDIU      a0, 1
 2:     PTR_ADDU        v0, a1, t0
        xor             v0, a1
        bltz            v0, .Lfault\@
-        nop
+       move            v0, t0
        jr              ra                      # return n
-        move           v0, t0
        END(__strncpy_from_\func\()_asm)
 
-.Lfault\@: jr          ra
-         li            v0, -EFAULT
+.Lfault\@:
+       li              v0, -EFAULT
+       jr              ra
 
        .section        __ex_table,"a"
        PTR             1b, .Lfault\@
index 7397be226a06a2a7d0481fac7b2dd476d4fe6d3a..603d79a95f4778e40d5ec94b78f1efa9750cd214 100644 (file)
@@ -64,7 +64,6 @@ config LEMOTE_MACH3A
        bool "Lemote Loongson 3A family machines"
        select ARCH_SPARSEMEM_ENABLE
        select GENERIC_ISA_DMA_SUPPORT_BROKEN
-       select GENERIC_HARDIRQS_NO__DO_IRQ
        select BOOT_ELF32
        select BOARD_SCACHE
        select CSRC_R4K
index e1f427f4f5f3fed4985bb370054421d7d2f91cdc..67dd94ef28e60f4023b16f7da8fac716c558b3bf 100644 (file)
@@ -91,6 +91,7 @@ EXPORT_SYMBOL(clk_put);
 
 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
+       unsigned int rate_khz = rate / 1000;
        int ret = 0;
        int regval;
        int i;
@@ -111,10 +112,10 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
                if (loongson2_clockmod_table[i].frequency ==
                    CPUFREQ_ENTRY_INVALID)
                        continue;
-               if (rate == loongson2_clockmod_table[i].frequency)
+               if (rate_khz == loongson2_clockmod_table[i].frequency)
                        break;
        }
-       if (rate != loongson2_clockmod_table[i].frequency)
+       if (rate_khz != loongson2_clockmod_table[i].frequency)
                return -ENOTSUPP;
 
        clk->rate = rate;
index 30a494db99c2a0eb4d51aa64ca410de956801837..a5427c6e97574c6a1a0dbad628f7a75a3feed516 100644 (file)
 
 #define FASTPATH_SIZE  128
 
+EXPORT(tlbmiss_handler_setup_pgd_start)
 LEAF(tlbmiss_handler_setup_pgd)
-       .space          16 * 4
+1:     j       1b              /* Dummy, will be replaced. */
+       .space  64
 END(tlbmiss_handler_setup_pgd)
 EXPORT(tlbmiss_handler_setup_pgd_end)
 
index ee88367ab3addcda0e4aba412f524493dddd079a..f99ec587b151919bd90437687f08614c24aea32e 100644 (file)
@@ -1422,16 +1422,17 @@ static void build_r4000_tlb_refill_handler(void)
 extern u32 handle_tlbl[], handle_tlbl_end[];
 extern u32 handle_tlbs[], handle_tlbs_end[];
 extern u32 handle_tlbm[], handle_tlbm_end[];
-extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
+extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
+extern u32 tlbmiss_handler_setup_pgd_end[];
 
 static void build_setup_pgd(void)
 {
        const int a0 = 4;
        const int __maybe_unused a1 = 5;
        const int __maybe_unused a2 = 6;
-       u32 *p = tlbmiss_handler_setup_pgd;
+       u32 *p = tlbmiss_handler_setup_pgd_start;
        const int tlbmiss_handler_setup_pgd_size =
-               tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
+               tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start;
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
        long pgdc = (long)pgd_current;
 #endif
index 35eb874ab7f11b25248e40714c2e477ec0f1d4a3..709f58132f5cba9d87adc37afbe84c38db40a5ac 100644 (file)
@@ -7,6 +7,7 @@
        model = "Ralink MT7620A evaluation board";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x0 0x2000000>;
        };
 
index 322d7002595bda983b95f9bb77a17de24980c001..0a685db093d4dbd367228a1cb9e4d42bdf43e0e7 100644 (file)
@@ -7,6 +7,7 @@
        model = "Ralink RT2880 evaluation board";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x8000000 0x2000000>;
        };
 
index 0ac73ea281984909df89403d17fb82fd4e9f3ae4..ec9e9a03554140a4c11947b2ff0a7548cb4fb7fb 100644 (file)
@@ -7,6 +7,7 @@
        model = "Ralink RT3052 evaluation board";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x0 0x2000000>;
        };
 
index 2fa6b330bf4f2fcb8d6f63f4b8df55cb1fe6b065..e8df21a5d10d9eb35a2dfc88af0d137d9c66156e 100644 (file)
@@ -7,6 +7,7 @@
        model = "Ralink RT3883 evaluation board";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x0 0x2000000>;
        };
 
index 1faefed32749c93ff31a7d6237732d3dd6c55f26..108d48e652af4c802da1676b18252a5394d30295 100644 (file)
@@ -22,6 +22,7 @@ config PARISC
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_STRNCPY_FROM_USER
        select SYSCTL_ARCH_UNALIGN_ALLOW
+       select SYSCTL_EXCEPTION_TRACE
        select HAVE_MOD_ARCH_SPECIFIC
        select VIRT_TO_BUS
        select MODULES_USE_ELF_RELA
index 198a86feb5748fc595f7230941485af0bc84a76a..d951c9681ab316aa2be88cd65b4d18ae62f958cb 100644 (file)
 #define STACK_TOP      TASK_SIZE
 #define STACK_TOP_MAX  DEFAULT_TASK_SIZE
 
+/* Allow bigger stacks for 64-bit processes */
+#define STACK_SIZE_MAX (USER_WIDE_MODE                                 \
+                        ? (1 << 30)    /* 1 GB */                      \
+                        : (CONFIG_MAX_STACK_SIZE_MB*1024*1024))
+
 #endif
 
 #ifndef __ASSEMBLY__
index 265ae5190b0a70e4dc2539d9a661e1e3d2deda05..47e0e21d2272468bbc864e6e30ced36c18929534 100644 (file)
 #define __NR_sched_setattr     (__NR_Linux + 334)
 #define __NR_sched_getattr     (__NR_Linux + 335)
 #define __NR_utimes            (__NR_Linux + 336)
+#define __NR_renameat2         (__NR_Linux + 337)
 
-#define __NR_Linux_syscalls    (__NR_utimes + 1)
+#define __NR_Linux_syscalls    (__NR_renameat2 + 1)
 
 
 #define __IGNORE_select                /* newselect */
index 31ffa9b5532216620d9a6106d2d4b76e7501ee0b..e1ffea2f9a0b05ccda844969dcb7c519ab17077a 100644 (file)
@@ -72,10 +72,10 @@ static unsigned long mmap_upper_limit(void)
 {
        unsigned long stack_base;
 
-       /* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */
+       /* Limit stack size - see setup_arg_pages() in fs/exec.c */
        stack_base = rlimit_max(RLIMIT_STACK);
-       if (stack_base > (1 << 30))
-               stack_base = 1 << 30;
+       if (stack_base > STACK_SIZE_MAX)
+               stack_base = STACK_SIZE_MAX;
 
        return PAGE_ALIGN(STACK_TOP - stack_base);
 }
index a63bb179f79a1fcd56a7bcf1adbe759f46587b71..83878601103701df4497913c89d5702897249a38 100644 (file)
@@ -589,10 +589,13 @@ cas_nocontend:
 # endif
 /* ENABLE_LWS_DEBUG */
 
+       rsm     PSW_SM_I, %r0                           /* Disable interrupts */
+       /* COW breaks can cause contention on UP systems */
        LDCW    0(%sr2,%r20), %r28                      /* Try to acquire the lock */
        cmpb,<>,n       %r0, %r28, cas_action           /* Did we get it? */
 cas_wouldblock:
        ldo     2(%r0), %r28                            /* 2nd case */
+       ssm     PSW_SM_I, %r0
        b       lws_exit                                /* Contended... */
        ldo     -EAGAIN(%r0), %r21                      /* Spin in userspace */
 
@@ -619,15 +622,17 @@ cas_action:
        stw     %r1, 4(%sr2,%r20)
 #endif
        /* The load and store could fail */
-1:     ldw     0(%sr3,%r26), %r28
+1:     ldw,ma  0(%sr3,%r26), %r28
        sub,<>  %r28, %r25, %r0
-2:     stw     %r24, 0(%sr3,%r26)
+2:     stw,ma  %r24, 0(%sr3,%r26)
        /* Free lock */
-       stw     %r20, 0(%sr2,%r20)
+       stw,ma  %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        /* Clear thread register indicator */
        stw     %r0, 4(%sr2,%r20)
 #endif
+       /* Enable interrupts */
+       ssm     PSW_SM_I, %r0
        /* Return to userspace, set no error */
        b       lws_exit
        copy    %r0, %r21
@@ -639,6 +644,7 @@ cas_action:
 #if ENABLE_LWS_DEBUG
        stw     %r0, 4(%sr2,%r20)
 #endif
+       ssm     PSW_SM_I, %r0
        b       lws_exit
        ldo     -EFAULT(%r0),%r21       /* set errno */
        nop
index 83ead0ea127d23cdcb10115a5b04d76f40e10612..c5fa7a697fba2a13b0779e59904ec7c1428e8ef4 100644 (file)
        ENTRY_SAME(sched_setattr)
        ENTRY_SAME(sched_getattr)       /* 335 */
        ENTRY_COMP(utimes)
+       ENTRY_SAME(renameat2)
 
        /* Nothing yet */
 
index 1cd1d0c83b6d7bd7a21d0a22c57e18f2ac27f65a..47ee620d15d27850ab8ebac1f739dfd3215dae9b 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/console.h>
 #include <linux/bug.h>
+#include <linux/ratelimit.h>
 
 #include <asm/assembly.h>
 #include <asm/uaccess.h>
@@ -42,9 +43,6 @@
 
 #include "../math-emu/math-emu.h"      /* for handle_fpe() */
 
-#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
-                         /*  dumped to the console via printk)          */
-
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 DEFINE_SPINLOCK(pa_dbit_lock);
 #endif
@@ -160,6 +158,17 @@ void show_regs(struct pt_regs *regs)
        }
 }
 
+static DEFINE_RATELIMIT_STATE(_hppa_rs,
+       DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
+#define parisc_printk_ratelimited(critical, regs, fmt, ...)    {             \
+       if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
+               printk(fmt, ##__VA_ARGS__);                                   \
+               show_regs(regs);                                              \
+       }                                                                     \
+}
+
+
 static void do_show_stack(struct unwind_frame_info *info)
 {
        int i = 1;
@@ -229,12 +238,10 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
                if (err == 0)
                        return; /* STFU */
 
-               printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
+               parisc_printk_ratelimited(1, regs,
+                       KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
                        current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
-#ifdef PRINT_USER_FAULTS
-               /* XXX for debugging only */
-               show_regs(regs);
-#endif
+
                return;
        }
 
@@ -321,14 +328,11 @@ static void handle_break(struct pt_regs *regs)
                        (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
        }
 
-#ifdef PRINT_USER_FAULTS
-       if (unlikely(iir != GDB_BREAK_INSN)) {
-               printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
+       if (unlikely(iir != GDB_BREAK_INSN))
+               parisc_printk_ratelimited(0, regs,
+                       KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
                        iir & 31, (iir>>13) & ((1<<13)-1),
                        task_pid_nr(current), current->comm);
-               show_regs(regs);
-       }
-#endif
 
        /* send standard GDB signal */
        handle_gdb_break(regs, TRAP_BRKPT);
@@ -758,11 +762,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 
        default:
                if (user_mode(regs)) {
-#ifdef PRINT_USER_FAULTS
-                       printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
-                           task_pid_nr(current), current->comm);
-                       show_regs(regs);
-#endif
+                       parisc_printk_ratelimited(0, regs, KERN_DEBUG
+                               "handle_interruption() pid=%d command='%s'\n",
+                               task_pid_nr(current), current->comm);
                        /* SIGBUS, for lack of a better one. */
                        si.si_signo = SIGBUS;
                        si.si_code = BUS_OBJERR;
@@ -779,16 +781,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 
        if (user_mode(regs)) {
            if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
-#ifdef PRINT_USER_FAULTS
-               if (fault_space == 0)
-                       printk(KERN_DEBUG "User Fault on Kernel Space ");
-               else
-                       printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
-                              code);
-               printk(KERN_CONT "pid=%d command='%s'\n",
-                      task_pid_nr(current), current->comm);
-               show_regs(regs);
-#endif
+               parisc_printk_ratelimited(0, regs, KERN_DEBUG
+                               "User fault %d on space 0x%08lx, pid=%d command='%s'\n",
+                               code, fault_space,
+                               task_pid_nr(current), current->comm);
                si.si_signo = SIGSEGV;
                si.si_errno = 0;
                si.si_code = SEGV_MAPERR;
index 747550762f3ca25acf6dabdfcc7d8aef9de31779..3ca9c1131cfe0d80b9b12fb5c0e599a3363942c0 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/traps.h>
 
-#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
-                        /*  dumped to the console via printk)          */
-
-
 /* Various important other fields */
 #define bit22set(x)            (x & 0x00000200)
 #define bits23_25set(x)                (x & 0x000001c0)
@@ -34,6 +30,8 @@
 
 DEFINE_PER_CPU(struct exception_data, exception_data);
 
+int show_unhandled_signals = 1;
+
 /*
  * parisc_acctyp(unsigned int inst) --
  *    Given a PA-RISC memory access instruction, determine if the
@@ -173,6 +171,32 @@ int fixup_exception(struct pt_regs *regs)
        return 0;
 }
 
+/*
+ * Print out info about fatal segfaults, if the show_unhandled_signals
+ * sysctl is set:
+ */
+static inline void
+show_signal_msg(struct pt_regs *regs, unsigned long code,
+               unsigned long address, struct task_struct *tsk,
+               struct vm_area_struct *vma)
+{
+       if (!unhandled_signal(tsk, SIGSEGV))
+               return;
+
+       if (!printk_ratelimit())
+               return;
+
+       pr_warn("\n");
+       pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx",
+           tsk->comm, code, address);
+       print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
+       if (vma)
+               pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+                               vma->vm_start, vma->vm_end);
+
+       show_regs(regs);
+}
+
 void do_page_fault(struct pt_regs *regs, unsigned long code,
                              unsigned long address)
 {
@@ -270,16 +294,8 @@ bad_area:
        if (user_mode(regs)) {
                struct siginfo si;
 
-#ifdef PRINT_USER_FAULTS
-               printk(KERN_DEBUG "\n");
-               printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
-                   task_pid_nr(tsk), tsk->comm, code, address);
-               if (vma) {
-                       printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
-                                       vma->vm_start, vma->vm_end);
-               }
-               show_regs(regs);
-#endif
+               show_signal_msg(regs, code, address, tsk, vma);
+
                switch (code) {
                case 15:        /* Data TLB miss fault/Data page fault */
                        /* send SIGSEGV when outside of vma */
index 122a580f732246c02c5e31c301078cb725435919..7e711bdcc6da5adb399e8ac0a55097e76317fe51 100644 (file)
@@ -813,9 +813,6 @@ static void __init clocksource_init(void)
 static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
 {
-       /* Don't adjust the decrementer if some irq work is pending */
-       if (test_irq_work_pending())
-               return 0;
        __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
        set_dec(evt);
 
index 253fefe3d1a0e76fd4ed3996711b6f1784725d6c..5b51079f3e3ba52f7ecb51c3fb27a5d7255ebb48 100644 (file)
@@ -549,7 +549,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
                ret = ioda_eeh_phb_reset(hose, option);
        } else {
                bus = eeh_pe_bus_get(pe);
-               if (pci_is_root_bus(bus))
+               if (pci_is_root_bus(bus) ||
+                   pci_is_root_bus(bus->parent))
                        ret = ioda_eeh_root_reset(hose, option);
                else
                        ret = ioda_eeh_bridge_reset(hose, bus->self, option);
index cf3c0089bef253d7817a604f9a7f195a4ffd9bc0..23223cd63e54811d9ab85823df1e03303c88036e 100644 (file)
@@ -820,6 +820,9 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
                else
                        memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
                spin_unlock(&ctrblk_lock);
+       } else {
+               if (!nbytes)
+                       memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
        }
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
index 0a5aac8a9412b64c4b2c43f9999e0f1e2f3815cb..7acb77f7ef1ada0183280c07e75f2c34579bac72 100644 (file)
@@ -429,6 +429,9 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
                else
                        memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
                spin_unlock(&ctrblk_lock);
+       } else {
+               if (!nbytes)
+                       memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
        }
        /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
        if (nbytes) {
index 452d3ebd9d0fba3b513a6978b096bd22b27c1b46..e9f8fa9337fe1fae0d5d0ba9bf03a8426961a3c0 100644 (file)
@@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
                return NULL;
        memset(header, 0, sz);
        header->pages = sz / PAGE_SIZE;
-       hole = sz - (bpfsize + sizeof(*header));
+       hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
        /* Insert random number of illegal instructions before BPF code
         * and make sure the first instruction starts at an even address.
         */
index fde5abaac0ccbf6ee8c9d7f3e198d2b93e1fd384..1a49ffdf9da91056cb24357b6fdefea772658201 100644 (file)
@@ -24,7 +24,8 @@
 
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
  * The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
  * The vmalloc area spans 0x100000000 to 0x200000000.
  * Since modules need to be in the lowest 32-bits of the address space,
@@ -33,7 +34,8 @@
  * 0x400000000.
  */
 #define        TLBTEMP_BASE            _AC(0x0000000006000000,UL)
-#define        TSBMAP_BASE             _AC(0x0000000008000000,UL)
+#define        TSBMAP_8K_BASE          _AC(0x0000000008000000,UL)
+#define        TSBMAP_4M_BASE          _AC(0x0000000008400000,UL)
 #define MODULES_VADDR          _AC(0x0000000010000000,UL)
 #define MODULES_LEN            _AC(0x00000000e0000000,UL)
 #define MODULES_END            _AC(0x00000000f0000000,UL)
index a364000ca1aa8a495f7e8b9bf59882350f41c6de..7f41d40b7e6e8ccf89b5ce12a9422bbf4e84ac2e 100644 (file)
@@ -151,7 +151,7 @@ static ssize_t store_mmustat_enable(struct device *s,
                        size_t count)
 {
        unsigned long val, err;
-       int ret = sscanf(buf, "%ld", &val);
+       int ret = sscanf(buf, "%lu", &val);
 
        if (ret != 1)
                return -EINVAL;
index 2c20ad63ddbf2bbf8a4da5e751e49650d8be7060..30eee6e8a81b2d45797aab304914b10571573b1a 100644 (file)
@@ -236,6 +236,7 @@ FUNC_NAME:  /* %o0=dst, %o1=src, %o2=len */
         */
        VISEntryHalf
 
+       membar          #Sync
        alignaddr       %o1, %g0, %g0
 
        add             %o1, (64 - 1), %o4
index a8ff0d1a3b6999ab7c0f16de972d192a48a951ff..4ced3fc66130c30b8870c21cea57611056046d50 100644 (file)
@@ -281,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
        show_regs(regs);
 }
 
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
-                                                        unsigned long addr)
-{
-       static int times;
-
-       if (times++ < 10)
-               printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
-                      "reports 64-bit fault address [%lx]\n",
-                      current->comm, current->pid, addr);
-       show_regs(regs);
-}
-
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
@@ -322,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
                                goto intr_or_no_mm;
                        }
                }
-               if (unlikely((address >> 32) != 0)) {
-                       bogus_32bit_fault_address(regs, address);
+               if (unlikely((address >> 32) != 0))
                        goto intr_or_no_mm;
-               }
        }
 
        if (regs->tstate & TSTATE_PRIV) {
index f5d506fdddad3dea459aa97308e9a8872f34ffe8..fe19b81acc091b4d994da81f5580fe4438664388 100644 (file)
@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
        mm->context.tsb_block[tsb_idx].tsb_nentries =
                tsb_bytes / sizeof(struct tsb);
 
-       base = TSBMAP_BASE;
+       switch (tsb_idx) {
+       case MM_TSB_BASE:
+               base = TSBMAP_8K_BASE;
+               break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+       case MM_TSB_HUGE:
+               base = TSBMAP_4M_BASE;
+               break;
+#endif
+       default:
+               BUG();
+       }
+
        tte = pgprot_val(PAGE_KERNEL_LOCKED);
        tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
index a8091216963b006145baa000e905edbdececae63..68c05398bba9b449a1324d54b584ce52d52aa8d1 100644 (file)
@@ -52,6 +52,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
 {
+       ptep_clear_flush(vma, addr, ptep);
 }
 
 static inline int huge_pte_none(pte_t pte)
index aa333d9668866f808955209f8cd71737d447eafc..adb02aa62af5e310ff51ca720d1163b247489ccb 100644 (file)
@@ -169,7 +169,6 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-       FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
 };
index 384df5105fbc9883626ec5482151babd43ce482a..136ac74dee823005cea04ea9600a0c62cdf5685c 100644 (file)
@@ -27,6 +27,7 @@
 static int __init x86_rdrand_setup(char *s)
 {
        setup_clear_cpu_cap(X86_FEATURE_RDRAND);
+       setup_clear_cpu_cap(X86_FEATURE_RDSEED);
        return 1;
 }
 __setup("nordrand", x86_rdrand_setup);
index af1d14a9ebdae1ac2fddace4c15271babd1613a3..dcbbaa165bdeed61dd2b504a13ca05ced99737c7 100644 (file)
@@ -20,6 +20,8 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
 
+int sysctl_ldt16 = 0;
+
 #ifdef CONFIG_SMP
 static void flush_ldt(void *current_mm)
 {
@@ -234,7 +236,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
         * IRET leaking the high bits of the kernel stack address.
         */
 #ifdef CONFIG_X86_64
-       if (!ldt_info.seg_32bit) {
+       if (!ldt_info.seg_32bit && !sysctl_ldt16) {
                error = -EINVAL;
                goto out_unlock;
        }
index dc017735bb91b7b2ec61f333b091c63accdb921b..6d5663a599a7a362756b5bcb6d3ff3e30ee34bae 100644 (file)
@@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
        memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
 
        header->pages = sz / PAGE_SIZE;
-       hole = sz - (proglen + sizeof(*header));
+       hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
 
        /* insert a random number of int3 instructions before BPF code */
        *image_ptr = &header->image[prandom_u32() % hole];
index 00348980a3a64a49180be23bda3517d314c6bf81..e1f220e3ca6899af1d542ce7d22903e961754c7c 100644 (file)
@@ -39,6 +39,7 @@
 #ifdef CONFIG_X86_64
 #define vdso_enabled                   sysctl_vsyscall32
 #define arch_setup_additional_pages    syscall32_setup_pages
+extern int sysctl_ldt16;
 #endif
 
 /*
@@ -249,6 +250,13 @@ static struct ctl_table abi_table2[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "ldt16",
+               .data           = &sysctl_ldt16,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        {}
 };
 
index e4a4145926f629787ce0647f3036a98298c8f055..1039fb9ff5f5f998628884dedab422c9b405a36c 100644 (file)
@@ -451,7 +451,20 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
        struct blkcg_gq *blkg;
        int i;
 
-       mutex_lock(&blkcg_pol_mutex);
+       /*
+        * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
+        * which ends up putting cgroup's internal cgroup_tree_mutex under
+        * it; however, cgroup_tree_mutex is nested above cgroup file
+        * active protection and grabbing blkcg_pol_mutex from a cgroup
+        * file operation creates a possible circular dependency.  cgroup
+        * internal locking is planned to go through further simplification
+        * and this issue should go away soon.  For now, let's trylock
+        * blkcg_pol_mutex and restart the write on failure.
+        *
+        * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
+        */
+       if (!mutex_trylock(&blkcg_pol_mutex))
+               return restart_syscall();
        spin_lock_irq(&blkcg->lock);
 
        /*
index d05d81b19b50c229b2aa100bf01f8ac2818c0c1c..7183b6af5dac2b72fc53edc7044d743eac57ed45 100644 (file)
@@ -119,7 +119,7 @@ obj-$(CONFIG_SGI_SN)                += sn/
 obj-y                          += firmware/
 obj-$(CONFIG_CRYPTO)           += crypto/
 obj-$(CONFIG_SUPERH)           += sh/
-obj-$(CONFIG_ARCH_SHMOBILE_LEGACY)     += sh/
+obj-$(CONFIG_ARCH_SHMOBILE)    += sh/
 ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
 obj-y                          += clocksource/
 endif
index ab686b31010034083ba3b4784ba288fb93b3f554..a34a22841002495713a74f960dea482ecee8dc11 100644 (file)
@@ -47,6 +47,23 @@ config ACPI_SLEEP
        depends on SUSPEND || HIBERNATION
        default y
 
+config ACPI_PROCFS_POWER
+       bool "Deprecated power /proc/acpi directories"
+       depends on PROC_FS
+       help
+         For backwards compatibility, this option allows
+          deprecated power /proc/acpi/ directories to exist, even when
+          they have been replaced by functions in /sys.
+          The deprecated directories (and their replacements) include:
+         /proc/acpi/battery/* (/sys/class/power_supply/*)
+         /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
+         This option has no effect on /proc/acpi/ directories
+         and functions, which do not yet exist in /sys
+         This option, together with the proc directories, will be
+         deleted in the future.
+
+         Say N to delete power /proc/acpi/ directories that have moved to /sys/
+
 config ACPI_EC_DEBUGFS
        tristate "EC read/write access through /sys/kernel/debug/ec"
        default n
index 0331f91d56e663d63a7268d1a77bb3ae1080f715..bce34afadcd05d250831f098ca533565750345e6 100644 (file)
@@ -47,6 +47,7 @@ acpi-y                                += sysfs.o
 acpi-$(CONFIG_X86)             += acpi_cmos_rtc.o
 acpi-$(CONFIG_DEBUG_FS)                += debugfs.o
 acpi-$(CONFIG_ACPI_NUMA)       += numa.o
+acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 ifdef CONFIG_ACPI_VIDEO
 acpi-y                         += video_detect.o
 endif
index 2c01c1da29ce39f637136a12a512f2a473333233..c67f6f5ad61107b7ded8069a53894763e91ab892 100644 (file)
@@ -52,11 +52,39 @@ MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
+static int acpi_ac_add(struct acpi_device *device);
+static int acpi_ac_remove(struct acpi_device *device);
+static void acpi_ac_notify(struct acpi_device *device, u32 event);
+
+static const struct acpi_device_id ac_device_ids[] = {
+       {"ACPI0003", 0},
+       {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, ac_device_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int acpi_ac_resume(struct device *dev);
+#endif
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
+
 static int ac_sleep_before_get_state_ms;
 
+static struct acpi_driver acpi_ac_driver = {
+       .name = "ac",
+       .class = ACPI_AC_CLASS,
+       .ids = ac_device_ids,
+       .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+       .ops = {
+               .add = acpi_ac_add,
+               .remove = acpi_ac_remove,
+               .notify = acpi_ac_notify,
+               },
+       .drv.pm = &acpi_ac_pm,
+};
+
 struct acpi_ac {
        struct power_supply charger;
-       struct platform_device *pdev;
+       struct acpi_device * device;
        unsigned long long state;
        struct notifier_block battery_nb;
 };
@@ -69,10 +97,12 @@ struct acpi_ac {
 
 static int acpi_ac_get_state(struct acpi_ac *ac)
 {
-       acpi_status status;
-       acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev);
+       acpi_status status = AE_OK;
+
+       if (!ac)
+               return -EINVAL;
 
-       status = acpi_evaluate_integer(handle, "_PSR", NULL,
+       status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
                                       &ac->state);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
@@ -117,10 +147,9 @@ static enum power_supply_property ac_props[] = {
                                    Driver Model
    -------------------------------------------------------------------------- */
 
-static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
+static void acpi_ac_notify(struct acpi_device *device, u32 event)
 {
-       struct acpi_ac *ac = data;
-       struct acpi_device *adev;
+       struct acpi_ac *ac = acpi_driver_data(device);
 
        if (!ac)
                return;
@@ -143,11 +172,10 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
                        msleep(ac_sleep_before_get_state_ms);
 
                acpi_ac_get_state(ac);
-               adev = ACPI_COMPANION(&ac->pdev->dev);
-               acpi_bus_generate_netlink_event(adev->pnp.device_class,
-                                               dev_name(&ac->pdev->dev),
-                                               event, (u32) ac->state);
-               acpi_notifier_call_chain(adev, event, (u32) ac->state);
+               acpi_bus_generate_netlink_event(device->pnp.device_class,
+                                                 dev_name(&device->dev), event,
+                                                 (u32) ac->state);
+               acpi_notifier_call_chain(device, event, (u32) ac->state);
                kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
        }
 
@@ -192,49 +220,39 @@ static struct dmi_system_id ac_dmi_table[] = {
        {},
 };
 
-static int acpi_ac_probe(struct platform_device *pdev)
+static int acpi_ac_add(struct acpi_device *device)
 {
        int result = 0;
        struct acpi_ac *ac = NULL;
-       struct acpi_device *adev;
 
-       if (!pdev)
-               return -EINVAL;
 
-       adev = ACPI_COMPANION(&pdev->dev);
-       if (!adev)
-               return -ENODEV;
+       if (!device)
+               return -EINVAL;
 
        ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
        if (!ac)
                return -ENOMEM;
 
-       strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
-       strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
-       ac->pdev = pdev;
-       platform_set_drvdata(pdev, ac);
+       ac->device = device;
+       strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
+       strcpy(acpi_device_class(device), ACPI_AC_CLASS);
+       device->driver_data = ac;
 
        result = acpi_ac_get_state(ac);
        if (result)
                goto end;
 
-       ac->charger.name = acpi_device_bid(adev);
+       ac->charger.name = acpi_device_bid(device);
        ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
        ac->charger.properties = ac_props;
        ac->charger.num_properties = ARRAY_SIZE(ac_props);
        ac->charger.get_property = get_ac_property;
-       result = power_supply_register(&pdev->dev, &ac->charger);
+       result = power_supply_register(&ac->device->dev, &ac->charger);
        if (result)
                goto end;
 
-       result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
-                       ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
-       if (result) {
-               power_supply_unregister(&ac->charger);
-               goto end;
-       }
        printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
-              acpi_device_name(adev), acpi_device_bid(adev),
+              acpi_device_name(device), acpi_device_bid(device),
               ac->state ? "on-line" : "off-line");
 
        ac->battery_nb.notifier_call = acpi_ac_battery_notify;
@@ -256,7 +274,7 @@ static int acpi_ac_resume(struct device *dev)
        if (!dev)
                return -EINVAL;
 
-       ac = platform_get_drvdata(to_platform_device(dev));
+       ac = acpi_driver_data(to_acpi_device(dev));
        if (!ac)
                return -EINVAL;
 
@@ -270,19 +288,17 @@ static int acpi_ac_resume(struct device *dev)
 #else
 #define acpi_ac_resume NULL
 #endif
-static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
 
-static int acpi_ac_remove(struct platform_device *pdev)
+static int acpi_ac_remove(struct acpi_device *device)
 {
-       struct acpi_ac *ac;
+       struct acpi_ac *ac = NULL;
+
 
-       if (!pdev)
+       if (!device || !acpi_driver_data(device))
                return -EINVAL;
 
-       acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
-                       ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
+       ac = acpi_driver_data(device);
 
-       ac = platform_get_drvdata(pdev);
        if (ac->charger.dev)
                power_supply_unregister(&ac->charger);
        unregister_acpi_notifier(&ac->battery_nb);
@@ -292,23 +308,6 @@ static int acpi_ac_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct acpi_device_id acpi_ac_match[] = {
-       { "ACPI0003", 0 },
-       { }
-};
-MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
-
-static struct platform_driver acpi_ac_driver = {
-       .probe          = acpi_ac_probe,
-       .remove         = acpi_ac_remove,
-       .driver         = {
-               .name   = "acpi-ac",
-               .owner  = THIS_MODULE,
-               .pm     = &acpi_ac_pm_ops,
-               .acpi_match_table = ACPI_PTR(acpi_ac_match),
-       },
-};
-
 static int __init acpi_ac_init(void)
 {
        int result;
@@ -316,7 +315,7 @@ static int __init acpi_ac_init(void)
        if (acpi_disabled)
                return -ENODEV;
 
-       result = platform_driver_register(&acpi_ac_driver);
+       result = acpi_bus_register_driver(&acpi_ac_driver);
        if (result < 0)
                return -ENODEV;
 
@@ -325,7 +324,7 @@ static int __init acpi_ac_init(void)
 
 static void __exit acpi_ac_exit(void)
 {
-       platform_driver_unregister(&acpi_ac_driver);
+       acpi_bus_unregister_driver(&acpi_ac_driver);
 }
 module_init(acpi_ac_init);
 module_exit(acpi_ac_exit);
index dbfe49e5fd63cc179559b2c5caee57d27324c012..1d4950388fa13b9fc6050fbaf419ae68c796f24d 100644 (file)
@@ -29,7 +29,6 @@ ACPI_MODULE_NAME("platform");
 static const struct acpi_device_id acpi_platform_device_ids[] = {
 
        { "PNP0D40" },
-       { "ACPI0003" },
        { "VPC2004" },
        { "BCM4752" },
 
index b06f5f55ada952ced85de9c845dfb49cac421633..52c81c49cc7d8396fa5b36a4d24b757f934ced46 100644 (file)
@@ -405,7 +405,6 @@ static int acpi_processor_add(struct acpi_device *device,
                goto err;
 
        pr->dev = dev;
-       dev->offline = pr->flags.need_hotplug_init;
 
        /* Trigger the processor driver's .probe() if present. */
        if (device_attach(dev) >= 0)
index 49bbc71fad54efd709ba9b7e6a610b5f8a30a7a3..a08a448068dd99981be3eeb9643e0255b20ae8df 100644 (file)
@@ -141,9 +141,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
  * address. Although ACPICA adheres to the ACPI specification which
  * requires the use of the corresponding 64-bit address if it is non-zero,
  * some machines have been found to have a corrupted non-zero 64-bit
- * address. Default is FALSE, do not favor the 32-bit addresses.
+ * address. Default is TRUE, favor the 32-bit addresses.
  */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
 
 /*
  * Optionally truncate I/O addresses to 16 bits. Provides compatibility
index a4702eee91a820d131960754c14da6aa62f50939..9fb85f38de90e3b073635ef2b854895b13e7b5b8 100644 (file)
@@ -461,6 +461,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
        u32 table_count;
        struct acpi_table_header *table;
        acpi_physical_address address;
+       acpi_physical_address rsdt_address;
        u32 length;
        u8 *table_entry;
        acpi_status status;
@@ -488,11 +489,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
                 * as per the ACPI specification.
                 */
                address = (acpi_physical_address) rsdp->xsdt_physical_address;
+               rsdt_address =
+                   (acpi_physical_address) rsdp->rsdt_physical_address;
                table_entry_size = ACPI_XSDT_ENTRY_SIZE;
        } else {
                /* Root table is an RSDT (32-bit physical addresses) */
 
                address = (acpi_physical_address) rsdp->rsdt_physical_address;
+               rsdt_address = address;
                table_entry_size = ACPI_RSDT_ENTRY_SIZE;
        }
 
@@ -515,8 +519,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
 
                        /* Fall back to the RSDT */
 
-                       address =
-                           (acpi_physical_address) rsdp->rsdt_physical_address;
+                       address = rsdt_address;
                        table_entry_size = ACPI_RSDT_ENTRY_SIZE;
                }
        }
index 9a2c63b2005038476e5a5e77360fd836aad3f25f..6e7b2a12860d31ac533c730e97d080daaa6f1385 100644 (file)
 #include <linux/suspend.h>
 #include <asm/unaligned.h>
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+#endif
+
 #include <linux/acpi.h>
 #include <linux/power_supply.h>
 
@@ -64,6 +70,19 @@ static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+extern struct proc_dir_entry *acpi_lock_battery_dir(void);
+extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
+
+enum acpi_battery_files {
+       info_tag = 0,
+       state_tag,
+       alarm_tag,
+       ACPI_BATTERY_NUMFILES,
+};
+
+#endif
+
 static const struct acpi_device_id battery_device_ids[] = {
        {"PNP0C0A", 0},
        {"", 0},
@@ -299,6 +318,14 @@ static enum power_supply_property energy_battery_props[] = {
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
 };
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+inline char *acpi_battery_units(struct acpi_battery *battery)
+{
+       return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
+               "mA" : "mW";
+}
+#endif
+
 /* --------------------------------------------------------------------------
                                Battery Management
    -------------------------------------------------------------------------- */
@@ -716,6 +743,279 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
        sysfs_add_battery(battery);
 }
 
+/* --------------------------------------------------------------------------
+                              FS Interface (/proc)
+   -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+static struct proc_dir_entry *acpi_battery_dir;
+
+static int acpi_battery_print_info(struct seq_file *seq, int result)
+{
+       struct acpi_battery *battery = seq->private;
+
+       if (result)
+               goto end;
+
+       seq_printf(seq, "present:                 %s\n",
+                  acpi_battery_present(battery) ? "yes" : "no");
+       if (!acpi_battery_present(battery))
+               goto end;
+       if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "design capacity:         unknown\n");
+       else
+               seq_printf(seq, "design capacity:         %d %sh\n",
+                          battery->design_capacity,
+                          acpi_battery_units(battery));
+
+       if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "last full capacity:      unknown\n");
+       else
+               seq_printf(seq, "last full capacity:      %d %sh\n",
+                          battery->full_charge_capacity,
+                          acpi_battery_units(battery));
+
+       seq_printf(seq, "battery technology:      %srechargeable\n",
+                  (!battery->technology)?"non-":"");
+
+       if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "design voltage:          unknown\n");
+       else
+               seq_printf(seq, "design voltage:          %d mV\n",
+                          battery->design_voltage);
+       seq_printf(seq, "design capacity warning: %d %sh\n",
+                  battery->design_capacity_warning,
+                  acpi_battery_units(battery));
+       seq_printf(seq, "design capacity low:     %d %sh\n",
+                  battery->design_capacity_low,
+                  acpi_battery_units(battery));
+       seq_printf(seq, "cycle count:             %i\n", battery->cycle_count);
+       seq_printf(seq, "capacity granularity 1:  %d %sh\n",
+                  battery->capacity_granularity_1,
+                  acpi_battery_units(battery));
+       seq_printf(seq, "capacity granularity 2:  %d %sh\n",
+                  battery->capacity_granularity_2,
+                  acpi_battery_units(battery));
+       seq_printf(seq, "model number:            %s\n", battery->model_number);
+       seq_printf(seq, "serial number:           %s\n", battery->serial_number);
+       seq_printf(seq, "battery type:            %s\n", battery->type);
+       seq_printf(seq, "OEM info:                %s\n", battery->oem_info);
+      end:
+       if (result)
+               seq_printf(seq, "ERROR: Unable to read battery info\n");
+       return result;
+}
+
+static int acpi_battery_print_state(struct seq_file *seq, int result)
+{
+       struct acpi_battery *battery = seq->private;
+
+       if (result)
+               goto end;
+
+       seq_printf(seq, "present:                 %s\n",
+                  acpi_battery_present(battery) ? "yes" : "no");
+       if (!acpi_battery_present(battery))
+               goto end;
+
+       seq_printf(seq, "capacity state:          %s\n",
+                       (battery->state & 0x04) ? "critical" : "ok");
+       if ((battery->state & 0x01) && (battery->state & 0x02))
+               seq_printf(seq,
+                          "charging state:          charging/discharging\n");
+       else if (battery->state & 0x01)
+               seq_printf(seq, "charging state:          discharging\n");
+       else if (battery->state & 0x02)
+               seq_printf(seq, "charging state:          charging\n");
+       else
+               seq_printf(seq, "charging state:          charged\n");
+
+       if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "present rate:            unknown\n");
+       else
+               seq_printf(seq, "present rate:            %d %s\n",
+                          battery->rate_now, acpi_battery_units(battery));
+
+       if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "remaining capacity:      unknown\n");
+       else
+               seq_printf(seq, "remaining capacity:      %d %sh\n",
+                          battery->capacity_now, acpi_battery_units(battery));
+       if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "present voltage:         unknown\n");
+       else
+               seq_printf(seq, "present voltage:         %d mV\n",
+                          battery->voltage_now);
+      end:
+       if (result)
+               seq_printf(seq, "ERROR: Unable to read battery state\n");
+
+       return result;
+}
+
+static int acpi_battery_print_alarm(struct seq_file *seq, int result)
+{
+       struct acpi_battery *battery = seq->private;
+
+       if (result)
+               goto end;
+
+       if (!acpi_battery_present(battery)) {
+               seq_printf(seq, "present:                 no\n");
+               goto end;
+       }
+       seq_printf(seq, "alarm:                   ");
+       if (!battery->alarm)
+               seq_printf(seq, "unsupported\n");
+       else
+               seq_printf(seq, "%u %sh\n", battery->alarm,
+                               acpi_battery_units(battery));
+      end:
+       if (result)
+               seq_printf(seq, "ERROR: Unable to read battery alarm\n");
+       return result;
+}
+
+static ssize_t acpi_battery_write_alarm(struct file *file,
+                                       const char __user * buffer,
+                                       size_t count, loff_t * ppos)
+{
+       int result = 0;
+       char alarm_string[12] = { '\0' };
+       struct seq_file *m = file->private_data;
+       struct acpi_battery *battery = m->private;
+
+       if (!battery || (count > sizeof(alarm_string) - 1))
+               return -EINVAL;
+       if (!acpi_battery_present(battery)) {
+               result = -ENODEV;
+               goto end;
+       }
+       if (copy_from_user(alarm_string, buffer, count)) {
+               result = -EFAULT;
+               goto end;
+       }
+       alarm_string[count] = '\0';
+       battery->alarm = simple_strtol(alarm_string, NULL, 0);
+       result = acpi_battery_set_alarm(battery);
+      end:
+       if (!result)
+               return count;
+       return result;
+}
+
+typedef int(*print_func)(struct seq_file *seq, int result);
+
+static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
+       acpi_battery_print_info,
+       acpi_battery_print_state,
+       acpi_battery_print_alarm,
+};
+
+static int acpi_battery_read(int fid, struct seq_file *seq)
+{
+       struct acpi_battery *battery = seq->private;
+       int result = acpi_battery_update(battery);
+       return acpi_print_funcs[fid](seq, result);
+}
+
+#define DECLARE_FILE_FUNCTIONS(_name) \
+static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
+{ \
+       return acpi_battery_read(_name##_tag, seq); \
+} \
+static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
+{ \
+       return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
+}
+
+DECLARE_FILE_FUNCTIONS(info);
+DECLARE_FILE_FUNCTIONS(state);
+DECLARE_FILE_FUNCTIONS(alarm);
+
+#undef DECLARE_FILE_FUNCTIONS
+
+#define FILE_DESCRIPTION_RO(_name) \
+       { \
+       .name = __stringify(_name), \
+       .mode = S_IRUGO, \
+       .ops = { \
+               .open = acpi_battery_##_name##_open_fs, \
+               .read = seq_read, \
+               .llseek = seq_lseek, \
+               .release = single_release, \
+               .owner = THIS_MODULE, \
+               }, \
+       }
+
+#define FILE_DESCRIPTION_RW(_name) \
+       { \
+       .name = __stringify(_name), \
+       .mode = S_IFREG | S_IRUGO | S_IWUSR, \
+       .ops = { \
+               .open = acpi_battery_##_name##_open_fs, \
+               .read = seq_read, \
+               .llseek = seq_lseek, \
+               .write = acpi_battery_write_##_name, \
+               .release = single_release, \
+               .owner = THIS_MODULE, \
+               }, \
+       }
+
+static const struct battery_file {
+       struct file_operations ops;
+       umode_t mode;
+       const char *name;
+} acpi_battery_file[] = {
+       FILE_DESCRIPTION_RO(info),
+       FILE_DESCRIPTION_RO(state),
+       FILE_DESCRIPTION_RW(alarm),
+};
+
+#undef FILE_DESCRIPTION_RO
+#undef FILE_DESCRIPTION_RW
+
+static int acpi_battery_add_fs(struct acpi_device *device)
+{
+       struct proc_dir_entry *entry = NULL;
+       int i;
+
+       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
+                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+       if (!acpi_device_dir(device)) {
+               acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+                                                    acpi_battery_dir);
+               if (!acpi_device_dir(device))
+                       return -ENODEV;
+       }
+
+       for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
+               entry = proc_create_data(acpi_battery_file[i].name,
+                                        acpi_battery_file[i].mode,
+                                        acpi_device_dir(device),
+                                        &acpi_battery_file[i].ops,
+                                        acpi_driver_data(device));
+               if (!entry)
+                       return -ENODEV;
+       }
+       return 0;
+}
+
+static void acpi_battery_remove_fs(struct acpi_device *device)
+{
+       int i;
+       if (!acpi_device_dir(device))
+               return;
+       for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
+               remove_proc_entry(acpi_battery_file[i].name,
+                                 acpi_device_dir(device));
+
+       remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
+       acpi_device_dir(device) = NULL;
+}
+
+#endif
+
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
@@ -790,6 +1090,15 @@ static int acpi_battery_add(struct acpi_device *device)
        result = acpi_battery_update(battery);
        if (result)
                goto fail;
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       result = acpi_battery_add_fs(device);
+#endif
+       if (result) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+               acpi_battery_remove_fs(device);
+#endif
+               goto fail;
+       }
 
        printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
                ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -816,6 +1125,9 @@ static int acpi_battery_remove(struct acpi_device *device)
                return -EINVAL;
        battery = acpi_driver_data(device);
        unregister_pm_notifier(&battery->pm_nb);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_battery_remove_fs(device);
+#endif
        sysfs_remove_battery(battery);
        mutex_destroy(&battery->lock);
        mutex_destroy(&battery->sysfs_lock);
@@ -866,7 +1178,19 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
 
        if (dmi_check_system(bat_dmi_table))
                battery_bix_broken_package = 1;
-       acpi_bus_register_driver(&acpi_battery_driver);
+       
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_battery_dir = acpi_lock_battery_dir();
+       if (!acpi_battery_dir)
+               return;
+#endif
+       if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+               acpi_unlock_battery_dir(acpi_battery_dir);
+#endif
+               return;
+       }
+       return;
 }
 
 static int __init acpi_battery_init(void)
@@ -878,6 +1202,9 @@ static int __init acpi_battery_init(void)
 static void __exit acpi_battery_exit(void)
 {
        acpi_bus_unregister_driver(&acpi_battery_driver);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_unlock_battery_dir(acpi_battery_dir);
+#endif
 }
 
 module_init(acpi_battery_init);
index afec4526c48aa04e2921a199396e8fb2ea7489be..3d8413d02a975f0643275a247524a0c9d7569341 100644 (file)
@@ -314,6 +314,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
                },
        },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "Dell Inspiron 7737",
+       .matches = {
+                   DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                   DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
+               },
+       },
 
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
@@ -374,6 +382,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
                },
        },
+       /*
+        * Without this this EEEpc exports a non working WMI interface, with
+        * this it exports a working "good old" eeepc_laptop interface, fixing
+        * both brightness control, and rfkill not working.
+        */
+       {
+       .callback = dmi_enable_osi_linux,
+       .ident = "Asus EEE PC 1015PX",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
+               },
+       },
        {}
 };
 
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
new file mode 100644 (file)
index 0000000..6c9ee68
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define PREFIX "ACPI: "
+
+ACPI_MODULE_NAME("cm_sbs");
+#define ACPI_AC_CLASS          "ac_adapter"
+#define ACPI_BATTERY_CLASS     "battery"
+#define _COMPONENT             ACPI_SBS_COMPONENT
+static struct proc_dir_entry *acpi_ac_dir;
+static struct proc_dir_entry *acpi_battery_dir;
+
+static DEFINE_MUTEX(cm_sbs_mutex);
+
+static int lock_ac_dir_cnt;
+static int lock_battery_dir_cnt;
+
+struct proc_dir_entry *acpi_lock_ac_dir(void)
+{
+       mutex_lock(&cm_sbs_mutex);
+       if (!acpi_ac_dir)
+               acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
+       if (acpi_ac_dir) {
+               lock_ac_dir_cnt++;
+       } else {
+               printk(KERN_ERR PREFIX
+                                 "Cannot create %s\n", ACPI_AC_CLASS);
+       }
+       mutex_unlock(&cm_sbs_mutex);
+       return acpi_ac_dir;
+}
+EXPORT_SYMBOL(acpi_lock_ac_dir);
+
+void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
+{
+       mutex_lock(&cm_sbs_mutex);
+       if (acpi_ac_dir_param)
+               lock_ac_dir_cnt--;
+       if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
+               remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
+               acpi_ac_dir = NULL;
+       }
+       mutex_unlock(&cm_sbs_mutex);
+}
+EXPORT_SYMBOL(acpi_unlock_ac_dir);
+
+struct proc_dir_entry *acpi_lock_battery_dir(void)
+{
+       mutex_lock(&cm_sbs_mutex);
+       if (!acpi_battery_dir) {
+               acpi_battery_dir =
+                   proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
+       }
+       if (acpi_battery_dir) {
+               lock_battery_dir_cnt++;
+       } else {
+               printk(KERN_ERR PREFIX
+                                 "Cannot create %s\n", ACPI_BATTERY_CLASS);
+       }
+       mutex_unlock(&cm_sbs_mutex);
+       return acpi_battery_dir;
+}
+EXPORT_SYMBOL(acpi_lock_battery_dir);
+
+void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
+{
+       mutex_lock(&cm_sbs_mutex);
+       if (acpi_battery_dir_param)
+               lock_battery_dir_cnt--;
+       if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
+           && acpi_battery_dir) {
+               remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
+               acpi_battery_dir = NULL;
+       }
+       mutex_unlock(&cm_sbs_mutex);
+       return;
+}
+EXPORT_SYMBOL(acpi_unlock_battery_dir);
index 8b6990e417ec870d7c77e42c994b1e2245b3b88e..f8bc5a755dda411963e097cc41c27bec0752da71 100644 (file)
@@ -457,10 +457,10 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
        },
        {
         .callback = video_set_use_native_backlight,
-        .ident = "ThinkPad T430s",
+        .ident = "ThinkPad T430 and T430s",
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"),
                },
        },
        {
@@ -472,7 +472,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                },
        },
        {
-       .callback = video_set_use_native_backlight,
+        .callback = video_set_use_native_backlight,
        .ident = "ThinkPad X1 Carbon",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -500,7 +500,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
         .ident = "Dell Inspiron 7520",
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-               DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
                },
        },
        {
@@ -511,6 +511,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"),
                },
        },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "Acer Aspire 5742G",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"),
+               },
+       },
        {
         .callback = video_set_use_native_backlight,
         .ident = "Acer Aspire V5-431",
index c2706047337f17c0fad38b3161cabc93d95be0e5..0033fafc470be5f7be9b84da74d689143abb5357 100644 (file)
@@ -815,7 +815,7 @@ config PATA_AT32
 
 config PATA_AT91
        tristate "PATA support for AT91SAM9260"
-       depends on ARM && ARCH_AT91
+       depends on ARM && SOC_AT91SAM9
        help
          This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
 
index 71e15b73513d22ed2bf5ac34afec9b5f42679fe7..60707814a84b19e2581d6309c2a17d4823d1e015 100644 (file)
@@ -1115,6 +1115,17 @@ static bool ahci_broken_online(struct pci_dev *pdev)
        return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
 }
 
+static bool ahci_broken_devslp(struct pci_dev *pdev)
+{
+       /* device with broken DEVSLP but still showing SDS capability */
+       static const struct pci_device_id ids[] = {
+               { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
+               {}
+       };
+
+       return pci_match_id(ids, pdev);
+}
+
 #ifdef CONFIG_ATA_ACPI
 static void ahci_gtf_filter_workaround(struct ata_host *host)
 {
@@ -1364,6 +1375,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
 
+       /* must set flag prior to save config in order to take effect */
+       if (ahci_broken_devslp(pdev))
+               hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
+
        /* save initial config */
        ahci_pci_save_initial_config(pdev, hpriv);
 
index b5eb886da22635c3c76775bc0ef6374af3464b98..af63c75c20011e10d76be66edb27d595cb47978c 100644 (file)
@@ -236,6 +236,7 @@ enum {
                                                        port start (wait until
                                                        error-handling stage) */
        AHCI_HFLAG_MULTI_MSI            = (1 << 16), /* multiple PCI MSIs */
+       AHCI_HFLAG_NO_DEVSLP            = (1 << 17), /* no device sleep */
 
        /* ap->flags bits */
 
index 497c7abe1c7df5ef79ccd246828b68a1251c5201..8befeb69eeb1133afc62daa43b1fb41af584994e 100644 (file)
 #include "ahci.h"
 
 enum {
-       PORT_PHY_CTL = 0x178,                   /* Port0 PHY Control */
-       PORT_PHY_CTL_PDDQ_LOC = 0x100000,       /* PORT_PHY_CTL bits */
-       HOST_TIMER1MS = 0xe0,                   /* Timer 1-ms */
+       /* Timer 1-ms Register */
+       IMX_TIMER1MS                            = 0x00e0,
+       /* Port0 PHY Control Register */
+       IMX_P0PHYCR                             = 0x0178,
+       IMX_P0PHYCR_TEST_PDDQ                   = 1 << 20,
+       IMX_P0PHYCR_CR_READ                     = 1 << 19,
+       IMX_P0PHYCR_CR_WRITE                    = 1 << 18,
+       IMX_P0PHYCR_CR_CAP_DATA                 = 1 << 17,
+       IMX_P0PHYCR_CR_CAP_ADDR                 = 1 << 16,
+       /* Port0 PHY Status Register */
+       IMX_P0PHYSR                             = 0x017c,
+       IMX_P0PHYSR_CR_ACK                      = 1 << 18,
+       IMX_P0PHYSR_CR_DATA_OUT                 = 0xffff << 0,
+       /* Lane0 Output Status Register */
+       IMX_LANE0_OUT_STAT                      = 0x2003,
+       IMX_LANE0_OUT_STAT_RX_PLL_STATE         = 1 << 1,
+       /* Clock Reset Register */
+       IMX_CLOCK_RESET                         = 0x7f3f,
+       IMX_CLOCK_RESET_RESET                   = 1 << 0,
 };
 
 enum ahci_imx_type {
@@ -54,9 +70,149 @@ MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support
 
 static void ahci_imx_host_stop(struct ata_host *host);
 
+static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert)
+{
+       int timeout = 10;
+       u32 crval;
+       u32 srval;
+
+       /* Assert or deassert the bit */
+       crval = readl(mmio + IMX_P0PHYCR);
+       if (assert)
+               crval |= bit;
+       else
+               crval &= ~bit;
+       writel(crval, mmio + IMX_P0PHYCR);
+
+       /* Wait for the cr_ack signal */
+       do {
+               srval = readl(mmio + IMX_P0PHYSR);
+               if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK)
+                       break;
+               usleep_range(100, 200);
+       } while (--timeout);
+
+       return timeout ? 0 : -ETIMEDOUT;
+}
+
+static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio)
+{
+       u32 crval = addr;
+       int ret;
+
+       /* Supply the address on cr_data_in */
+       writel(crval, mmio + IMX_P0PHYCR);
+
+       /* Assert the cr_cap_addr signal */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true);
+       if (ret)
+               return ret;
+
+       /* Deassert cr_cap_addr */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int imx_phy_reg_write(u16 val, void __iomem *mmio)
+{
+       u32 crval = val;
+       int ret;
+
+       /* Supply the data on cr_data_in */
+       writel(crval, mmio + IMX_P0PHYCR);
+
+       /* Assert the cr_cap_data signal */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true);
+       if (ret)
+               return ret;
+
+       /* Deassert cr_cap_data */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false);
+       if (ret)
+               return ret;
+
+       if (val & IMX_CLOCK_RESET_RESET) {
+               /*
+                * In case we're resetting the phy, it's unable to acknowledge,
+                * so we return immediately here.
+                */
+               crval |= IMX_P0PHYCR_CR_WRITE;
+               writel(crval, mmio + IMX_P0PHYCR);
+               goto out;
+       }
+
+       /* Assert the cr_write signal */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true);
+       if (ret)
+               return ret;
+
+       /* Deassert cr_write */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false);
+       if (ret)
+               return ret;
+
+out:
+       return 0;
+}
+
+static int imx_phy_reg_read(u16 *val, void __iomem *mmio)
+{
+       int ret;
+
+       /* Assert the cr_read signal */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true);
+       if (ret)
+               return ret;
+
+       /* Capture the data from cr_data_out[] */
+       *val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT;
+
+       /* Deassert cr_read */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
+{
+       void __iomem *mmio = hpriv->mmio;
+       int timeout = 10;
+       u16 val;
+       int ret;
+
+       /* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */
+       ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio);
+       if (ret)
+               return ret;
+       ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio);
+       if (ret)
+               return ret;
+
+       /* Wait for PHY RX_PLL to be stable */
+       do {
+               usleep_range(100, 200);
+               ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio);
+               if (ret)
+                       return ret;
+               ret = imx_phy_reg_read(&val, mmio);
+               if (ret)
+                       return ret;
+               if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE)
+                       break;
+       } while (--timeout);
+
+       return timeout ? 0 : -ETIMEDOUT;
+}
+
 static int imx_sata_enable(struct ahci_host_priv *hpriv)
 {
        struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+       struct device *dev = &imxpriv->ahci_pdev->dev;
        int ret;
 
        if (imxpriv->no_device)
@@ -101,6 +257,14 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
                regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
                                   IMX6Q_GPR13_SATA_MPLL_CLK_EN,
                                   IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+
+               usleep_range(100, 200);
+
+               ret = imx_sata_phy_reset(hpriv);
+               if (ret) {
+                       dev_err(dev, "failed to reset phy: %d\n", ret);
+                       goto disable_regulator;
+               }
        }
 
        usleep_range(1000, 2000);
@@ -156,8 +320,8 @@ static void ahci_imx_error_handler(struct ata_port *ap)
         * without full reset once the pddq mode is enabled making it
         * impossible to use as part of libata LPM.
         */
-       reg_val = readl(mmio + PORT_PHY_CTL);
-       writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
+       reg_val = readl(mmio + IMX_P0PHYCR);
+       writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
        imx_sata_disable(hpriv);
        imxpriv->no_device = true;
 }
@@ -217,6 +381,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
        if (!imxpriv)
                return -ENOMEM;
 
+       imxpriv->ahci_pdev = pdev;
        imxpriv->no_device = false;
        imxpriv->first_time = true;
        imxpriv->type = (enum ahci_imx_type)of_id->data;
@@ -248,7 +413,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
 
        /*
         * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
-        * and IP vendor specific register HOST_TIMER1MS.
+        * and IP vendor specific register IMX_TIMER1MS.
         * Configure CAP_SSS (support stagered spin up).
         * Implement the port0.
         * Get the ahb clock rate, and configure the TIMER1MS register.
@@ -265,7 +430,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
        }
 
        reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
-       writel(reg_val, hpriv->mmio + HOST_TIMER1MS);
+       writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
 
        ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0);
        if (ret)
index 6bd4f660b4e15966ca2c351b4501c0521491de32..b9861453fc8148612a740418f5fee3088d7b65a1 100644 (file)
@@ -452,6 +452,13 @@ void ahci_save_initial_config(struct device *dev,
                cap &= ~HOST_CAP_SNTF;
        }
 
+       if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
+               dev_info(dev,
+                        "controller can't do DEVSLP, turning off\n");
+               cap2 &= ~HOST_CAP2_SDS;
+               cap2 &= ~HOST_CAP2_SADM;
+       }
+
        if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
                dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
                cap |= HOST_CAP_FBS;
index 943cc8b83e59bb7f1b293abce887be717047ffff..ea83828bfea94b41eae6947cfbbbaa8dfbc15b5a 100644 (file)
@@ -6314,6 +6314,8 @@ int ata_host_activate(struct ata_host *host, int irq,
 static void ata_port_detach(struct ata_port *ap)
 {
        unsigned long flags;
+       struct ata_link *link;
+       struct ata_device *dev;
 
        if (!ap->ops->error_handler)
                goto skip_eh;
@@ -6333,6 +6335,13 @@ static void ata_port_detach(struct ata_port *ap)
        cancel_delayed_work_sync(&ap->hotplug_task);
 
  skip_eh:
+       /* clean up zpodd on port removal */
+       ata_for_each_link(link, ap, HOST_FIRST) {
+               ata_for_each_dev(dev, link, ALL) {
+                       if (zpodd_dev_enabled(dev))
+                               zpodd_exit(dev);
+               }
+       }
        if (ap->pmp_link) {
                int i;
                for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
index 293e2e0a0a87c7d9877c27524fd98503ba16c1cf..00b73448b22ea7b77a55785cfbbddf022bf4bfd3 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/debugfs.h>
+#include <linux/log2.h>
 
 /*
  * DDR target is the same on all platforms.
@@ -222,12 +223,6 @@ static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
                 */
                if ((u64)base < wend && end > wbase)
                        return 0;
-
-               /*
-                * Check if target/attribute conflicts
-                */
-               if (target == wtarget && attr == wattr)
-                       return 0;
        }
 
        return 1;
@@ -266,6 +261,17 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
                mbus->soc->win_cfg_offset(win);
        u32 ctrl, remap_addr;
 
+       if (!is_power_of_2(size)) {
+               WARN(true, "Invalid MBus window size: 0x%zx\n", size);
+               return -EINVAL;
+       }
+
+       if ((base & (phys_addr_t)(size - 1)) != 0) {
+               WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base,
+                    size);
+               return -EINVAL;
+       }
+
        ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
                (attr << WIN_CTRL_ATTR_SHIFT)    |
                (target << WIN_CTRL_TGT_SHIFT)   |
@@ -413,6 +419,10 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
                           win, (unsigned long long)wbase,
                           (unsigned long long)(wbase + wsize), wtarget, wattr);
 
+               if (!is_power_of_2(wsize) ||
+                   ((wbase & (u64)(wsize - 1)) != 0))
+                       seq_puts(seq, " (Invalid base/size!!)");
+
                if (win < mbus->soc->num_remappable_wins) {
                        seq_printf(seq, " (remap %016llx)\n",
                                   (unsigned long long)wremap);
index 6b75713d953a4e719cd33610f6034bec430c5d95..102c50d38902ca43fed85641618ba202a7251679 100644 (file)
@@ -995,8 +995,11 @@ retry:
                ibytes = min_t(size_t, ibytes, have_bytes - reserved);
        if (ibytes < min)
                ibytes = 0;
-       entropy_count = max_t(int, 0,
-                             entropy_count - (ibytes << (ENTROPY_SHIFT + 3)));
+       if (have_bytes >= ibytes + reserved)
+               entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
+       else
+               entropy_count = reserved << (ENTROPY_SHIFT + 3);
+
        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
                goto retry;
 
index b3ea223585bdeac64de19606f96b21b280c96210..61dcc8011ec711246727ce5c2750277f33c219f4 100644 (file)
@@ -328,13 +328,11 @@ int tpm_add_ppi(struct kobject *parent)
        /* Cache TPM ACPI handle and version string */
        acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
                            ppi_callback, NULL, NULL, &tpm_ppi_handle);
-       if (tpm_ppi_handle == NULL)
-               return -ENODEV;
-
-       return sysfs_create_group(parent, &ppi_attr_grp);
+       return tpm_ppi_handle ? sysfs_create_group(parent, &ppi_attr_grp) : 0;
 }
 
 void tpm_remove_ppi(struct kobject *parent)
 {
-       sysfs_remove_group(parent, &ppi_attr_grp);
+       if (tpm_ppi_handle)
+               sysfs_remove_group(parent, &ppi_attr_grp);
 }
index c7607feb18dd159b7626fd48340c764f50738c10..54a06526f64f09d7a49b8e043bd91d2e3d2dfffd 100644 (file)
@@ -27,7 +27,7 @@ LIST_HEAD(ccu_list);  /* The list of set up CCUs */
 
 static bool clk_requires_trigger(struct kona_clk *bcm_clk)
 {
-       struct peri_clk_data *peri = bcm_clk->peri;
+       struct peri_clk_data *peri = bcm_clk->u.peri;
        struct bcm_clk_sel *sel;
        struct bcm_clk_div *div;
 
@@ -63,7 +63,7 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
        u32 limit;
 
        BUG_ON(bcm_clk->type != bcm_clk_peri);
-       peri = bcm_clk->peri;
+       peri = bcm_clk->u.peri;
        name = bcm_clk->name;
        range = bcm_clk->ccu->range;
 
@@ -81,19 +81,19 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
 
        div = &peri->div;
        if (divider_exists(div)) {
-               if (div->offset > limit) {
+               if (div->u.s.offset > limit) {
                        pr_err("%s: bad divider offset for %s (%u > %u)\n",
-                               __func__, name, div->offset, limit);
+                               __func__, name, div->u.s.offset, limit);
                        return false;
                }
        }
 
        div = &peri->pre_div;
        if (divider_exists(div)) {
-               if (div->offset > limit) {
+               if (div->u.s.offset > limit) {
                        pr_err("%s: bad pre-divider offset for %s "
                                        "(%u > %u)\n",
-                               __func__, name, div->offset, limit);
+                               __func__, name, div->u.s.offset, limit);
                        return false;
                }
        }
@@ -249,21 +249,22 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
 {
        if (divider_is_fixed(div)) {
                /* Any fixed divider value but 0 is OK */
-               if (div->fixed == 0) {
+               if (div->u.fixed == 0) {
                        pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
                                field_name, clock_name);
                        return false;
                }
                return true;
        }
-       if (!bitfield_valid(div->shift, div->width, field_name, clock_name))
+       if (!bitfield_valid(div->u.s.shift, div->u.s.width,
+                               field_name, clock_name))
                return false;
 
        if (divider_has_fraction(div))
-               if (div->frac_width > div->width) {
+               if (div->u.s.frac_width > div->u.s.width) {
                        pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
                                __func__, field_name, clock_name,
-                               div->frac_width, div->width);
+                               div->u.s.frac_width, div->u.s.width);
                        return false;
                }
 
@@ -278,7 +279,7 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
  */
 static bool kona_dividers_valid(struct kona_clk *bcm_clk)
 {
-       struct peri_clk_data *peri = bcm_clk->peri;
+       struct peri_clk_data *peri = bcm_clk->u.peri;
        struct bcm_clk_div *div;
        struct bcm_clk_div *pre_div;
        u32 limit;
@@ -295,7 +296,7 @@ static bool kona_dividers_valid(struct kona_clk *bcm_clk)
 
        limit = BITS_PER_BYTE * sizeof(u32);
 
-       return div->frac_width + pre_div->frac_width <= limit;
+       return div->u.s.frac_width + pre_div->u.s.frac_width <= limit;
 }
 
 
@@ -328,7 +329,7 @@ peri_clk_data_valid(struct kona_clk *bcm_clk)
        if (!peri_clk_data_offsets_valid(bcm_clk))
                return false;
 
-       peri = bcm_clk->peri;
+       peri = bcm_clk->u.peri;
        name = bcm_clk->name;
        gate = &peri->gate;
        if (gate_exists(gate) && !gate_valid(gate, "gate", name))
@@ -588,12 +589,12 @@ static void bcm_clk_teardown(struct kona_clk *bcm_clk)
 {
        switch (bcm_clk->type) {
        case bcm_clk_peri:
-               peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data);
+               peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data);
                break;
        default:
                break;
        }
-       bcm_clk->data = NULL;
+       bcm_clk->u.data = NULL;
        bcm_clk->type = bcm_clk_none;
 }
 
@@ -644,7 +645,7 @@ struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
                break;
        }
        bcm_clk->type = type;
-       bcm_clk->data = data;
+       bcm_clk->u.data = data;
 
        /* Make sure everything makes sense before we set it up */
        if (!kona_clk_valid(bcm_clk)) {
index e3d339e08309f66ac61d2286c7c28bca9c4cec76..db11a87449f236c6bc1fcf4e5f6084750a1d4f3a 100644 (file)
@@ -61,7 +61,7 @@ u64 do_div_round_closest(u64 dividend, unsigned long divisor)
 /* Convert a divider into the scaled divisor value it represents. */
 static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
 {
-       return (u64)reg_div + ((u64)1 << div->frac_width);
+       return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
 }
 
 /*
@@ -77,7 +77,7 @@ u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
        BUG_ON(billionths >= BILLION);
 
        combined = (u64)div_value * BILLION + billionths;
-       combined <<= div->frac_width;
+       combined <<= div->u.s.frac_width;
 
        return do_div_round_closest(combined, BILLION);
 }
@@ -87,7 +87,7 @@ static inline u64
 scaled_div_min(struct bcm_clk_div *div)
 {
        if (divider_is_fixed(div))
-               return (u64)div->fixed;
+               return (u64)div->u.fixed;
 
        return scaled_div_value(div, 0);
 }
@@ -98,9 +98,9 @@ u64 scaled_div_max(struct bcm_clk_div *div)
        u32 reg_div;
 
        if (divider_is_fixed(div))
-               return (u64)div->fixed;
+               return (u64)div->u.fixed;
 
-       reg_div = ((u32)1 << div->width) - 1;
+       reg_div = ((u32)1 << div->u.s.width) - 1;
 
        return scaled_div_value(div, reg_div);
 }
@@ -115,7 +115,7 @@ divider(struct bcm_clk_div *div, u64 scaled_div)
        BUG_ON(scaled_div < scaled_div_min(div));
        BUG_ON(scaled_div > scaled_div_max(div));
 
-       return (u32)(scaled_div - ((u64)1 << div->frac_width));
+       return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
 }
 
 /* Return a rate scaled for use when dividing by a scaled divisor. */
@@ -125,7 +125,7 @@ scale_rate(struct bcm_clk_div *div, u32 rate)
        if (divider_is_fixed(div))
                return (u64)rate;
 
-       return (u64)rate << div->frac_width;
+       return (u64)rate << div->u.s.frac_width;
 }
 
 /* CCU access */
@@ -398,14 +398,14 @@ static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
        u32 reg_div;
 
        if (divider_is_fixed(div))
-               return (u64)div->fixed;
+               return (u64)div->u.fixed;
 
        flags = ccu_lock(ccu);
-       reg_val = __ccu_read(ccu, div->offset);
+       reg_val = __ccu_read(ccu, div->u.s.offset);
        ccu_unlock(ccu, flags);
 
        /* Extract the full divider field from the register value */
-       reg_div = bitfield_extract(reg_val, div->shift, div->width);
+       reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);
 
        /* Return the scaled divisor value it represents */
        return scaled_div_value(div, reg_div);
@@ -433,16 +433,17 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
         * state was defined in the device tree, we just find out
         * what its current value is rather than updating it.
         */
-       if (div->scaled_div == BAD_SCALED_DIV_VALUE) {
-               reg_val = __ccu_read(ccu, div->offset);
-               reg_div = bitfield_extract(reg_val, div->shift, div->width);
-               div->scaled_div = scaled_div_value(div, reg_div);
+       if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
+               reg_val = __ccu_read(ccu, div->u.s.offset);
+               reg_div = bitfield_extract(reg_val, div->u.s.shift,
+                                               div->u.s.width);
+               div->u.s.scaled_div = scaled_div_value(div, reg_div);
 
                return 0;
        }
 
        /* Convert the scaled divisor to the value we need to record */
-       reg_div = divider(div, div->scaled_div);
+       reg_div = divider(div, div->u.s.scaled_div);
 
        /* Clock needs to be enabled before changing the rate */
        enabled = __is_clk_gate_enabled(ccu, gate);
@@ -452,9 +453,10 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
        }
 
        /* Replace the divider value and record the result */
-       reg_val = __ccu_read(ccu, div->offset);
-       reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div);
-       __ccu_write(ccu, div->offset, reg_val);
+       reg_val = __ccu_read(ccu, div->u.s.offset);
+       reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
+                                       reg_div);
+       __ccu_write(ccu, div->u.s.offset, reg_val);
 
        /* If the trigger fails we still want to disable the gate */
        if (!__clk_trigger(ccu, trig))
@@ -490,11 +492,11 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 
        BUG_ON(divider_is_fixed(div));
 
-       previous = div->scaled_div;
+       previous = div->u.s.scaled_div;
        if (previous == scaled_div)
                return 0;       /* No change */
 
-       div->scaled_div = scaled_div;
+       div->u.s.scaled_div = scaled_div;
 
        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);
@@ -505,7 +507,7 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
        ccu_unlock(ccu, flags);
 
        if (ret)
-               div->scaled_div = previous;             /* Revert the change */
+               div->u.s.scaled_div = previous;         /* Revert the change */
 
        return ret;
 
@@ -802,7 +804,7 @@ static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 static int kona_peri_clk_enable(struct clk_hw *hw)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+       struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
 
        return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true);
 }
@@ -810,7 +812,7 @@ static int kona_peri_clk_enable(struct clk_hw *hw)
 static void kona_peri_clk_disable(struct clk_hw *hw)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+       struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
 
        (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false);
 }
@@ -818,7 +820,7 @@ static void kona_peri_clk_disable(struct clk_hw *hw)
 static int kona_peri_clk_is_enabled(struct clk_hw *hw)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+       struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
 
        return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
 }
@@ -827,7 +829,7 @@ static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
                        unsigned long parent_rate)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct peri_clk_data *data = bcm_clk->peri;
+       struct peri_clk_data *data = bcm_clk->u.peri;
 
        return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
                                parent_rate);
@@ -837,20 +839,20 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long *parent_rate)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct bcm_clk_div *div = &bcm_clk->peri->div;
+       struct bcm_clk_div *div = &bcm_clk->u.peri->div;
 
        if (!divider_exists(div))
                return __clk_get_rate(hw->clk);
 
        /* Quietly avoid a zero rate */
-       return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div,
+       return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
                                rate ? rate : 1, *parent_rate, NULL);
 }
 
 static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct peri_clk_data *data = bcm_clk->peri;
+       struct peri_clk_data *data = bcm_clk->u.peri;
        struct bcm_clk_sel *sel = &data->sel;
        struct bcm_clk_trig *trig;
        int ret;
@@ -884,7 +886,7 @@ static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
 static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct peri_clk_data *data = bcm_clk->peri;
+       struct peri_clk_data *data = bcm_clk->u.peri;
        u8 index;
 
        index = selector_read_index(bcm_clk->ccu, &data->sel);
@@ -897,7 +899,7 @@ static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long parent_rate)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct peri_clk_data *data = bcm_clk->peri;
+       struct peri_clk_data *data = bcm_clk->u.peri;
        struct bcm_clk_div *div = &data->div;
        u64 scaled_div = 0;
        int ret;
@@ -958,7 +960,7 @@ struct clk_ops kona_peri_clk_ops = {
 static bool __peri_clk_init(struct kona_clk *bcm_clk)
 {
        struct ccu_data *ccu = bcm_clk->ccu;
-       struct peri_clk_data *peri = bcm_clk->peri;
+       struct peri_clk_data *peri = bcm_clk->u.peri;
        const char *name = bcm_clk->name;
        struct bcm_clk_trig *trig;
 
index 5e139adc3dc5909deee925ef38d2915894d52014..dee690951bb6c21677515039d5cac39770418414 100644 (file)
@@ -57,7 +57,7 @@
 #define divider_exists(div)            FLAG_TEST(div, DIV, EXISTS)
 #define divider_is_fixed(div)          FLAG_TEST(div, DIV, FIXED)
 #define divider_has_fraction(div)      (!divider_is_fixed(div) && \
-                                               (div)->frac_width > 0)
+                                               (div)->u.s.frac_width > 0)
 
 #define selector_exists(sel)           ((sel)->width != 0)
 #define trigger_exists(trig)           FLAG_TEST(trig, TRIG, EXISTS)
@@ -244,9 +244,9 @@ struct bcm_clk_div {
                        u32 frac_width; /* field fraction width */
 
                        u64 scaled_div; /* scaled divider value */
-               };
+               } s;
                u32 fixed;      /* non-zero fixed divider value */
-       };
+       } u;
        u32 flags;              /* BCM_CLK_DIV_FLAGS_* below */
 };
 
@@ -263,28 +263,28 @@ struct bcm_clk_div {
 /* A fixed (non-zero) divider */
 #define FIXED_DIVIDER(_value)                                          \
        {                                                               \
-               .fixed = (_value),                                      \
+               .u.fixed = (_value),                                    \
                .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED),            \
        }
 
 /* A divider with an integral divisor */
 #define DIVIDER(_offset, _shift, _width)                               \
        {                                                               \
-               .offset = (_offset),                                    \
-               .shift = (_shift),                                      \
-               .width = (_width),                                      \
-               .scaled_div = BAD_SCALED_DIV_VALUE,                     \
+               .u.s.offset = (_offset),                                \
+               .u.s.shift = (_shift),                                  \
+               .u.s.width = (_width),                                  \
+               .u.s.scaled_div = BAD_SCALED_DIV_VALUE,                 \
                .flags = FLAG(DIV, EXISTS),                             \
        }
 
 /* A divider whose divisor has an integer and fractional part */
 #define FRAC_DIVIDER(_offset, _shift, _width, _frac_width)             \
        {                                                               \
-               .offset = (_offset),                                    \
-               .shift = (_shift),                                      \
-               .width = (_width),                                      \
-               .frac_width = (_frac_width),                            \
-               .scaled_div = BAD_SCALED_DIV_VALUE,                     \
+               .u.s.offset = (_offset),                                \
+               .u.s.shift = (_shift),                                  \
+               .u.s.width = (_width),                                  \
+               .u.s.frac_width = (_frac_width),                        \
+               .u.s.scaled_div = BAD_SCALED_DIV_VALUE,                 \
                .flags = FLAG(DIV, EXISTS),                             \
        }
 
@@ -380,7 +380,7 @@ struct kona_clk {
        union {
                void *data;
                struct peri_clk_data *peri;
-       };
+       } u;
 };
 #define to_kona_clk(_hw) \
        container_of(_hw, struct kona_clk, hw)
index ec22112e569f7f3dc8f7c9477d0e3f99951e66c6..4637697c139f634a36d81169bad5b96c3553473d 100644 (file)
@@ -144,6 +144,37 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
        return true;
 }
 
+static int _round_up_table(const struct clk_div_table *table, int div)
+{
+       const struct clk_div_table *clkt;
+       int up = _get_table_maxdiv(table);
+
+       for (clkt = table; clkt->div; clkt++) {
+               if (clkt->div == div)
+                       return clkt->div;
+               else if (clkt->div < div)
+                       continue;
+
+               if ((clkt->div - div) < (up - div))
+                       up = clkt->div;
+       }
+
+       return up;
+}
+
+static int _div_round_up(struct clk_divider *divider,
+               unsigned long parent_rate, unsigned long rate)
+{
+       int div = DIV_ROUND_UP(parent_rate, rate);
+
+       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+               div = __roundup_pow_of_two(div);
+       if (divider->table)
+               div = _round_up_table(divider->table, div);
+
+       return div;
+}
+
 static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
                unsigned long *best_parent_rate)
 {
@@ -159,7 +190,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 
        if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
                parent_rate = *best_parent_rate;
-               bestdiv = DIV_ROUND_UP(parent_rate, rate);
+               bestdiv = _div_round_up(divider, parent_rate, rate);
                bestdiv = bestdiv == 0 ? 1 : bestdiv;
                bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
                return bestdiv;
@@ -219,6 +250,10 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
        u32 val;
 
        div = DIV_ROUND_UP(parent_rate, rate);
+
+       if (!_is_valid_div(divider, div))
+               return -EINVAL;
+
        value = _get_val(divider, div);
 
        if (value > div_mask(divider))
index dff0373f53c1fb1df1784366a28c35323cde17a5..7cf2c093cc54f28dbebd9482e43233a49daf02e3 100644 (file)
@@ -1984,9 +1984,28 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
 }
 EXPORT_SYMBOL_GPL(__clk_register);
 
-static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
+/**
+ * clk_register - allocate a new clock, register it and return an opaque cookie
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * clk_register is the primary interface for populating the clock tree with new
+ * clock nodes.  It returns a pointer to the newly allocated struct clk which
+ * cannot be dereferenced by driver code but may be used in conjuction with the
+ * rest of the clock API.  In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
+ */
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
        int i, ret;
+       struct clk *clk;
+
+       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+       if (!clk) {
+               pr_err("%s: could not allocate clk\n", __func__);
+               ret = -ENOMEM;
+               goto fail_out;
+       }
 
        clk->name = kstrdup(hw->init->name, GFP_KERNEL);
        if (!clk->name) {
@@ -2026,7 +2045,7 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
 
        ret = __clk_init(dev, clk);
        if (!ret)
-               return 0;
+               return clk;
 
 fail_parent_names_copy:
        while (--i >= 0)
@@ -2035,36 +2054,6 @@ fail_parent_names_copy:
 fail_parent_names:
        kfree(clk->name);
 fail_name:
-       return ret;
-}
-
-/**
- * clk_register - allocate a new clock, register it and return an opaque cookie
- * @dev: device that is registering this clock
- * @hw: link to hardware-specific clock data
- *
- * clk_register is the primary interface for populating the clock tree with new
- * clock nodes.  It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjuction with the
- * rest of the clock API.  In the event of an error clk_register will return an
- * error code; drivers must test for an error code after calling clk_register.
- */
-struct clk *clk_register(struct device *dev, struct clk_hw *hw)
-{
-       int ret;
-       struct clk *clk;
-
-       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
-       if (!clk) {
-               pr_err("%s: could not allocate clk\n", __func__);
-               ret = -ENOMEM;
-               goto fail_out;
-       }
-
-       ret = _clk_register(dev, hw, clk);
-       if (!ret)
-               return clk;
-
        kfree(clk);
 fail_out:
        return ERR_PTR(ret);
@@ -2151,9 +2140,10 @@ void clk_unregister(struct clk *clk)
 
        if (!hlist_empty(&clk->children)) {
                struct clk *child;
+               struct hlist_node *t;
 
                /* Reparent all children to the orphan list. */
-               hlist_for_each_entry(child, &clk->children, child_node)
+               hlist_for_each_entry_safe(child, t, &clk->children, child_node)
                        clk_set_parent(child, NULL);
        }
 
@@ -2173,7 +2163,7 @@ EXPORT_SYMBOL_GPL(clk_unregister);
 
 static void devm_clk_release(struct device *dev, void *res)
 {
-       clk_unregister(res);
+       clk_unregister(*(struct clk **)res);
 }
 
 /**
@@ -2188,18 +2178,18 @@ static void devm_clk_release(struct device *dev, void *res)
 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
 {
        struct clk *clk;
-       int ret;
+       struct clk **clkp;
 
-       clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
-       if (!clk)
+       clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
+       if (!clkp)
                return ERR_PTR(-ENOMEM);
 
-       ret = _clk_register(dev, hw, clk);
-       if (!ret) {
-               devres_add(dev, clk);
+       clk = clk_register(dev, hw);
+       if (!IS_ERR(clk)) {
+               *clkp = clk;
+               devres_add(dev, clkp);
        } else {
-               devres_free(clk);
-               clk = ERR_PTR(ret);
+               devres_free(clkp);
        }
 
        return clk;
index 2e5810c88d1150874ece970fb64136a450ef64a6..1f6324e29a8099b09930f26234c703d7973be686 100644 (file)
@@ -156,6 +156,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name,
 static void __init cpg_mstp_clocks_init(struct device_node *np)
 {
        struct mstp_clock_group *group;
+       const char *idxname;
        struct clk **clks;
        unsigned int i;
 
@@ -184,6 +185,11 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
        for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
                clks[i] = ERR_PTR(-ENOENT);
 
+       if (of_find_property(np, "clock-indices", &i))
+               idxname = "clock-indices";
+       else
+               idxname = "renesas,clock-indices";
+
        for (i = 0; i < MSTP_MAX_CLOCKS; ++i) {
                const char *parent_name;
                const char *name;
@@ -197,8 +203,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
                        continue;
 
                parent_name = of_clk_get_parent_name(np, i);
-               ret = of_property_read_u32_index(np, "renesas,clock-indices", i,
-                                                &clkidx);
+               ret = of_property_read_u32_index(np, idxname, i, &clkidx);
                if (parent_name == NULL || ret < 0)
                        break;
 
index 88dafb5e96270fa13923c5a95792c910a1bd00d4..de6da957a09d6ebe82f416370c84a7dc50acea8e 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 
 #include "clk.h"
 
@@ -43,6 +44,8 @@
 
 #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
 
+void __iomem *clk_mgr_base_addr;
+
 static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
                                         unsigned long parent_rate)
 {
@@ -87,6 +90,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
        const char *clk_name = node->name;
        const char *parent_name[SOCFPGA_MAX_PARENTS];
        struct clk_init_data init;
+       struct device_node *clkmgr_np;
        int rc;
        int i = 0;
 
@@ -96,6 +100,9 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
        if (WARN_ON(!pll_clk))
                return NULL;
 
+       clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
+       clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
+       BUG_ON(!clk_mgr_base_addr);
        pll_clk->hw.reg = clk_mgr_base_addr + reg;
 
        of_property_read_string(node, "clock-output-names", &clk_name);
index 35a960a993f95c72b6247e52032c4184cbdd00b1..43db947e5f0e51e60c232832d379faca9ca386d3 100644 (file)
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
 #include <linux/of.h>
-#include <linux/of_address.h>
 
 #include "clk.h"
 
-void __iomem *clk_mgr_base_addr;
-
-static const struct of_device_id socfpga_child_clocks[] __initconst = {
-       { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, },
-       { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, },
-       { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, },
-       {},
-};
-
-static void __init socfpga_clkmgr_init(struct device_node *node)
-{
-       clk_mgr_base_addr = of_iomap(node, 0);
-       of_clk_init(socfpga_child_clocks);
-}
-CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init);
+CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init);
+CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init);
+CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init);
 
index 0d20241e07704df196c72ef22510775d22f5bfde..e1769addf435ef1dd7e1643c6f6afc799e55f013 100644 (file)
@@ -1718,7 +1718,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
                                        "pll_re_vco");
        } else {
                val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
-               pll_writel(val, pll_params->aux_reg, pll);
+               pll_writel(val_aux, pll_params->aux_reg, pll);
        }
 
        clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
index 099967302bf25939019875846f7819461a4805c2..eab8ccfe6bebed652b8fec4849a5fc9fb67d82f3 100644 (file)
@@ -37,6 +37,7 @@
 #define BYT_RATIOS             0x66a
 #define BYT_VIDS               0x66b
 #define BYT_TURBO_RATIOS       0x66c
+#define BYT_TURBO_VIDS         0x66d
 
 
 #define FRAC_BITS 6
@@ -70,8 +71,9 @@ struct pstate_data {
 };
 
 struct vid_data {
-       int32_t min;
-       int32_t max;
+       int min;
+       int max;
+       int turbo;
        int32_t ratio;
 };
 
@@ -359,14 +361,14 @@ static int byt_get_min_pstate(void)
 {
        u64 value;
        rdmsrl(BYT_RATIOS, value);
-       return (value >> 8) & 0xFF;
+       return (value >> 8) & 0x3F;
 }
 
 static int byt_get_max_pstate(void)
 {
        u64 value;
        rdmsrl(BYT_RATIOS, value);
-       return (value >> 16) & 0xFF;
+       return (value >> 16) & 0x3F;
 }
 
 static int byt_get_turbo_pstate(void)
@@ -393,6 +395,9 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
        vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
        vid = fp_toint(vid_fp);
 
+       if (pstate > cpudata->pstate.max_pstate)
+               vid = cpudata->vid.turbo;
+
        val |= vid;
 
        wrmsrl(MSR_IA32_PERF_CTL, val);
@@ -402,13 +407,17 @@ static void byt_get_vid(struct cpudata *cpudata)
 {
        u64 value;
 
+
        rdmsrl(BYT_VIDS, value);
-       cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
-       cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
+       cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
+       cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
        cpudata->vid.ratio = div_fp(
                cpudata->vid.max - cpudata->vid.min,
                int_tofp(cpudata->pstate.max_pstate -
                        cpudata->pstate.min_pstate));
+
+       rdmsrl(BYT_TURBO_VIDS, value);
+       cpudata->vid.turbo = value & 0x7f;
 }
 
 
@@ -545,12 +554,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 
        if (pstate_funcs.get_vid)
                pstate_funcs.get_vid(cpu);
-
-       /*
-        * goto max pstate so we don't slow up boot if we are built-in if we are
-        * a module we will take care of it during normal operation
-        */
-       intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+       intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
 }
 
 static inline void intel_pstate_calc_busy(struct cpudata *cpu,
@@ -695,11 +699,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        cpu = all_cpu_data[cpunum];
 
        intel_pstate_get_cpu_pstates(cpu);
-       if (!cpu->pstate.current_pstate) {
-               all_cpu_data[cpunum] = NULL;
-               kfree(cpu);
-               return -ENODATA;
-       }
 
        cpu->cpu = cpunum;
 
@@ -710,7 +709,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        cpu->timer.expires = jiffies + HZ/100;
        intel_pstate_busy_pid_reset(cpu);
        intel_pstate_sample(cpu);
-       intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
 
        add_timer_on(&cpu->timer, cpunum);
 
index f0bc31f5db27a41db3d7f8556e72274b42a5eda5..d4add86219444af31891ef8c76013eaed0838282 100644 (file)
@@ -62,7 +62,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
        set_cpus_allowed_ptr(current, &cpus_allowed);
 
        /* setting the cpu frequency */
-       clk_set_rate(policy->clk, freq);
+       clk_set_rate(policy->clk, freq * 1000);
 
        return 0;
 }
@@ -92,7 +92,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
             i++)
                loongson2_clockmod_table[i].frequency = (rate * i) / 8;
 
-       ret = clk_set_rate(cpuclk, rate);
+       ret = clk_set_rate(cpuclk, rate * 1000);
        if (ret) {
                clk_put(cpuclk);
                return ret;
index 9f25f5296029aeb0a22e0caf81bf35ca41b00063..0eabd81e1a902711bb838eb93dfed8e3289be610 100644 (file)
        char *tmp;                                              \
                                                                \
        tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC);  \
-       sprintf(tmp, format, param);                            \
-       strcat(str, tmp);                                       \
-       kfree(tmp);                                             \
+       if (likely(tmp)) {                                      \
+               sprintf(tmp, format, param);                    \
+               strcat(str, tmp);                               \
+               kfree(tmp);                                     \
+       } else {                                                \
+               strcat(str, "kmalloc failure in SPRINTFCAT");   \
+       }                                                       \
 }
 
 static void report_jump_idx(u32 status, char *outstr)
index a886713937fd05b38fc10866bbce5266ac0bfae1..d5d30ed863ceb904c6e19e245b94d7ddba40f88f 100644 (file)
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_BIDIRECTIONAL);
        }
+       cnt = unmap->map_cnt;
        mempool_free(unmap, __get_unmap_pool(cnt)->pool);
 }
 
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
        memset(unmap, 0, sizeof(*unmap));
        kref_init(&unmap->kref);
        unmap->dev = dev;
+       unmap->map_cnt = nr;
 
        return unmap;
 }
index 766b68ed505c4d2b3964bfb1f0de6ab5ae1ff3a9..394cbc5c93e3600a8c918bfb673756bdd279ce7b 100644 (file)
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 
 static void mv_chan_activate(struct mv_xor_chan *chan)
 {
-       u32 activation;
-
        dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-       activation = readl_relaxed(XOR_ACTIVATION(chan));
-       activation |= 0x1;
-       writel_relaxed(activation, XOR_ACTIVATION(chan));
+
+       /* writel ensures all descriptors are flushed before activation */
+       writel(BIT(0), XOR_ACTIVATION(chan));
 }
 
 static char mv_chan_is_busy(struct mv_xor_chan *chan)
index 3ee852c9925b6a1aa1887df101662194eeb3c42c..071c2c969eec06ad929ecfb871c614297a615e9e 100644 (file)
@@ -756,6 +756,7 @@ static const struct {
         */
        { ACPI_SIG_IBFT },
        { "iBFT" },
+       { "BIFT" },     /* Broadcom iSCSI Offload */
 };
 
 static void __init acpi_find_ibft_region(void)
index e73c6755a5eb6b324d06f14f4a7cf574d025c8ca..70304220a479a9862f4f5709e6a9a88a785b487d 100644 (file)
@@ -305,6 +305,8 @@ static struct ichx_desc ich6_desc = {
 
        .ngpio = 50,
        .have_blink = true,
+       .regs = ichx_regs,
+       .reglen = ichx_reglen,
 };
 
 /* Intel 3100 */
@@ -324,6 +326,8 @@ static struct ichx_desc i3100_desc = {
        .uses_gpe0 = true,
 
        .ngpio = 50,
+       .regs = ichx_regs,
+       .reglen = ichx_reglen,
 };
 
 /* ICH7 and ICH8-based */
index 99a68310e7c09eb053f5cd28395549ece9f8babe..3d53fd6880d1970b074f6b0eda2575ea55e27476 100644 (file)
@@ -894,9 +894,11 @@ static int mcp23s08_probe(struct spi_device *spi)
                        dev_err(&spi->dev, "invalid spi-present-mask\n");
                        return -ENODEV;
                }
-
-               for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++)
+               for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
+                       if ((spi_present_mask & (1 << addr)))
+                               chips++;
                        pullups[addr] = 0;
+               }
        } else {
                type = spi_get_device_id(spi)->driver_data;
                pdata = dev_get_platdata(&spi->dev);
@@ -919,12 +921,12 @@ static int mcp23s08_probe(struct spi_device *spi)
                        pullups[addr] = pdata->chip[addr].pullups;
                }
 
-               if (!chips)
-                       return -ENODEV;
-
                base = pdata->base;
        }
 
+       if (!chips)
+               return -ENODEV;
+
        data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08),
                        GFP_KERNEL);
        if (!data)
index fa486c5fbb0250650b558632c6f63aa0ebf5bd63..aff4a113cda3c0cd724d3b4d13ce0fb8bbc48313 100644 (file)
@@ -560,47 +560,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
        dev_priv->vbt.edp_pps = *edp_pps;
 
-       dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
-               DP_LINK_BW_1_62;
+       switch (edp_link_params->rate) {
+       case EDP_RATE_1_62:
+               dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
+               break;
+       case EDP_RATE_2_7:
+               dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
+               break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
+                             edp_link_params->rate);
+               break;
+       }
+
        switch (edp_link_params->lanes) {
-       case 0:
+       case EDP_LANE_1:
                dev_priv->vbt.edp_lanes = 1;
                break;
-       case 1:
+       case EDP_LANE_2:
                dev_priv->vbt.edp_lanes = 2;
                break;
-       case 3:
-       default:
+       case EDP_LANE_4:
                dev_priv->vbt.edp_lanes = 4;
                break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
+                             edp_link_params->lanes);
+               break;
        }
+
        switch (edp_link_params->preemphasis) {
-       case 0:
+       case EDP_PREEMPHASIS_NONE:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
                break;
-       case 1:
+       case EDP_PREEMPHASIS_3_5dB:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
                break;
-       case 2:
+       case EDP_PREEMPHASIS_6dB:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
                break;
-       case 3:
+       case EDP_PREEMPHASIS_9_5dB:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
                break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
+                             edp_link_params->preemphasis);
+               break;
        }
+
        switch (edp_link_params->vswing) {
-       case 0:
+       case EDP_VSWING_0_4V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
                break;
-       case 1:
+       case EDP_VSWING_0_6V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
                break;
-       case 2:
+       case EDP_VSWING_0_8V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
                break;
-       case 3:
+       case EDP_VSWING_1_2V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
                break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
+                             edp_link_params->vswing);
+               break;
        }
 }
 
index 5ca68aa9f237e0422350a537ce48075e3c23b8ae..2a00cb828d20c7549a7b00a6745c228998cc136b 100644 (file)
@@ -121,6 +121,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
        return max_link_bw;
 }
 
+static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       u8 source_max, sink_max;
+
+       source_max = 4;
+       if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
+           (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
+               source_max = 2;
+
+       sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+
+       return min(source_max, sink_max);
+}
+
 /*
  * The units on the numbers in the next two are... bizarre.  Examples will
  * make it clearer; this one parallels an example in the eDP spec.
@@ -171,7 +187,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
        }
 
        max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
-       max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+       max_lanes = intel_dp_max_lane_count(intel_dp);
 
        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);
@@ -751,8 +767,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        struct intel_crtc *intel_crtc = encoder->new_crtc;
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
-       int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+       int min_lane_count = 1;
+       int max_lane_count = intel_dp_max_lane_count(intel_dp);
        /* Conveniently, the link BW constants become indices with a shift...*/
+       int min_clock = 0;
        int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
        int bpp, mode_rate;
        static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
@@ -785,19 +803,38 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = pipe_config->pipe_bpp;
-       if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
-           dev_priv->vbt.edp_bpp < bpp) {
-               DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
-                             dev_priv->vbt.edp_bpp);
-               bpp = dev_priv->vbt.edp_bpp;
+       if (is_edp(intel_dp)) {
+               if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
+                       DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+                                     dev_priv->vbt.edp_bpp);
+                       bpp = dev_priv->vbt.edp_bpp;
+               }
+
+               if (IS_BROADWELL(dev)) {
+                       /* Yes, it's an ugly hack. */
+                       min_lane_count = max_lane_count;
+                       DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
+                                     min_lane_count);
+               } else if (dev_priv->vbt.edp_lanes) {
+                       min_lane_count = min(dev_priv->vbt.edp_lanes,
+                                            max_lane_count);
+                       DRM_DEBUG_KMS("using min %u lanes per VBT\n",
+                                     min_lane_count);
+               }
+
+               if (dev_priv->vbt.edp_rate) {
+                       min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
+                       DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
+                                     bws[min_clock]);
+               }
        }
 
        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);
 
-               for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
-                       for (clock = 0; clock <= max_clock; clock++) {
+               for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
+                       for (clock = min_clock; clock <= max_clock; clock++) {
                                link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);
index fce4a0d93c0b19b7d51f4d2578331b13aef51399..f73ba5e6b7a8d685b4530e16c6735c095c2aeb5f 100644 (file)
@@ -387,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                                                          height);
                }
 
+               /* No preferred mode marked by the EDID? Are there any modes? */
+               if (!modes[i] && !list_empty(&connector->modes)) {
+                       DRM_DEBUG_KMS("using first mode listed on connector %s\n",
+                                     drm_get_connector_name(connector));
+                       modes[i] = list_first_entry(&connector->modes,
+                                                   struct drm_display_mode,
+                                                   head);
+               }
+
                /* last resort: use current mode */
                if (!modes[i]) {
                        /*
index 0eead16aeda7404053de8c9885992b0904c1049c..cb8cfb7e09749938383c18a7eb6e2bc149823095 100644 (file)
@@ -492,6 +492,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 freq;
        unsigned long flags;
+       u64 n;
 
        if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
@@ -502,10 +503,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
 
        /* scale to hardware max, but be careful to not overflow */
        freq = panel->backlight.max;
-       if (freq < max)
-               level = level * freq / max;
-       else
-               level = freq / max * level;
+       n = (u64)level * freq;
+       do_div(n, max);
+       level = n;
 
        panel->backlight.level = level;
        if (panel->backlight.device)
index 19e94c3edc1957d96b928fb23d696b393216fc42..d93dcf683e8c3695960ca93c92dce87a7823d3f2 100644 (file)
@@ -2095,6 +2095,43 @@ static void intel_print_wm_latency(struct drm_device *dev,
        }
 }
 
+static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
+                                   uint16_t wm[5], uint16_t min)
+{
+       int level, max_level = ilk_wm_max_level(dev_priv->dev);
+
+       if (wm[0] >= min)
+               return false;
+
+       wm[0] = max(wm[0], min);
+       for (level = 1; level <= max_level; level++)
+               wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+
+       return true;
+}
+
+static void snb_wm_latency_quirk(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       bool changed;
+
+       /*
+        * The BIOS provided WM memory latency values are often
+        * inadequate for high resolution displays. Adjust them.
+        */
+       changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
+               ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
+               ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+
+       if (!changed)
+               return;
+
+       DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
+       intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+       intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+       intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
+
 static void ilk_setup_wm_latency(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2112,6 +2149,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
        intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
        intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
        intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+
+       if (IS_GEN6(dev))
+               snb_wm_latency_quirk(dev);
 }
 
 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
index d27155adf5db2b039dee51b9aa0de25e60c8967e..46be00d66df3da3e74597ba9c02f0ac745741751 100644 (file)
@@ -2424,8 +2424,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
        if (ret < 0)
                goto err1;
 
-       ret = sysfs_create_link(&encoder->ddc.dev.kobj,
-                               &drm_connector->kdev->kobj,
+       ret = sysfs_create_link(&drm_connector->kdev->kobj,
+                               &encoder->ddc.dev.kobj,
                                encoder->ddc.dev.kobj.name);
        if (ret < 0)
                goto err2;
index f729dc71d5beb031599aca72f82c90ec946e2fe0..d0c75779d3f6f91e9cc98b0f1a853344783fdb48 100644 (file)
@@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 {
        __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                           _MASKED_BIT_DISABLE(0xffff));
+       __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+                          _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_VLV */
        __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
 }
index 7762665ad8fdb5b461680ee84b7b224b77a5d4d5..876de9ac3793fd30af193ac7e8410818bdb56e7a 100644 (file)
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
        }
 
        if (outp == 8)
-               return false;
+               return conf;
 
        data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
        if (data == 0x0000)
index 43fec17ea540b6c53af704931d4baf29d2391e4e..bbf117be572f4617cff0fd06907fb2a13977ae93 100644 (file)
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
                case 0x00: return 2;
                case 0x19: return 1;
                case 0x1c: return 0;
+               case 0x1e: return 2;
                default:
                        break;
                }
index 68528619834a75eece9f3b6c6d59cd34a0b03f73..8149e7cf430330095da34f98d27f842832c2c91c 100644 (file)
@@ -1642,6 +1642,7 @@ struct radeon_vce {
        unsigned                fb_version;
        atomic_t                handles[RADEON_MAX_VCE_HANDLES];
        struct drm_file         *filp[RADEON_MAX_VCE_HANDLES];
+       unsigned                img_size[RADEON_MAX_VCE_HANDLES];
        struct delayed_work     idle_work;
 };
 
@@ -1655,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
                               uint32_t handle, struct radeon_fence **fence);
 void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
 void radeon_vce_note_usage(struct radeon_device *rdev);
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
 int radeon_vce_cs_parse(struct radeon_cs_parser *p);
 bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
                               struct radeon_ring *ring,
@@ -2640,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
 #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
 #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
-#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI))
+#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
+                            (rdev->family == CHIP_MULLINS))
 
 #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
                              (rdev->ddev->pdev->device == 0x6850) || \
index b3633d9a531703a1cd4189c061a0907e89ee1085..9ab30976287d4c27e0dc94e0cadfa993e6ec77cc 100644 (file)
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
                }
        }
 
+       if (!found) {
+               while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+                       dhandle = ACPI_HANDLE(&pdev->dev);
+                       if (!dhandle)
+                               continue;
+
+                       status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+                       if (!ACPI_FAILURE(status)) {
+                               found = true;
+                               break;
+                       }
+               }
+       }
+
        if (!found)
                return false;
 
index 408b6ac53f0b808c525f86b99e90922716f5e6dc..f00dbbf4d806511a86b034a73c2d68adac971b99 100644 (file)
@@ -999,7 +999,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
 
        /* avoid high jitter with small fractional dividers */
        if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
-               fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
+               fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
                if (fb_div < fb_div_min) {
                        unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
                        fb_div *= tmp;
index 0cc47f12d9957d916b41acf4d3874fa062313175..eaaedba0467595aaced591c6d70666c75ac97211 100644 (file)
@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return r;
                }
 
-               r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-               if (r) {
-                       radeon_vm_fini(rdev, &fpriv->vm);
-                       kfree(fpriv);
-                       return r;
-               }
+               if (rdev->accel_working) {
+                       r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+                       if (r) {
+                               radeon_vm_fini(rdev, &fpriv->vm);
+                               kfree(fpriv);
+                               return r;
+                       }
 
-               /* map the ib pool buffer read only into
-                * virtual address space */
-               bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-                                        rdev->ring_tmp_bo.bo);
-               r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-                                         RADEON_VM_PAGE_READABLE |
-                                         RADEON_VM_PAGE_SNOOPED);
+                       /* map the ib pool buffer read only into
+                        * virtual address space */
+                       bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+                                                rdev->ring_tmp_bo.bo);
+                       r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+                                                 RADEON_VM_PAGE_READABLE |
+                                                 RADEON_VM_PAGE_SNOOPED);
 
-               radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
-               if (r) {
-                       radeon_vm_fini(rdev, &fpriv->vm);
-                       kfree(fpriv);
-                       return r;
+                       radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+                       if (r) {
+                               radeon_vm_fini(rdev, &fpriv->vm);
+                               kfree(fpriv);
+                               return r;
+                       }
                }
-
                file_priv->driver_priv = fpriv;
        }
 
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
                struct radeon_bo_va *bo_va;
                int r;
 
-               r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-               if (!r) {
-                       bo_va = radeon_vm_bo_find(&fpriv->vm,
-                                                 rdev->ring_tmp_bo.bo);
-                       if (bo_va)
-                               radeon_vm_bo_rmv(rdev, bo_va);
-                       radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+               if (rdev->accel_working) {
+                       r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+                       if (!r) {
+                               bo_va = radeon_vm_bo_find(&fpriv->vm,
+                                                         rdev->ring_tmp_bo.bo);
+                               if (bo_va)
+                                       radeon_vm_bo_rmv(rdev, bo_va);
+                               radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+                       }
                }
 
                radeon_vm_fini(rdev, &fpriv->vm);
index 19bec0dbfa38bf052db75cb52401adfcb29ac986..4faa4d6f9bb4f0616e0575d916069b47fa9389ed 100644 (file)
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
-                       if (current_domain != RADEON_GEM_DOMAIN_CPU &&
+                       if ((lobj->alt_domain & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
-       if (bo->mem.mem_type == TTM_PL_VRAM) {
-               size = bo->mem.num_pages << PAGE_SHIFT;
-               offset = bo->mem.start << PAGE_SHIFT;
-               if ((offset + size) > rdev->mc.visible_vram_size) {
-                       /* hurrah the memory is not visible ! */
-                       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-                       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-                       r = ttm_bo_validate(bo, &rbo->placement, false, false);
-                       if (unlikely(r != 0))
-                               return r;
-                       offset = bo->mem.start << PAGE_SHIFT;
-                       /* this should not happen */
-                       if ((offset + size) > rdev->mc.visible_vram_size)
-                               return -EINVAL;
-               }
+       if (bo->mem.mem_type != TTM_PL_VRAM)
+               return 0;
+
+       size = bo->mem.num_pages << PAGE_SHIFT;
+       offset = bo->mem.start << PAGE_SHIFT;
+       if ((offset + size) <= rdev->mc.visible_vram_size)
+               return 0;
+
+       /* hurrah the memory is not visible ! */
+       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+       r = ttm_bo_validate(bo, &rbo->placement, false, false);
+       if (unlikely(r == -ENOMEM)) {
+               radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+               return ttm_bo_validate(bo, &rbo->placement, false, false);
+       } else if (unlikely(r != 0)) {
+               return r;
        }
+
+       offset = bo->mem.start << PAGE_SHIFT;
+       /* this should never happen */
+       if ((offset + size) > rdev->mc.visible_vram_size)
+               return -EINVAL;
+
        return 0;
 }
 
index f30b8426eee233b286e45ca0fea9a39de8b47ff0..53d6e1bb48dc326bb3487fde5f02749df56d4e57 100644 (file)
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set profile when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set method when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+               count = -EINVAL;
+               goto fail;
+       }
+
        /* we don't support the legacy modes with dpm */
        if (rdev->pm.pm_method == PM_METHOD_DPM) {
                count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return snprintf(buf, PAGE_SIZE, "off\n");
+
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set dpm state when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (strncmp("battery", buf, strlen("battery")) == 0)
                rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
 
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return snprintf(buf, PAGE_SIZE, "off\n");
+
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
                        (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
        enum radeon_dpm_forced_level level;
        int ret = 0;
 
+       /* Can't force performance level when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (strncmp("low", buf, strlen("low")) == 0) {
                level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
                                      char *buf)
 {
        struct radeon_device *rdev = dev_get_drvdata(dev);
+       struct drm_device *ddev = rdev->ddev;
        int temp;
 
+       /* Can't get temperature when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        if (rdev->asic->pm.get_temperature)
                temp = radeon_get_temperature(rdev);
        else
@@ -1614,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
+       struct drm_device *ddev = rdev->ddev;
 
-       if (rdev->pm.dpm_enabled) {
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+               seq_printf(m, "PX asic powered off\n");
+       } else if (rdev->pm.dpm_enabled) {
                mutex_lock(&rdev->pm.mutex);
                if (rdev->asic->dpm.debugfs_print_current_performance_level)
                        radeon_dpm_debugfs_print_current_performance_level(rdev, m);
index f73324c81491fe52c472fdd8bab6558c347db2c3..3971d968af6c0d86d0ba6e6f08f94d26714e17ec 100644 (file)
@@ -443,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
  * @p: parser context
  * @lo: address of lower dword
  * @hi: address of higher dword
+ * @size: size of checker for relocation buffer
  *
  * Patch relocation inside command stream with real buffer address
  */
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+                       unsigned size)
 {
        struct radeon_cs_chunk *relocs_chunk;
-       uint64_t offset;
+       struct radeon_cs_reloc *reloc;
+       uint64_t start, end, offset;
        unsigned idx;
 
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -462,14 +465,59 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
                return -EINVAL;
        }
 
-       offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
+       reloc = p->relocs_ptr[(idx / 4)];
+       start = reloc->gpu_offset;
+       end = start + radeon_bo_size(reloc->robj);
+       start += offset;
 
-        p->ib.ptr[lo] = offset & 0xFFFFFFFF;
-        p->ib.ptr[hi] = offset >> 32;
+       p->ib.ptr[lo] = start & 0xFFFFFFFF;
+       p->ib.ptr[hi] = start >> 32;
+
+       if (end <= start) {
+               DRM_ERROR("invalid reloc offset %llX!\n", offset);
+               return -EINVAL;
+       }
+       if ((end - start) < size) {
+               DRM_ERROR("buffer to small (%d / %d)!\n",
+                       (unsigned)(end - start), size);
+               return -EINVAL;
+       }
 
        return 0;
 }
 
+/**
+ * radeon_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ *
+ * Validates the handle and return the found session index or -EINVAL
+ * we we don't have another free session index.
+ */
+int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+{
+       unsigned i;
+
+       /* validate the handle */
+       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+               if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+                       return i;
+       }
+
+       /* handle not found try to alloc a new one */
+       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+               if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+                       p->rdev->vce.filp[i] = p->filp;
+                       p->rdev->vce.img_size[i] = 0;
+                       return i;
+               }
+       }
+
+       DRM_ERROR("No more free VCE handles!\n");
+       return -EINVAL;
+}
+
 /**
  * radeon_vce_cs_parse - parse and validate the command stream
  *
@@ -478,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
  */
 int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 {
-       uint32_t handle = 0;
-       bool destroy = false;
+       int session_idx = -1;
+       bool destroyed = false;
+       uint32_t tmp, handle = 0;
+       uint32_t *size = &tmp;
        int i, r;
 
        while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -491,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        return -EINVAL;
                }
 
+               if (destroyed) {
+                       DRM_ERROR("No other command allowed after destroy!\n");
+                       return -EINVAL;
+               }
+
                switch (cmd) {
                case 0x00000001: // session
                        handle = radeon_get_ib_value(p, p->idx + 2);
+                       session_idx = radeon_vce_validate_handle(p, handle);
+                       if (session_idx < 0)
+                               return session_idx;
+                       size = &p->rdev->vce.img_size[session_idx];
                        break;
 
                case 0x00000002: // task info
+                       break;
+
                case 0x01000001: // create
+                       *size = radeon_get_ib_value(p, p->idx + 8) *
+                               radeon_get_ib_value(p, p->idx + 10) *
+                               8 * 3 / 2;
+                       break;
+
                case 0x04000001: // config extension
                case 0x04000002: // pic control
                case 0x04000005: // rate control
@@ -506,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        break;
 
                case 0x03000001: // encode
-                       r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
+                       r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+                                               *size);
                        if (r)
                                return r;
 
-                       r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
+                       r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+                                               *size / 3);
                        if (r)
                                return r;
                        break;
 
                case 0x02000001: // destroy
-                       destroy = true;
+                       destroyed = true;
                        break;
 
                case 0x05000001: // context buffer
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               *size * 2);
+                       if (r)
+                               return r;
+                       break;
+
                case 0x05000004: // video bitstream buffer
+                       tmp = radeon_get_ib_value(p, p->idx + 4);
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               tmp);
+                       if (r)
+                               return r;
+                       break;
+
                case 0x05000005: // feedback buffer
-                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               4096);
                        if (r)
                                return r;
                        break;
@@ -532,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        return -EINVAL;
                }
 
+               if (session_idx == -1) {
+                       DRM_ERROR("no session command at start of IB\n");
+                       return -EINVAL;
+               }
+
                p->idx += len / 4;
        }
 
-       if (destroy) {
+       if (destroyed) {
                /* IB contains a destroy msg, free the handle */
                for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
                        atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
-
-               return 0;
-        }
-
-       /* create or encode, validate the handle */
-       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-               if (atomic_read(&p->rdev->vce.handles[i]) == handle)
-                       return 0;
        }
 
-       /* handle not found try to alloc a new one */
-       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-               if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
-                       p->rdev->vce.filp[i] = p->filp;
-                       return 0;
-               }
-       }
-
-       DRM_ERROR("No more free VCE handles!\n");
-       return -EINVAL;
+       return 0;
 }
 
 /**
index 2aae6ce49d3286888ea22347b60d71cc5220eef4..d9ab99f47612743bb41361a8cca8114999e52fca 100644 (file)
@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
        ndw = 64;
 
        /* assume the worst case */
-       ndw += vm->max_pde_used * 12;
+       ndw += vm->max_pde_used * 16;
 
        /* update too big for an IB */
        if (ndw > 0xfffff)
index 683532f849311d1ca19de3daccb168b368d4c0b5..7321283602ce0c1d8429193efe4221ba26a33761 100644 (file)
 #define                SPLL_CHG_STATUS                         (1 << 1)
 #define        SPLL_CNTL_MODE                                  0x618
 #define                SPLL_SW_DIR_CONTROL                     (1 << 0)
-#      define SPLL_REFCLK_SEL(x)                       ((x) << 8)
-#      define SPLL_REFCLK_SEL_MASK                     0xFF00
+#      define SPLL_REFCLK_SEL(x)                       ((x) << 26)
+#      define SPLL_REFCLK_SEL_MASK                     (3 << 26)
 
 #define        CG_SPLL_SPREAD_SPECTRUM                         0x620
 #define                SSEN                                    (1 << 0)
index bc196f49ec53be2e13959a9ebca9aa9c82edcfff..4af0da96c2e22a6d18474cf60e34649acc11167b 100644 (file)
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
 
 config SENSORS_NTC_THERMISTOR
        tristate "NTC thermistor support"
-       depends on (!OF && !IIO) || (OF && IIO)
+       depends on !OF || IIO=n || IIO
        help
          This driver supports NTC thermistors sensor reading and its
          interpretation. The driver can also monitor the temperature and
index 90ec1173b8a125c629542e079cac66872ea60ec4..01723f04fe45bfa449b60d07a05cd905cc9ccc98 100644 (file)
@@ -163,7 +163,7 @@ static ssize_t store_hyst(struct device *dev,
        if (retval < 0)
                goto fail;
 
-       hyst = val - retval * 1000;
+       hyst = retval * 1000 - val;
        hyst = DIV_ROUND_CLOSEST(hyst, 1000);
        if (hyst < 0 || hyst > 255) {
                retval = -ERANGE;
@@ -330,7 +330,7 @@ static int emc1403_detect(struct i2c_client *client,
        }
 
        id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
-       if (id != 0x01)
+       if (id < 0x01 || id > 0x04)
                return -ENODEV;
 
        return 0;
@@ -355,9 +355,9 @@ static int emc1403_probe(struct i2c_client *client,
        if (id->driver_data)
                data->groups[1] = &emc1404_group;
 
-       hwmon_dev = hwmon_device_register_with_groups(&client->dev,
-                                                     client->name, data,
-                                                     data->groups);
+       hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+                                                          client->name, data,
+                                                          data->groups);
        if (IS_ERR(hwmon_dev))
                return PTR_ERR(hwmon_dev);
 
index 8a17f01e8672065d7099e4c3ae156f93705eade4..e76feb86a1d4c0b07ab773675ef1244b88ee9b98 100644 (file)
@@ -44,6 +44,7 @@ struct ntc_compensation {
        unsigned int    ohm;
 };
 
+/* Order matters, ntc_match references the entries by index */
 static const struct platform_device_id ntc_thermistor_id[] = {
        { "ncp15wb473", TYPE_NCPXXWB473 },
        { "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
        char name[PLATFORM_NAME_SIZE];
 };
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
 static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 {
        struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 
 static const struct of_device_id ntc_match[] = {
        { .compatible = "ntc,ncp15wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[0] },
        { .compatible = "ntc,ncp18wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[1] },
        { .compatible = "ntc,ncp21wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[2] },
        { .compatible = "ntc,ncp03wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[3] },
        { .compatible = "ntc,ncp15wl333",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
+               .data = &ntc_thermistor_id[4] },
        { },
 };
 MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
        return NULL;
 }
 
+#define ntc_match      NULL
+
 static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
 { }
 #endif
index 22e92c3d3d07448cea9aa37fcc4ad6c612570e13..3c20e4bd6dd1380238df20f06941828ec84aadd0 100644 (file)
@@ -422,6 +422,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
         */
        dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
 
+       /* enforce disabled interrupts (due to HW issues) */
+       i2c_dw_disable_int(dev);
+
        /* Enable the adapter */
        __i2c_dw_enable(dev, true);
 
index 28cbe1b2a2ec2958547b843d2f86d672fedb5e0b..32c85e9ecdaeb141f0653c3a7e8c85784a1140ce 100644 (file)
@@ -999,7 +999,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
 
        dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
                                resource_size(&adev->res));
-       if (IS_ERR(dev->virtbase)) {
+       if (!dev->virtbase) {
                ret = -ENOMEM;
                goto err_no_mem;
        }
index 1b4cf14f1106aac597b1ab0346ed848e2139ba22..2a5efb5b487cdc2e4fb8774e94167f6af53ca7d8 100644 (file)
@@ -479,7 +479,7 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
        int ret, idx;
 
        ret = pm_runtime_get_sync(qup->dev);
-       if (ret)
+       if (ret < 0)
                goto out;
 
        writel(1, qup->base + QUP_SW_RESET);
index d4fa8eba6e9d2e40e11cb0ccba5ee64606dd8fff..06d47aafbb79c75a18a66e12964544a9a2b3496f 100644 (file)
@@ -561,6 +561,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
        ret = -EINVAL;
        for (i = 0; i < num; i++) {
+               /* This HW can't send STOP after address phase */
+               if (msgs[i].len == 0) {
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+
                /*-------------- spin lock -----------------*/
                spin_lock_irqsave(&priv->lock, flags);
 
@@ -625,7 +631,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
 static u32 rcar_i2c_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+       /* This HW can't do SMBUS_QUICK and NOSTART */
+       return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
 }
 
 static const struct i2c_algorithm rcar_i2c_algo = {
index ae4491062e411a83a5729a1e267e2430f23f069c..bb3a9964f7e00c2b2c604e32490b0c5f015c4bfe 100644 (file)
@@ -1276,10 +1276,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
        struct platform_device *pdev = to_platform_device(dev);
        struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
 
-       i2c->suspended = 0;
        clk_prepare_enable(i2c->clk);
        s3c24xx_i2c_init(i2c);
        clk_disable_unprepare(i2c->clk);
+       i2c->suspended = 0;
 
        return 0;
 }
index 1b6dbe156a3708692a743cd58fc3351d4b01e533..199c7896f08188ca40fa9cd30462f0631d387338 100644 (file)
@@ -48,6 +48,7 @@
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
 
 #include "mlx4_ib.h"
 #include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
 }
 #endif
 
+#define MLX4_IB_INVALID_MAC    ((u64)-1)
+static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
+                              struct net_device *dev,
+                              int port)
+{
+       u64 new_smac = 0;
+       u64 release_mac = MLX4_IB_INVALID_MAC;
+       struct mlx4_ib_qp *qp;
+
+       read_lock(&dev_base_lock);
+       new_smac = mlx4_mac_to_u64(dev->dev_addr);
+       read_unlock(&dev_base_lock);
+
+       mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
+       qp = ibdev->qp1_proxy[port - 1];
+       if (qp) {
+               int new_smac_index;
+               u64 old_smac = qp->pri.smac;
+               struct mlx4_update_qp_params update_params;
+
+               if (new_smac == old_smac)
+                       goto unlock;
+
+               new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
+
+               if (new_smac_index < 0)
+                       goto unlock;
+
+               update_params.smac_index = new_smac_index;
+               if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+                                  &update_params)) {
+                       release_mac = new_smac;
+                       goto unlock;
+               }
+
+               qp->pri.smac = new_smac;
+               qp->pri.smac_index = new_smac_index;
+
+               release_mac = old_smac;
+       }
+
+unlock:
+       mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
+       if (release_mac != MLX4_IB_INVALID_MAC)
+               mlx4_unregister_mac(ibdev->dev, port, release_mac);
+}
+
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
                                 struct mlx4_ib_dev *ibdev, u8 port)
 {
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
        return 0;
 }
 
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
+                                struct net_device *dev,
+                                unsigned long event)
+
 {
        struct mlx4_ib_iboe *iboe;
+       int update_qps_port = -1;
        int port;
 
        iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
                }
                curr_master = iboe->masters[port - 1];
 
+               if (dev == iboe->netdevs[port - 1] &&
+                   (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
+                    event == NETDEV_UP || event == NETDEV_CHANGE))
+                       update_qps_port = port;
+
                if (curr_netdev) {
                        port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
                                                IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
        }
 
        spin_unlock(&iboe->lock);
+
+       if (update_qps_port > 0)
+               mlx4_ib_update_qps(ibdev, dev, update_qps_port);
 }
 
 static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
                return NOTIFY_DONE;
 
        ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
-       mlx4_ib_scan_netdevs(ibdev);
+       mlx4_ib_scan_netdevs(ibdev, dev, event);
 
        return NOTIFY_DONE;
 }
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                goto err_map;
 
        for (i = 0; i < ibdev->num_ports; ++i) {
+               mutex_init(&ibdev->qp1_proxy_lock[i]);
                if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
                                                IB_LINK_LAYER_ETHERNET) {
                        err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                for (i = 1 ; i <= ibdev->num_ports ; ++i)
                        reset_gid_table(ibdev, i);
                rtnl_lock();
-               mlx4_ib_scan_netdevs(ibdev);
+               mlx4_ib_scan_netdevs(ibdev, NULL, 0);
                rtnl_unlock();
                mlx4_ib_init_gid_table(ibdev);
        }
index f589522fddfd9efa4e32fdd0a7e8f63e49a54927..66b0b7dbd9f41cac95cfb76b4e766249e1abd83b 100644 (file)
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
        int steer_qpn_count;
        int steer_qpn_base;
        int steering_support;
+       struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
+       /* lock when destroying qp1_proxy and getting netdev events */
+       struct mutex            qp1_proxy_lock[MLX4_MAX_PORTS];
 };
 
 struct ib_event_work {
index 41308af4163c3dc852adc23f983bdd86d279374e..dc57482ae7af2b2ed8935676a67b459847b3302c 100644 (file)
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
        if (is_qp0(dev, mqp))
                mlx4_CLOSE_PORT(dev->dev, mqp->port);
 
+       if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+               mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+               dev->qp1_proxy[mqp->port - 1] = NULL;
+               mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+       }
+
        pd = get_pd(mqp);
        destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
 
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                                err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
                                if (err)
                                        return -EINVAL;
+                               if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+                                       dev->qp1_proxy[qp->port - 1] = qp;
                        }
                }
        }
index c98fdb185931644bd3ed92d337f85aa0d2517149..a1710465faaf2345ae9c88755790e3e74a6e76e8 100644 (file)
@@ -28,6 +28,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
+#include <linux/semaphore.h>
 
 #include "isert_proto.h"
 #include "ib_isert.h"
@@ -561,7 +562,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
        int ret = 0;
-       u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
+       u8 pi_support;
+
+       spin_lock_bh(&np->np_thread_lock);
+       if (!np->enabled) {
+               spin_unlock_bh(&np->np_thread_lock);
+               pr_debug("iscsi_np is not enabled, reject connect request\n");
+               return rdma_reject(cma_id, NULL, 0);
+       }
+       spin_unlock_bh(&np->np_thread_lock);
 
        pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
                 cma_id, cma_id->context);
@@ -652,6 +661,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                goto out_mr;
        }
 
+       pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
        if (pi_support && !device->pi_capable) {
                pr_err("Protection information requested but not supported\n");
                ret = -EINVAL;
@@ -663,11 +673,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                goto out_conn_dev;
 
        mutex_lock(&isert_np->np_accept_mutex);
-       list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node);
+       list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
        mutex_unlock(&isert_np->np_accept_mutex);
 
-       pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
-       wake_up(&isert_np->np_accept_wq);
+       pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+       up(&isert_np->np_sem);
        return 0;
 
 out_conn_dev:
@@ -2999,7 +3009,7 @@ isert_setup_np(struct iscsi_np *np,
                pr_err("Unable to allocate struct isert_np\n");
                return -ENOMEM;
        }
-       init_waitqueue_head(&isert_np->np_accept_wq);
+       sema_init(&isert_np->np_sem, 0);
        mutex_init(&isert_np->np_accept_mutex);
        INIT_LIST_HEAD(&isert_np->np_accept_list);
        init_completion(&isert_np->np_login_comp);
@@ -3047,18 +3057,6 @@ out:
        return ret;
 }
 
-static int
-isert_check_accept_queue(struct isert_np *isert_np)
-{
-       int empty;
-
-       mutex_lock(&isert_np->np_accept_mutex);
-       empty = list_empty(&isert_np->np_accept_list);
-       mutex_unlock(&isert_np->np_accept_mutex);
-
-       return empty;
-}
-
 static int
 isert_rdma_accept(struct isert_conn *isert_conn)
 {
@@ -3151,16 +3149,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
        int max_accept = 0, ret;
 
 accept_wait:
-       ret = wait_event_interruptible(isert_np->np_accept_wq,
-                       !isert_check_accept_queue(isert_np) ||
-                       np->np_thread_state == ISCSI_NP_THREAD_RESET);
+       ret = down_interruptible(&isert_np->np_sem);
        if (max_accept > 5)
                return -ENODEV;
 
        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
-               pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+               pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);
index 4c072ae34c01a3021e57cb5378a3949b97d0876c..da6612e6800004b0984880d54ef57bcaab1b0be1 100644 (file)
@@ -182,7 +182,7 @@ struct isert_device {
 };
 
 struct isert_np {
-       wait_queue_head_t       np_accept_wq;
+       struct semaphore        np_sem;
        struct rdma_cm_id       *np_cm_id;
        struct mutex            np_accept_mutex;
        struct list_head        np_accept_list;
index 422807ff0999ffb8e5b695caa1b2afee732cd120..d260605e6d5fb1b0cb6ca30c5d7e960ab43b4999 100644 (file)
@@ -272,6 +272,18 @@ config SHMOBILE_IOMMU_L1SIZE
        default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB
        default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB
 
+config IPMMU_VMSA
+       bool "Renesas VMSA-compatible IPMMU"
+       depends on ARM_LPAE
+       depends on ARCH_SHMOBILE || COMPILE_TEST
+       select IOMMU_API
+       select ARM_DMA_USE_IOMMU
+       help
+         Support for the Renesas VMSA-compatible IPMMU Renesas found in the
+         R-Mobile APE6 and R-Car H2/M2 SoCs.
+
+         If unsure, say N.
+
 config SPAPR_TCE_IOMMU
        bool "sPAPR TCE IOMMU Support"
        depends on PPC_POWERNV || PPC_PSERIES
index 5d58bf16e9e3c15d013c3cb071019be5403223df..8893bad048e0e7efdb7eafef3d7fbd8c7e051701 100644 (file)
@@ -7,6 +7,7 @@ obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
 obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
+obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
index c949520bd196ec47cf6e572e68cdd67c11b9f84f..4aec6a29e316b56b2be4ef2a48bfed8bcd05ea9d 100644 (file)
@@ -3499,8 +3499,6 @@ int __init amd_iommu_init_passthrough(void)
 {
        struct iommu_dev_data *dev_data;
        struct pci_dev *dev = NULL;
-       struct amd_iommu *iommu;
-       u16 devid;
        int ret;
 
        ret = alloc_passthrough_domain();
@@ -3514,12 +3512,6 @@ int __init amd_iommu_init_passthrough(void)
                dev_data = get_dev_data(&dev->dev);
                dev_data->passthrough = true;
 
-               devid = get_device_id(&dev->dev);
-
-               iommu = amd_iommu_rlookup_table[devid];
-               if (!iommu)
-                       continue;
-
                attach_device(&dev->dev, pt_domain);
        }
 
@@ -3999,7 +3991,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
        iommu_flush_dte(iommu, devid);
        if (devid != alias) {
                irq_lookup_table[alias] = table;
-               set_dte_irq_entry(devid, table);
+               set_dte_irq_entry(alias, table);
                iommu_flush_dte(iommu, alias);
        }
 
index b76c58dbe30ce5ac38c8422b66b5ed2ea4f0fb1e..0e08545d72989e114f016751f68df5a3191088c0 100644 (file)
@@ -788,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
                 * per device. But we can enable the exclusion range per
                 * device. This is done here
                 */
-               set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
+               set_dev_entry_bit(devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
index 5208828792e603ad3f5effaa22b2e9c09ca0ac02..d4daa05efe60d6d51edfe653793c77eecdce6c4f 100644 (file)
@@ -45,6 +45,8 @@ struct pri_queue {
 struct pasid_state {
        struct list_head list;                  /* For global state-list */
        atomic_t count;                         /* Reference count */
+       atomic_t mmu_notifier_count;            /* Counting nested mmu_notifier
+                                                  calls */
        struct task_struct *task;               /* Task bound to this PASID */
        struct mm_struct *mm;                   /* mm_struct for the faults */
        struct mmu_notifier mn;                 /* mmu_otifier handle */
@@ -56,6 +58,8 @@ struct pasid_state {
 };
 
 struct device_state {
+       struct list_head list;
+       u16 devid;
        atomic_t count;
        struct pci_dev *pdev;
        struct pasid_state **states;
@@ -81,13 +85,9 @@ struct fault {
        u16 flags;
 };
 
-static struct device_state **state_table;
+static LIST_HEAD(state_list);
 static spinlock_t state_lock;
 
-/* List and lock for all pasid_states */
-static LIST_HEAD(pasid_state_list);
-static DEFINE_SPINLOCK(ps_lock);
-
 static struct workqueue_struct *iommu_wq;
 
 /*
@@ -99,7 +99,6 @@ static u64 *empty_page_table;
 
 static void free_pasid_states(struct device_state *dev_state);
 static void unbind_pasid(struct device_state *dev_state, int pasid);
-static int task_exit(struct notifier_block *nb, unsigned long e, void *data);
 
 static u16 device_id(struct pci_dev *pdev)
 {
@@ -111,13 +110,25 @@ static u16 device_id(struct pci_dev *pdev)
        return devid;
 }
 
+static struct device_state *__get_device_state(u16 devid)
+{
+       struct device_state *dev_state;
+
+       list_for_each_entry(dev_state, &state_list, list) {
+               if (dev_state->devid == devid)
+                       return dev_state;
+       }
+
+       return NULL;
+}
+
 static struct device_state *get_device_state(u16 devid)
 {
        struct device_state *dev_state;
        unsigned long flags;
 
        spin_lock_irqsave(&state_lock, flags);
-       dev_state = state_table[devid];
+       dev_state = __get_device_state(devid);
        if (dev_state != NULL)
                atomic_inc(&dev_state->count);
        spin_unlock_irqrestore(&state_lock, flags);
@@ -158,29 +169,6 @@ static void put_device_state_wait(struct device_state *dev_state)
        free_device_state(dev_state);
 }
 
-static struct notifier_block profile_nb = {
-       .notifier_call = task_exit,
-};
-
-static void link_pasid_state(struct pasid_state *pasid_state)
-{
-       spin_lock(&ps_lock);
-       list_add_tail(&pasid_state->list, &pasid_state_list);
-       spin_unlock(&ps_lock);
-}
-
-static void __unlink_pasid_state(struct pasid_state *pasid_state)
-{
-       list_del(&pasid_state->list);
-}
-
-static void unlink_pasid_state(struct pasid_state *pasid_state)
-{
-       spin_lock(&ps_lock);
-       __unlink_pasid_state(pasid_state);
-       spin_unlock(&ps_lock);
-}
-
 /* Must be called under dev_state->lock */
 static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
                                                  int pasid, bool alloc)
@@ -337,7 +325,6 @@ static void unbind_pasid(struct device_state *dev_state, int pasid)
        if (pasid_state == NULL)
                return;
 
-       unlink_pasid_state(pasid_state);
        __unbind_pasid(pasid_state);
        put_pasid_state_wait(pasid_state); /* Reference taken in this function */
 }
@@ -379,7 +366,12 @@ static void free_pasid_states(struct device_state *dev_state)
                        continue;
 
                put_pasid_state(pasid_state);
-               unbind_pasid(dev_state, i);
+
+               /*
+                * This will call the mn_release function and
+                * unbind the PASID
+                */
+               mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
        }
 
        if (dev_state->pasid_levels == 2)
@@ -443,8 +435,11 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;
 
-       amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
-                                 __pa(empty_page_table));
+       if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) {
+               amd_iommu_domain_set_gcr3(dev_state->domain,
+                                         pasid_state->pasid,
+                                         __pa(empty_page_table));
+       }
 }
 
 static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -457,11 +452,31 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;
 
-       amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
-                                 __pa(pasid_state->mm->pgd));
+       if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) {
+               amd_iommu_domain_set_gcr3(dev_state->domain,
+                                         pasid_state->pasid,
+                                         __pa(pasid_state->mm->pgd));
+       }
+}
+
+static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+       struct pasid_state *pasid_state;
+       struct device_state *dev_state;
+
+       might_sleep();
+
+       pasid_state = mn_to_state(mn);
+       dev_state   = pasid_state->device_state;
+
+       if (pasid_state->device_state->inv_ctx_cb)
+               dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
+
+       unbind_pasid(dev_state, pasid_state->pasid);
 }
 
 static struct mmu_notifier_ops iommu_mn = {
+       .release                = mn_release,
        .clear_flush_young      = mn_clear_flush_young,
        .change_pte             = mn_change_pte,
        .invalidate_page        = mn_invalidate_page,
@@ -504,8 +519,10 @@ static void do_fault(struct work_struct *work)
 
        write = !!(fault->flags & PPR_FAULT_WRITE);
 
+       down_read(&fault->state->mm->mmap_sem);
        npages = get_user_pages(fault->state->task, fault->state->mm,
                                fault->address, 1, write, 0, &page, NULL);
+       up_read(&fault->state->mm->mmap_sem);
 
        if (npages == 1) {
                put_page(page);
@@ -604,53 +621,6 @@ static struct notifier_block ppr_nb = {
        .notifier_call = ppr_notifier,
 };
 
-static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
-{
-       struct pasid_state *pasid_state;
-       struct task_struct *task;
-
-       task = data;
-
-       /*
-        * Using this notifier is a hack - but there is no other choice
-        * at the moment. What I really want is a sleeping notifier that
-        * is called when an MM goes down. But such a notifier doesn't
-        * exist yet. The notifier needs to sleep because it has to make
-        * sure that the device does not use the PASID and the address
-        * space anymore before it is destroyed. This includes waiting
-        * for pending PRI requests to pass the workqueue. The
-        * MMU-Notifiers would be a good fit, but they use RCU and so
-        * they are not allowed to sleep. Lets see how we can solve this
-        * in a more intelligent way in the future.
-        */
-again:
-       spin_lock(&ps_lock);
-       list_for_each_entry(pasid_state, &pasid_state_list, list) {
-               struct device_state *dev_state;
-               int pasid;
-
-               if (pasid_state->task != task)
-                       continue;
-
-               /* Drop Lock and unbind */
-               spin_unlock(&ps_lock);
-
-               dev_state = pasid_state->device_state;
-               pasid     = pasid_state->pasid;
-
-               if (pasid_state->device_state->inv_ctx_cb)
-                       dev_state->inv_ctx_cb(dev_state->pdev, pasid);
-
-               unbind_pasid(dev_state, pasid);
-
-               /* Task may be in the list multiple times */
-               goto again;
-       }
-       spin_unlock(&ps_lock);
-
-       return NOTIFY_OK;
-}
-
 int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
                         struct task_struct *task)
 {
@@ -680,6 +650,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
                goto out;
 
        atomic_set(&pasid_state->count, 1);
+       atomic_set(&pasid_state->mmu_notifier_count, 0);
        init_waitqueue_head(&pasid_state->wq);
        spin_lock_init(&pasid_state->lock);
 
@@ -703,8 +674,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
        if (ret)
                goto out_clear_state;
 
-       link_pasid_state(pasid_state);
-
        return 0;
 
 out_clear_state:
@@ -725,6 +694,7 @@ EXPORT_SYMBOL(amd_iommu_bind_pasid);
 
 void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
 {
+       struct pasid_state *pasid_state;
        struct device_state *dev_state;
        u16 devid;
 
@@ -741,7 +711,17 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
        if (pasid < 0 || pasid >= dev_state->max_pasids)
                goto out;
 
-       unbind_pasid(dev_state, pasid);
+       pasid_state = get_pasid_state(dev_state, pasid);
+       if (pasid_state == NULL)
+               goto out;
+       /*
+        * Drop reference taken here. We are safe because we still hold
+        * the reference taken in the amd_iommu_bind_pasid function.
+        */
+       put_pasid_state(pasid_state);
+
+       /* This will call the mn_release function and unbind the PASID */
+       mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
 
 out:
        put_device_state(dev_state);
@@ -771,7 +751,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 
        spin_lock_init(&dev_state->lock);
        init_waitqueue_head(&dev_state->wq);
-       dev_state->pdev = pdev;
+       dev_state->pdev  = pdev;
+       dev_state->devid = devid;
 
        tmp = pasids;
        for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
@@ -801,13 +782,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 
        spin_lock_irqsave(&state_lock, flags);
 
-       if (state_table[devid] != NULL) {
+       if (__get_device_state(devid) != NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                ret = -EBUSY;
                goto out_free_domain;
        }
 
-       state_table[devid] = dev_state;
+       list_add_tail(&dev_state->list, &state_list);
 
        spin_unlock_irqrestore(&state_lock, flags);
 
@@ -839,13 +820,13 @@ void amd_iommu_free_device(struct pci_dev *pdev)
 
        spin_lock_irqsave(&state_lock, flags);
 
-       dev_state = state_table[devid];
+       dev_state = __get_device_state(devid);
        if (dev_state == NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                return;
        }
 
-       state_table[devid] = NULL;
+       list_del(&dev_state->list);
 
        spin_unlock_irqrestore(&state_lock, flags);
 
@@ -872,7 +853,7 @@ int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
        spin_lock_irqsave(&state_lock, flags);
 
        ret = -EINVAL;
-       dev_state = state_table[devid];
+       dev_state = __get_device_state(devid);
        if (dev_state == NULL)
                goto out_unlock;
 
@@ -903,7 +884,7 @@ int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
        spin_lock_irqsave(&state_lock, flags);
 
        ret = -EINVAL;
-       dev_state = state_table[devid];
+       dev_state = __get_device_state(devid);
        if (dev_state == NULL)
                goto out_unlock;
 
@@ -920,7 +901,6 @@ EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
 
 static int __init amd_iommu_v2_init(void)
 {
-       size_t state_table_size;
        int ret;
 
        pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
@@ -936,16 +916,10 @@ static int __init amd_iommu_v2_init(void)
 
        spin_lock_init(&state_lock);
 
-       state_table_size = MAX_DEVICES * sizeof(struct device_state *);
-       state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                              get_order(state_table_size));
-       if (state_table == NULL)
-               return -ENOMEM;
-
        ret = -ENOMEM;
        iommu_wq = create_workqueue("amd_iommu_v2");
        if (iommu_wq == NULL)
-               goto out_free;
+               goto out;
 
        ret = -ENOMEM;
        empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
@@ -953,29 +927,24 @@ static int __init amd_iommu_v2_init(void)
                goto out_destroy_wq;
 
        amd_iommu_register_ppr_notifier(&ppr_nb);
-       profile_event_register(PROFILE_TASK_EXIT, &profile_nb);
 
        return 0;
 
 out_destroy_wq:
        destroy_workqueue(iommu_wq);
 
-out_free:
-       free_pages((unsigned long)state_table, get_order(state_table_size));
-
+out:
        return ret;
 }
 
 static void __exit amd_iommu_v2_exit(void)
 {
        struct device_state *dev_state;
-       size_t state_table_size;
        int i;
 
        if (!amd_iommu_v2_supported())
                return;
 
-       profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
        amd_iommu_unregister_ppr_notifier(&ppr_nb);
 
        flush_workqueue(iommu_wq);
@@ -998,9 +967,6 @@ static void __exit amd_iommu_v2_exit(void)
 
        destroy_workqueue(iommu_wq);
 
-       state_table_size = MAX_DEVICES * sizeof(struct device_state *);
-       free_pages((unsigned long)state_table, get_order(state_table_size));
-
        free_page((unsigned long)empty_page_table);
 }
 
index 647c3c7fd7428f31dd2b313a1fc8b29ac33b482c..1599354e974d200555218509ba87ed4a65647027 100644 (file)
@@ -1167,7 +1167,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
        for (i = 0; i < master->num_streamids; ++i) {
                u32 idx, s2cr;
                idx = master->smrs ? master->smrs[i].idx : master->streamids[i];
-               s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) |
+               s2cr = S2CR_TYPE_TRANS |
                       (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
                writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
        }
@@ -1804,7 +1804,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
         * allocation (PTRS_PER_PGD).
         */
 #ifdef CONFIG_64BIT
-       smmu->s1_output_size = min(39UL, size);
+       smmu->s1_output_size = min((unsigned long)VA_BITS, size);
 #else
        smmu->s1_output_size = min(32UL, size);
 #endif
index cba0498eb0116b8d4366a0e3cb1abf8dbfc23d6b..b99dd88e31b9b20b41bccde26fd54789a4d61250 100644 (file)
@@ -592,8 +592,7 @@ found_cpu_node:
                /* advance to next node in cache hierarchy */
                node = of_find_node_by_phandle(*prop);
                if (!node) {
-                       pr_debug("Invalid node for cache hierarchy %s\n",
-                               node->full_name);
+                       pr_debug("Invalid node for cache hierarchy\n");
                        return ~(u32)0;
                }
        }
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
new file mode 100644 (file)
index 0000000..53cde08
--- /dev/null
@@ -0,0 +1,1255 @@
+/*
+ * IPMMU VMSA
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/platform_data/ipmmu-vmsa.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include <asm/dma-iommu.h>
+#include <asm/pgalloc.h>
+
+struct ipmmu_vmsa_device {
+       struct device *dev;
+       void __iomem *base;
+       struct list_head list;
+
+       const struct ipmmu_vmsa_platform_data *pdata;
+       unsigned int num_utlbs;
+
+       struct dma_iommu_mapping *mapping;
+};
+
+struct ipmmu_vmsa_domain {
+       struct ipmmu_vmsa_device *mmu;
+       struct iommu_domain *io_domain;
+
+       unsigned int context_id;
+       spinlock_t lock;                        /* Protects mappings */
+       pgd_t *pgd;
+};
+
+struct ipmmu_vmsa_archdata {
+       struct ipmmu_vmsa_device *mmu;
+       unsigned int utlb;
+};
+
+static DEFINE_SPINLOCK(ipmmu_devices_lock);
+static LIST_HEAD(ipmmu_devices);
+
+#define TLB_LOOP_TIMEOUT               100     /* 100us */
+
+/* -----------------------------------------------------------------------------
+ * Registers Definition
+ */
+
+#define IM_CTX_SIZE                    0x40
+
+#define IMCTR                          0x0000
+#define IMCTR_TRE                      (1 << 17)
+#define IMCTR_AFE                      (1 << 16)
+#define IMCTR_RTSEL_MASK               (3 << 4)
+#define IMCTR_RTSEL_SHIFT              4
+#define IMCTR_TREN                     (1 << 3)
+#define IMCTR_INTEN                    (1 << 2)
+#define IMCTR_FLUSH                    (1 << 1)
+#define IMCTR_MMUEN                    (1 << 0)
+
+#define IMCAAR                         0x0004
+
+#define IMTTBCR                                0x0008
+#define IMTTBCR_EAE                    (1 << 31)
+#define IMTTBCR_PMB                    (1 << 30)
+#define IMTTBCR_SH1_NON_SHAREABLE      (0 << 28)
+#define IMTTBCR_SH1_OUTER_SHAREABLE    (2 << 28)
+#define IMTTBCR_SH1_INNER_SHAREABLE    (3 << 28)
+#define IMTTBCR_SH1_MASK               (3 << 28)
+#define IMTTBCR_ORGN1_NC               (0 << 26)
+#define IMTTBCR_ORGN1_WB_WA            (1 << 26)
+#define IMTTBCR_ORGN1_WT               (2 << 26)
+#define IMTTBCR_ORGN1_WB               (3 << 26)
+#define IMTTBCR_ORGN1_MASK             (3 << 26)
+#define IMTTBCR_IRGN1_NC               (0 << 24)
+#define IMTTBCR_IRGN1_WB_WA            (1 << 24)
+#define IMTTBCR_IRGN1_WT               (2 << 24)
+#define IMTTBCR_IRGN1_WB               (3 << 24)
+#define IMTTBCR_IRGN1_MASK             (3 << 24)
+#define IMTTBCR_TSZ1_MASK              (7 << 16)
+#define IMTTBCR_TSZ1_SHIFT             16
+#define IMTTBCR_SH0_NON_SHAREABLE      (0 << 12)
+#define IMTTBCR_SH0_OUTER_SHAREABLE    (2 << 12)
+#define IMTTBCR_SH0_INNER_SHAREABLE    (3 << 12)
+#define IMTTBCR_SH0_MASK               (3 << 12)
+#define IMTTBCR_ORGN0_NC               (0 << 10)
+#define IMTTBCR_ORGN0_WB_WA            (1 << 10)
+#define IMTTBCR_ORGN0_WT               (2 << 10)
+#define IMTTBCR_ORGN0_WB               (3 << 10)
+#define IMTTBCR_ORGN0_MASK             (3 << 10)
+#define IMTTBCR_IRGN0_NC               (0 << 8)
+#define IMTTBCR_IRGN0_WB_WA            (1 << 8)
+#define IMTTBCR_IRGN0_WT               (2 << 8)
+#define IMTTBCR_IRGN0_WB               (3 << 8)
+#define IMTTBCR_IRGN0_MASK             (3 << 8)
+#define IMTTBCR_SL0_LVL_2              (0 << 4)
+#define IMTTBCR_SL0_LVL_1              (1 << 4)
+#define IMTTBCR_TSZ0_MASK              (7 << 0)
+#define IMTTBCR_TSZ0_SHIFT             0
+
+#define IMBUSCR                                0x000c
+#define IMBUSCR_DVM                    (1 << 2)
+#define IMBUSCR_BUSSEL_SYS             (0 << 0)
+#define IMBUSCR_BUSSEL_CCI             (1 << 0)
+#define IMBUSCR_BUSSEL_IMCAAR          (2 << 0)
+#define IMBUSCR_BUSSEL_CCI_IMCAAR      (3 << 0)
+#define IMBUSCR_BUSSEL_MASK            (3 << 0)
+
+#define IMTTLBR0                       0x0010
+#define IMTTUBR0                       0x0014
+#define IMTTLBR1                       0x0018
+#define IMTTUBR1                       0x001c
+
+#define IMSTR                          0x0020
+#define IMSTR_ERRLVL_MASK              (3 << 12)
+#define IMSTR_ERRLVL_SHIFT             12
+#define IMSTR_ERRCODE_TLB_FORMAT       (1 << 8)
+#define IMSTR_ERRCODE_ACCESS_PERM      (4 << 8)
+#define IMSTR_ERRCODE_SECURE_ACCESS    (5 << 8)
+#define IMSTR_ERRCODE_MASK             (7 << 8)
+#define IMSTR_MHIT                     (1 << 4)
+#define IMSTR_ABORT                    (1 << 2)
+#define IMSTR_PF                       (1 << 1)
+#define IMSTR_TF                       (1 << 0)
+
+#define IMMAIR0                                0x0028
+#define IMMAIR1                                0x002c
+#define IMMAIR_ATTR_MASK               0xff
+#define IMMAIR_ATTR_DEVICE             0x04
+#define IMMAIR_ATTR_NC                 0x44
+#define IMMAIR_ATTR_WBRWA              0xff
+#define IMMAIR_ATTR_SHIFT(n)           ((n) << 3)
+#define IMMAIR_ATTR_IDX_NC             0
+#define IMMAIR_ATTR_IDX_WBRWA          1
+#define IMMAIR_ATTR_IDX_DEV            2
+
+#define IMEAR                          0x0030
+
+#define IMPCTR                         0x0200
+#define IMPSTR                         0x0208
+#define IMPEAR                         0x020c
+#define IMPMBA(n)                      (0x0280 + ((n) * 4))
+#define IMPMBD(n)                      (0x02c0 + ((n) * 4))
+
+#define IMUCTR(n)                      (0x0300 + ((n) * 16))
+#define IMUCTR_FIXADDEN                        (1 << 31)
+#define IMUCTR_FIXADD_MASK             (0xff << 16)
+#define IMUCTR_FIXADD_SHIFT            16
+#define IMUCTR_TTSEL_MMU(n)            ((n) << 4)
+#define IMUCTR_TTSEL_PMB               (8 << 4)
+#define IMUCTR_TTSEL_MASK              (15 << 4)
+#define IMUCTR_FLUSH                   (1 << 1)
+#define IMUCTR_MMUEN                   (1 << 0)
+
+#define IMUASID(n)                     (0x0308 + ((n) * 16))
+#define IMUASID_ASID8_MASK             (0xff << 8)
+#define IMUASID_ASID8_SHIFT            8
+#define IMUASID_ASID0_MASK             (0xff << 0)
+#define IMUASID_ASID0_SHIFT            0
+
+/* -----------------------------------------------------------------------------
+ * Page Table Bits
+ */
+
+/*
+ * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory access,
+ * Long-descriptor format" that the NStable bit being set in a table descriptor
+ * will result in the NStable and NS bits of all child entries being ignored and
+ * considered as being set. The IPMMU seems not to comply with this, as it
+ * generates a secure access page fault if any of the NStable and NS bits isn't
+ * set when running in non-secure mode.
+ */
+#ifndef PMD_NSTABLE
+#define PMD_NSTABLE                    (_AT(pmdval_t, 1) << 63)
+#endif
+
+#define ARM_VMSA_PTE_XN                        (((pteval_t)3) << 53)
+#define ARM_VMSA_PTE_CONT              (((pteval_t)1) << 52)
+#define ARM_VMSA_PTE_AF                        (((pteval_t)1) << 10)
+#define ARM_VMSA_PTE_SH_NS             (((pteval_t)0) << 8)
+#define ARM_VMSA_PTE_SH_OS             (((pteval_t)2) << 8)
+#define ARM_VMSA_PTE_SH_IS             (((pteval_t)3) << 8)
+#define ARM_VMSA_PTE_SH_MASK           (((pteval_t)3) << 8)
+#define ARM_VMSA_PTE_NS                        (((pteval_t)1) << 5)
+#define ARM_VMSA_PTE_PAGE              (((pteval_t)3) << 0)
+
+/* Stage-1 PTE */
+#define ARM_VMSA_PTE_nG                        (((pteval_t)1) << 11)
+#define ARM_VMSA_PTE_AP_UNPRIV         (((pteval_t)1) << 6)
+#define ARM_VMSA_PTE_AP_RDONLY         (((pteval_t)2) << 6)
+#define ARM_VMSA_PTE_AP_MASK           (((pteval_t)3) << 6)
+#define ARM_VMSA_PTE_ATTRINDX_MASK     (((pteval_t)3) << 2)
+#define ARM_VMSA_PTE_ATTRINDX_SHIFT    2
+
+#define ARM_VMSA_PTE_ATTRS_MASK \
+       (ARM_VMSA_PTE_XN | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_nG | \
+        ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_MASK | ARM_VMSA_PTE_AP_MASK | \
+        ARM_VMSA_PTE_NS | ARM_VMSA_PTE_ATTRINDX_MASK)
+
+#define ARM_VMSA_PTE_CONT_ENTRIES      16
+#define ARM_VMSA_PTE_CONT_SIZE         (PAGE_SIZE * ARM_VMSA_PTE_CONT_ENTRIES)
+
+#define IPMMU_PTRS_PER_PTE             512
+#define IPMMU_PTRS_PER_PMD             512
+#define IPMMU_PTRS_PER_PGD             4
+
+/* -----------------------------------------------------------------------------
+ * Read/Write Access
+ */
+
+static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
+{
+       return ioread32(mmu->base + offset);
+}
+
+static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
+                       u32 data)
+{
+       iowrite32(data, mmu->base + offset);
+}
+
+static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
+{
+       return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
+}
+
+static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
+                           u32 data)
+{
+       ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * TLB and microTLB Management
+ */
+
+/* Wait for any pending TLB invalidations to complete */
+static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
+{
+       unsigned int count = 0;
+
+       while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
+               cpu_relax();
+               if (++count == TLB_LOOP_TIMEOUT) {
+                       dev_err_ratelimited(domain->mmu->dev,
+                       "TLB sync timed out -- MMU may be deadlocked\n");
+                       return;
+               }
+               udelay(1);
+       }
+}
+
+static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
+{
+       u32 reg;
+
+       reg = ipmmu_ctx_read(domain, IMCTR);
+       reg |= IMCTR_FLUSH;
+       ipmmu_ctx_write(domain, IMCTR, reg);
+
+       ipmmu_tlb_sync(domain);
+}
+
+/*
+ * Enable MMU translation for the microTLB.
+ */
+static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
+                             unsigned int utlb)
+{
+       struct ipmmu_vmsa_device *mmu = domain->mmu;
+
+       /*
+        * TODO: Reference-count the microTLB as several bus masters can be
+        * connected to the same microTLB.
+        */
+
+       /* TODO: What should we set the ASID to ? */
+       ipmmu_write(mmu, IMUASID(utlb), 0);
+       /* TODO: Do we need to flush the microTLB ? */
+       ipmmu_write(mmu, IMUCTR(utlb),
+                   IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
+                   IMUCTR_MMUEN);
+}
+
+/*
+ * Disable MMU translation for the microTLB.
+ */
+static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
+                              unsigned int utlb)
+{
+       struct ipmmu_vmsa_device *mmu = domain->mmu;
+
+       ipmmu_write(mmu, IMUCTR(utlb), 0);
+}
+
+static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
+                               size_t size)
+{
+       unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+       /*
+        * TODO: Add support for coherent walk through CCI with DVM and remove
+        * cache handling.
+        */
+       dma_map_page(mmu->dev, virt_to_page(addr), offset, size, DMA_TO_DEVICE);
+}
+
+/* -----------------------------------------------------------------------------
+ * Domain/Context Management
+ */
+
+static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+{
+       phys_addr_t ttbr;
+       u32 reg;
+
+       /*
+        * TODO: When adding support for multiple contexts, find an unused
+        * context.
+        */
+       domain->context_id = 0;
+
+       /* TTBR0 */
+       ipmmu_flush_pgtable(domain->mmu, domain->pgd,
+                           IPMMU_PTRS_PER_PGD * sizeof(*domain->pgd));
+       ttbr = __pa(domain->pgd);
+       ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
+       ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
+
+       /*
+        * TTBCR
+        * We use long descriptors with inner-shareable WBWA tables and allocate
+        * the whole 32-bit VA space to TTBR0.
+        */
+       ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
+                       IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
+                       IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
+
+       /*
+        * MAIR0
+        * We need three attributes only, non-cacheable, write-back read/write
+        * allocate and device memory.
+        */
+       reg = (IMMAIR_ATTR_NC << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_NC))
+           | (IMMAIR_ATTR_WBRWA << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_WBRWA))
+           | (IMMAIR_ATTR_DEVICE << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_DEV));
+       ipmmu_ctx_write(domain, IMMAIR0, reg);
+
+       /* IMBUSCR */
+       ipmmu_ctx_write(domain, IMBUSCR,
+                       ipmmu_ctx_read(domain, IMBUSCR) &
+                       ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
+
+       /*
+        * IMSTR
+        * Clear all interrupt flags.
+        */
+       ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));
+
+       /*
+        * IMCTR
+        * Enable the MMU and interrupt generation. The long-descriptor
+        * translation table format doesn't use TEX remapping. Don't enable AF
+        * software management as we have no use for it. Flush the TLB as
+        * required when modifying the context registers.
+        */
+       ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
+
+       return 0;
+}
+
+static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
+{
+       /*
+        * Disable the context. Flush the TLB as required when modifying the
+        * context registers.
+        *
+        * TODO: Is TLB flush really needed ?
+        */
+       ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
+       ipmmu_tlb_sync(domain);
+}
+
+/* -----------------------------------------------------------------------------
+ * Fault Handling
+ */
+
+static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
+{
+       const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
+       struct ipmmu_vmsa_device *mmu = domain->mmu;
+       u32 status;
+       u32 iova;
+
+       status = ipmmu_ctx_read(domain, IMSTR);
+       if (!(status & err_mask))
+               return IRQ_NONE;
+
+       iova = ipmmu_ctx_read(domain, IMEAR);
+
+       /*
+        * Clear the error status flags. Unlike traditional interrupt flag
+        * registers that must be cleared by writing 1, this status register
+        * seems to require 0. The error address register must be read before,
+        * otherwise its value will be 0.
+        */
+       ipmmu_ctx_write(domain, IMSTR, 0);
+
+       /* Log fatal errors. */
+       if (status & IMSTR_MHIT)
+               dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
+                                   iova);
+       if (status & IMSTR_ABORT)
+               dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
+                                   iova);
+
+       if (!(status & (IMSTR_PF | IMSTR_TF)))
+               return IRQ_NONE;
+
+       /*
+        * Try to handle page faults and translation faults.
+        *
+        * TODO: We need to look up the faulty device based on the I/O VA. Use
+        * the IOMMU device for now.
+        */
+       if (!report_iommu_fault(domain->io_domain, mmu->dev, iova, 0))
+               return IRQ_HANDLED;
+
+       dev_err_ratelimited(mmu->dev,
+                           "Unhandled fault: status 0x%08x iova 0x%08x\n",
+                           status, iova);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ipmmu_irq(int irq, void *dev)
+{
+       struct ipmmu_vmsa_device *mmu = dev;
+       struct iommu_domain *io_domain;
+       struct ipmmu_vmsa_domain *domain;
+
+       if (!mmu->mapping)
+               return IRQ_NONE;
+
+       io_domain = mmu->mapping->domain;
+       domain = io_domain->priv;
+
+       return ipmmu_domain_irq(domain);
+}
+
+/* -----------------------------------------------------------------------------
+ * Page Table Management
+ */
+
+#define pud_pgtable(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
+
+static void ipmmu_free_ptes(pmd_t *pmd)
+{
+       pgtable_t table = pmd_pgtable(*pmd);
+       __free_page(table);
+}
+
+static void ipmmu_free_pmds(pud_t *pud)
+{
+       pmd_t *pmd = pmd_offset(pud, 0);
+       pgtable_t table;
+       unsigned int i;
+
+       for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
+               if (!pmd_table(*pmd))
+                       continue;
+
+               ipmmu_free_ptes(pmd);
+               pmd++;
+       }
+
+       table = pud_pgtable(*pud);
+       __free_page(table);
+}
+
+static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain)
+{
+       pgd_t *pgd, *pgd_base = domain->pgd;
+       unsigned int i;
+
+       /*
+        * Recursively free the page tables for this domain. We don't care about
+        * speculative TLB filling, because the TLB will be nuked next time this
+        * context bank is re-allocated and no devices currently map to these
+        * tables.
+        */
+       pgd = pgd_base;
+       for (i = 0; i < IPMMU_PTRS_PER_PGD; ++i) {
+               if (pgd_none(*pgd))
+                       continue;
+               ipmmu_free_pmds((pud_t *)pgd);
+               pgd++;
+       }
+
+       kfree(pgd_base);
+}
+
+/*
+ * We can't use the (pgd|pud|pmd|pte)_populate or the set_(pgd|pud|pmd|pte)
+ * functions as they would flush the CPU TLB.
+ */
+
+static pte_t *ipmmu_alloc_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
+                             unsigned long iova)
+{
+       pte_t *pte;
+
+       if (!pmd_none(*pmd))
+               return pte_offset_kernel(pmd, iova);
+
+       pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
+       if (!pte)
+               return NULL;
+
+       ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE);
+       *pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE);
+       ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
+
+       return pte + pte_index(iova);
+}
+
+static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
+                             unsigned long iova)
+{
+       pud_t *pud = (pud_t *)pgd;
+       pmd_t *pmd;
+
+       if (!pud_none(*pud))
+               return pmd_offset(pud, iova);
+
+       pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
+       if (!pmd)
+               return NULL;
+
+       ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE);
+       *pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE);
+       ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
+
+       return pmd + pmd_index(iova);
+}
+
+static u64 ipmmu_page_prot(unsigned int prot, u64 type)
+{
+       u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
+                  | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
+                  | ARM_VMSA_PTE_NS | type;
+
+       if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
+               pgprot |= ARM_VMSA_PTE_AP_RDONLY;
+
+       if (prot & IOMMU_CACHE)
+               pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
+
+       if (prot & IOMMU_EXEC)
+               pgprot &= ~ARM_VMSA_PTE_XN;
+       else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+               /* If no access create a faulting entry to avoid TLB fills. */
+               pgprot &= ~ARM_VMSA_PTE_PAGE;
+
+       return pgprot;
+}
+
+static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
+                               unsigned long iova, unsigned long pfn,
+                               size_t size, int prot)
+{
+       pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE);
+       unsigned int num_ptes = 1;
+       pte_t *pte, *start;
+       unsigned int i;
+
+       pte = ipmmu_alloc_pte(mmu, pmd, iova);
+       if (!pte)
+               return -ENOMEM;
+
+       start = pte;
+
+       /*
+        * Install the page table entries. We can be called both for a single
+        * page or for a block of 16 physically contiguous pages. In the latter
+        * case set the PTE contiguous hint.
+        */
+       if (size == SZ_64K) {
+               pteval |= ARM_VMSA_PTE_CONT;
+               num_ptes = ARM_VMSA_PTE_CONT_ENTRIES;
+       }
+
+       for (i = num_ptes; i; --i)
+               *pte++ = pfn_pte(pfn++, __pgprot(pteval));
+
+       ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * num_ptes);
+
+       return 0;
+}
+
+static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
+                               unsigned long iova, unsigned long pfn,
+                               int prot)
+{
+       pmdval_t pmdval = ipmmu_page_prot(prot, PMD_TYPE_SECT);
+
+       *pmd = pfn_pmd(pfn, __pgprot(pmdval));
+       ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
+
+       return 0;
+}
+
+static int ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain,
+                               unsigned long iova, phys_addr_t paddr,
+                               size_t size, int prot)
+{
+       struct ipmmu_vmsa_device *mmu = domain->mmu;
+       pgd_t *pgd = domain->pgd;
+       unsigned long flags;
+       unsigned long pfn;
+       pmd_t *pmd;
+       int ret;
+
+       if (!pgd)
+               return -EINVAL;
+
+       if (size & ~PAGE_MASK)
+               return -EINVAL;
+
+       if (paddr & ~((1ULL << 40) - 1))
+               return -ERANGE;
+
+       pfn = __phys_to_pfn(paddr);
+       pgd += pgd_index(iova);
+
+       /* Update the page tables. */
+       spin_lock_irqsave(&domain->lock, flags);
+
+       pmd = ipmmu_alloc_pmd(mmu, pgd, iova);
+       if (!pmd) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       switch (size) {
+       case SZ_2M:
+               ret = ipmmu_alloc_init_pmd(mmu, pmd, iova, pfn, prot);
+               break;
+       case SZ_64K:
+       case SZ_4K:
+               ret = ipmmu_alloc_init_pte(mmu, pmd, iova, pfn, size, prot);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+done:
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       if (!ret)
+               ipmmu_tlb_invalidate(domain);
+
+       return ret;
+}
+
+static void ipmmu_clear_pud(struct ipmmu_vmsa_device *mmu, pud_t *pud)
+{
+       /* Free the page table. */
+       pgtable_t table = pud_pgtable(*pud);
+       __free_page(table);
+
+       /* Clear the PUD. */
+       *pud = __pud(0);
+       ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
+}
+
+static void ipmmu_clear_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
+                           pmd_t *pmd)
+{
+       unsigned int i;
+
+       /* Free the page table. */
+       if (pmd_table(*pmd)) {
+               pgtable_t table = pmd_pgtable(*pmd);
+               __free_page(table);
+       }
+
+       /* Clear the PMD. */
+       *pmd = __pmd(0);
+       ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
+
+       /* Check whether the PUD is still needed. */
+       pmd = pmd_offset(pud, 0);
+       for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
+               if (!pmd_none(pmd[i]))
+                       return;
+       }
+
+       /* Clear the parent PUD. */
+       ipmmu_clear_pud(mmu, pud);
+}
+
+static void ipmmu_clear_pte(struct ipmmu_vmsa_device *mmu, pud_t *pud,
+                           pmd_t *pmd, pte_t *pte, unsigned int num_ptes)
+{
+       unsigned int i;
+
+       /* Clear the PTE. */
+       for (i = num_ptes; i; --i)
+               pte[i-1] = __pte(0);
+
+       ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * num_ptes);
+
+       /* Check whether the PMD is still needed. */
+       pte = pte_offset_kernel(pmd, 0);
+       for (i = 0; i < IPMMU_PTRS_PER_PTE; ++i) {
+               if (!pte_none(pte[i]))
+                       return;
+       }
+
+       /* Clear the parent PMD. */
+       ipmmu_clear_pmd(mmu, pud, pmd);
+}
+
+static int ipmmu_split_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd)
+{
+       pte_t *pte, *start;
+       pteval_t pteval;
+       unsigned long pfn;
+       unsigned int i;
+
+       pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
+       if (!pte)
+               return -ENOMEM;
+
+       /* Copy the PMD attributes. */
+       pteval = (pmd_val(*pmd) & ARM_VMSA_PTE_ATTRS_MASK)
+              | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_PAGE;
+
+       pfn = pmd_pfn(*pmd);
+       start = pte;
+
+       for (i = IPMMU_PTRS_PER_PTE; i; --i)
+               *pte++ = pfn_pte(pfn++, __pgprot(pteval));
+
+       ipmmu_flush_pgtable(mmu, start, PAGE_SIZE);
+       *pmd = __pmd(__pa(start) | PMD_NSTABLE | PMD_TYPE_TABLE);
+       ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
+
+       return 0;
+}
+
+static void ipmmu_split_pte(struct ipmmu_vmsa_device *mmu, pte_t *pte)
+{
+       unsigned int i;
+
+       for (i = ARM_VMSA_PTE_CONT_ENTRIES; i; --i)
+               pte[i-1] = __pte(pte_val(*pte) & ~ARM_VMSA_PTE_CONT);
+
+       ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * ARM_VMSA_PTE_CONT_ENTRIES);
+}
+
+static int ipmmu_clear_mapping(struct ipmmu_vmsa_domain *domain,
+                              unsigned long iova, size_t size)
+{
+       struct ipmmu_vmsa_device *mmu = domain->mmu;
+       unsigned long flags;
+       pgd_t *pgd = domain->pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       int ret = 0;
+
+       if (!pgd)
+               return -EINVAL;
+
+       if (size & ~PAGE_MASK)
+               return -EINVAL;
+
+       pgd += pgd_index(iova);
+       pud = (pud_t *)pgd;
+
+       spin_lock_irqsave(&domain->lock, flags);
+
+       /* If there's no PUD or PMD we're done. */
+       if (pud_none(*pud))
+               goto done;
+
+       pmd = pmd_offset(pud, iova);
+       if (pmd_none(*pmd))
+               goto done;
+
+       /*
+        * When freeing a 2MB block just clear the PMD. In the unlikely case the
+        * block is mapped as individual pages this will free the corresponding
+        * PTE page table.
+        */
+       if (size == SZ_2M) {
+               ipmmu_clear_pmd(mmu, pud, pmd);
+               goto done;
+       }
+
+       /*
+        * If the PMD has been mapped as a section remap it as pages to allow
+        * freeing individual pages.
+        */
+       if (pmd_sect(*pmd))
+               ipmmu_split_pmd(mmu, pmd);
+
+       pte = pte_offset_kernel(pmd, iova);
+
+       /*
+        * When freeing a 64kB block just clear the PTE entries. We don't have
+        * to care about the contiguous hint of the surrounding entries.
+        */
+       if (size == SZ_64K) {
+               ipmmu_clear_pte(mmu, pud, pmd, pte, ARM_VMSA_PTE_CONT_ENTRIES);
+               goto done;
+       }
+
+       /*
+        * If the PTE has been mapped with the contiguous hint set remap it and
+        * its surrounding PTEs to allow unmapping a single page.
+        */
+       if (pte_val(*pte) & ARM_VMSA_PTE_CONT)
+               ipmmu_split_pte(mmu, pte);
+
+       /* Clear the PTE. */
+       ipmmu_clear_pte(mmu, pud, pmd, pte, 1);
+
+done:
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       if (ret)
+               ipmmu_tlb_invalidate(domain);
+
+       return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * IOMMU Operations
+ */
+
+static int ipmmu_domain_init(struct iommu_domain *io_domain)
+{
+       struct ipmmu_vmsa_domain *domain;
+
+       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+       if (!domain)
+               return -ENOMEM;
+
+       spin_lock_init(&domain->lock);
+
+       domain->pgd = kzalloc(IPMMU_PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+       if (!domain->pgd) {
+               kfree(domain);
+               return -ENOMEM;
+       }
+
+       io_domain->priv = domain;
+       domain->io_domain = io_domain;
+
+       return 0;
+}
+
+static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
+{
+       struct ipmmu_vmsa_domain *domain = io_domain->priv;
+
+       /*
+        * Free the domain resources. We assume that all devices have already
+        * been detached.
+        */
+       ipmmu_domain_destroy_context(domain);
+       ipmmu_free_pgtables(domain);
+       kfree(domain);
+}
+
+/*
+ * ipmmu_attach_device - iommu_ops .attach_dev handler
+ *
+ * Binds @domain to the device's IPMMU instance on first attach (and
+ * initializes the hardware translation context), then enables the device's
+ * micro-TLB.  All devices attached to one domain must share the same IPMMU
+ * instance.
+ *
+ * Return: 0 on success, -ENXIO when the device has no IPMMU archdata,
+ * -EINVAL when the device belongs to a different IPMMU than the domain,
+ * or the error from ipmmu_domain_init_context().
+ */
+static int ipmmu_attach_device(struct iommu_domain *io_domain,
+                              struct device *dev)
+{
+       struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
+       struct ipmmu_vmsa_device *mmu;
+       struct ipmmu_vmsa_domain *domain = io_domain->priv;
+       unsigned long flags;
+       int ret = 0;
+
+       /*
+        * Don't dereference archdata before checking it: the IOMMU core can
+        * call attach for a device that ipmmu_add_device() never accepted,
+        * in which case dev->archdata.iommu is NULL.  The original code read
+        * archdata->mmu before the NULL check and would oops here.
+        */
+       if (!archdata || !archdata->mmu) {
+               dev_err(dev, "Cannot attach to IPMMU\n");
+               return -ENXIO;
+       }
+
+       mmu = archdata->mmu;
+
+       spin_lock_irqsave(&domain->lock, flags);
+
+       if (!domain->mmu) {
+               /* The domain hasn't been used yet, initialize it. */
+               domain->mmu = mmu;
+               ret = ipmmu_domain_init_context(domain);
+       } else if (domain->mmu != mmu) {
+               /*
+                * Something is wrong, we can't attach two devices using
+                * different IOMMUs to the same domain.
+                */
+               dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
+                       dev_name(mmu->dev), dev_name(domain->mmu->dev));
+               ret = -EINVAL;
+       }
+
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       if (ret < 0)
+               return ret;
+
+       ipmmu_utlb_enable(domain, archdata->utlb);
+
+       return 0;
+}
+
+/*
+ * ipmmu_detach_device - iommu_ops .detach_dev handler
+ *
+ * Only disables the device's micro-TLB; the hardware context stays
+ * allocated (see the TODO below).
+ */
+static void ipmmu_detach_device(struct iommu_domain *io_domain,
+                               struct device *dev)
+{
+       struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
+       struct ipmmu_vmsa_domain *domain = io_domain->priv;
+
+       ipmmu_utlb_disable(domain, archdata->utlb);
+
+       /*
+        * TODO: Optimize by disabling the context when no device is attached.
+        */
+}
+
+/*
+ * ipmmu_map - iommu_ops .map handler
+ *
+ * Maps @size bytes at physical address @paddr to IO virtual address @iova
+ * with protection flags @prot by updating the domain's page tables.
+ *
+ * Return: 0 on success, -ENODEV when the domain has no private data, or
+ * the error from ipmmu_create_mapping().
+ */
+static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
+                    phys_addr_t paddr, size_t size, int prot)
+{
+       struct ipmmu_vmsa_domain *domain = io_domain->priv;
+
+       if (!domain)
+               return -ENODEV;
+
+       return ipmmu_create_mapping(domain, iova, paddr, size, prot);
+}
+
+/*
+ * ipmmu_unmap - iommu_ops .unmap handler
+ *
+ * Return: the number of bytes unmapped (@size) on success, or 0 when
+ * ipmmu_clear_mapping() reports an error (the IOMMU API signals unmap
+ * failure by returning 0 bytes).
+ */
+static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
+                         size_t size)
+{
+       struct ipmmu_vmsa_domain *domain = io_domain->priv;
+       int ret;
+
+       ret = ipmmu_clear_mapping(domain, iova, size);
+       return ret ? 0 : size;
+}
+
+/*
+ * ipmmu_iova_to_phys - iommu_ops .iova_to_phys handler
+ *
+ * Software walk of the domain page tables (PGD -> PUD -> PMD -> PTE).
+ * Handles both section entries at the PMD level and regular page entries
+ * at the PTE level.
+ *
+ * Return: the physical address backing @iova, or 0 when the address is
+ * not mapped (0 doubles as the "no translation" value here).
+ */
+static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
+                                     dma_addr_t iova)
+{
+       struct ipmmu_vmsa_domain *domain = io_domain->priv;
+       pgd_t pgd;
+       pud_t pud;
+       pmd_t pmd;
+       pte_t pte;
+
+       /* TODO: Is locking needed ? */
+
+       /* No PGD means the domain was never populated. */
+       if (!domain->pgd)
+               return 0;
+
+       pgd = *(domain->pgd + pgd_index(iova));
+       if (pgd_none(pgd))
+               return 0;
+
+       pud = *pud_offset(&pgd, iova);
+       if (pud_none(pud))
+               return 0;
+
+       pmd = *pmd_offset(&pud, iova);
+       if (pmd_none(pmd))
+               return 0;
+
+       /* Section entry: the PMD maps a large block directly. */
+       if (pmd_sect(pmd))
+               return __pfn_to_phys(pmd_pfn(pmd)) | (iova & ~PMD_MASK);
+
+       /* Page entry: descend to the PTE level. */
+       pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
+       if (pte_none(pte))
+               return 0;
+
+       return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
+}
+
+/*
+ * ipmmu_find_utlb - Look up the micro-TLB assigned to a device
+ *
+ * Matches @dev by name against the masters list supplied through the
+ * IPMMU platform data.
+ *
+ * Return: the micro-TLB index of the matching master, or -1 when @dev is
+ * not a master of this IPMMU instance.
+ */
+static int ipmmu_find_utlb(struct ipmmu_vmsa_device *mmu, struct device *dev)
+{
+       const struct ipmmu_vmsa_master *master = mmu->pdata->masters;
+       const char *devname = dev_name(dev);
+       unsigned int i;
+
+       for (i = 0; i < mmu->pdata->num_masters; ++i, ++master) {
+               if (strcmp(master->name, devname) == 0)
+                       return master->utlb;
+       }
+
+       return -1;
+}
+
+/*
+ * ipmmu_add_device - iommu_ops .add_device handler
+ *
+ * Matches the device by name against the masters list of every registered
+ * IPMMU instance, puts it in a newly allocated IOMMU group, allocates the
+ * per-device archdata, lazily creates the shared ARM DMA VA mapping for
+ * the instance, and attaches the device to that mapping.
+ *
+ * Return: 0 on success, -ENODEV when no IPMMU handles the device, -EINVAL
+ * for an out-of-range micro-TLB or an already-assigned device, or a
+ * negative error code from the group/mapping setup.
+ */
+static int ipmmu_add_device(struct device *dev)
+{
+       struct ipmmu_vmsa_archdata *archdata;
+       struct ipmmu_vmsa_device *mmu;
+       struct iommu_group *group;
+       int utlb = -1;
+       int ret;
+
+       if (dev->archdata.iommu) {
+               dev_warn(dev, "IOMMU driver already assigned to device %s\n",
+                        dev_name(dev));
+               return -EINVAL;
+       }
+
+       /* Find the master corresponding to the device. */
+       spin_lock(&ipmmu_devices_lock);
+
+       list_for_each_entry(mmu, &ipmmu_devices, list) {
+               utlb = ipmmu_find_utlb(mmu, dev);
+               if (utlb >= 0) {
+                       /*
+                        * TODO Take a reference to the MMU to protect
+                        * against device removal.
+                        */
+                       break;
+               }
+       }
+
+       spin_unlock(&ipmmu_devices_lock);
+
+       if (utlb < 0)
+               return -ENODEV;
+
+       if (utlb >= mmu->num_utlbs)
+               return -EINVAL;
+
+       /* Create a device group and add the device to it. */
+       group = iommu_group_alloc();
+       if (IS_ERR(group)) {
+               dev_err(dev, "Failed to allocate IOMMU group\n");
+               return PTR_ERR(group);
+       }
+
+       ret = iommu_group_add_device(group, dev);
+       iommu_group_put(group);
+
+       if (ret < 0) {
+               dev_err(dev, "Failed to add device to IPMMU group\n");
+               return ret;
+       }
+
+       archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
+       if (!archdata) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       archdata->mmu = mmu;
+       archdata->utlb = utlb;
+       dev->archdata.iommu = archdata;
+
+       /*
+        * Create the ARM mapping, used by the ARM DMA mapping core to allocate
+        * VAs. This will allocate a corresponding IOMMU domain.
+        *
+        * TODO:
+        * - Create one mapping per context (TLB).
+        * - Make the mapping size configurable ? We currently use a 2GB mapping
+        *   at a 1GB offset to ensure that NULL VAs will fault.
+        */
+       if (!mmu->mapping) {
+               struct dma_iommu_mapping *mapping;
+
+               mapping = arm_iommu_create_mapping(&platform_bus_type,
+                                                  SZ_1G, SZ_2G);
+               if (IS_ERR(mapping)) {
+                       dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
+                       /*
+                        * Go through the error path: the original direct
+                        * return leaked archdata and left the device in its
+                        * IOMMU group.
+                        */
+                       ret = PTR_ERR(mapping);
+                       goto error;
+               }
+
+               mmu->mapping = mapping;
+       }
+
+       /* Attach the ARM VA mapping to the device. */
+       ret = arm_iommu_attach_device(dev, mmu->mapping);
+       if (ret < 0) {
+               dev_err(dev, "Failed to attach device to VA mapping\n");
+               goto error;
+       }
+
+       return 0;
+
+error:
+       kfree(dev->archdata.iommu);
+       dev->archdata.iommu = NULL;
+       iommu_group_remove_device(dev);
+       return ret;
+}
+
+/*
+ * ipmmu_remove_device - iommu_ops .remove_device handler
+ *
+ * Undoes ipmmu_add_device(): detaches the ARM VA mapping, removes the
+ * device from its IOMMU group and frees the per-device archdata.
+ */
+static void ipmmu_remove_device(struct device *dev)
+{
+       arm_iommu_detach_device(dev);
+       iommu_group_remove_device(dev);
+       kfree(dev->archdata.iommu);
+       dev->archdata.iommu = NULL;
+}
+
+/*
+ * IOMMU operations for the VMSA-compatible IPMMU.  pgsize_bitmap
+ * advertises the directly mappable page sizes: 4KB and 64KB pages plus
+ * 2MB sections (handled at the PMD level, see ipmmu_iova_to_phys()).
+ */
+static struct iommu_ops ipmmu_ops = {
+       .domain_init = ipmmu_domain_init,
+       .domain_destroy = ipmmu_domain_destroy,
+       .attach_dev = ipmmu_attach_device,
+       .detach_dev = ipmmu_detach_device,
+       .map = ipmmu_map,
+       .unmap = ipmmu_unmap,
+       .iova_to_phys = ipmmu_iova_to_phys,
+       .add_device = ipmmu_add_device,
+       .remove_device = ipmmu_remove_device,
+       .pgsize_bitmap = SZ_2M | SZ_64K | SZ_4K,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe/remove and init
+ */
+
+/*
+ * ipmmu_device_reset - Put the IPMMU into a known disabled state
+ *
+ * Clears the control register (IMCTR) of each hardware context so no
+ * translation is active until a domain is attached.
+ *
+ * NOTE(review): the context count is hard-coded to 4 here — confirm this
+ * matches all supported IPMMU variants.
+ */
+static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
+{
+       unsigned int i;
+
+       /* Disable all contexts. */
+       for (i = 0; i < 4; ++i)
+               ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
+}
+
+/*
+ * ipmmu_probe - Probe one IPMMU instance
+ *
+ * Validates the platform data, maps the register window, requests the
+ * fault IRQ, resets the hardware, and registers the instance on the
+ * global ipmmu_devices list used by ipmmu_add_device() to match master
+ * devices.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int ipmmu_probe(struct platform_device *pdev)
+{
+       struct ipmmu_vmsa_device *mmu;
+       struct resource *res;
+       int irq;
+       int ret;
+
+       if (!pdev->dev.platform_data) {
+               dev_err(&pdev->dev, "missing platform data\n");
+               return -EINVAL;
+       }
+
+       mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
+       if (!mmu) {
+               dev_err(&pdev->dev, "cannot allocate device data\n");
+               return -ENOMEM;
+       }
+
+       mmu->dev = &pdev->dev;
+       mmu->pdata = pdev->dev.platform_data;
+       mmu->num_utlbs = 32;
+
+       /* Map I/O memory and request IRQ. */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mmu->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mmu->base))
+               return PTR_ERR(mmu->base);
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "no IRQ found\n");
+               return irq;
+       }
+
+       ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
+                              dev_name(&pdev->dev), mmu);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
+               /*
+                * Return the error code, not the IRQ number: irq is a valid
+                * (positive) number at this point, so the original
+                * 'return irq' reported success to the driver core.
+                */
+               return ret;
+       }
+
+       ipmmu_device_reset(mmu);
+
+       /*
+        * We can't create the ARM mapping here as it requires the bus to have
+        * an IOMMU, which only happens when bus_set_iommu() is called in
+        * ipmmu_init() after the probe function returns.
+        */
+
+       spin_lock(&ipmmu_devices_lock);
+       list_add(&mmu->list, &ipmmu_devices);
+       spin_unlock(&ipmmu_devices_lock);
+
+       platform_set_drvdata(pdev, mmu);
+
+       return 0;
+}
+
+/*
+ * ipmmu_remove - Unbind an IPMMU instance
+ *
+ * Takes the instance off the global list so new devices can no longer
+ * match it, releases the ARM DMA mapping, and disables all hardware
+ * contexts.
+ *
+ * NOTE(review): mmu->mapping may still be NULL if no master device was
+ * ever added — presumably arm_iommu_release_mapping() tolerates NULL;
+ * verify against the ARM DMA mapping code.
+ */
+static int ipmmu_remove(struct platform_device *pdev)
+{
+       struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);
+
+       spin_lock(&ipmmu_devices_lock);
+       list_del(&mmu->list);
+       spin_unlock(&ipmmu_devices_lock);
+
+       arm_iommu_release_mapping(mmu->mapping);
+
+       ipmmu_device_reset(mmu);
+
+       return 0;
+}
+
+/*
+ * Platform driver glue.  Instances are matched by name from board code /
+ * platform data; no OF match table is provided at this point.
+ */
+static struct platform_driver ipmmu_driver = {
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = "ipmmu-vmsa",
+       },
+       .probe = ipmmu_probe,
+       .remove = ipmmu_remove,
+};
+
+/*
+ * ipmmu_init - Module initialization
+ *
+ * Registers the platform driver and, if no other IOMMU driver has claimed
+ * the platform bus yet, installs ipmmu_ops as the bus IOMMU ops (which
+ * triggers add_device callbacks for existing devices).
+ *
+ * Return: 0 on success or the platform_driver_register() error.
+ */
+static int __init ipmmu_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&ipmmu_driver);
+       if (ret < 0)
+               return ret;
+
+       if (!iommu_present(&platform_bus_type))
+               bus_set_iommu(&platform_bus_type, &ipmmu_ops);
+
+       return 0;
+}
+
+/*
+ * ipmmu_exit - Module cleanup; unregisters the platform driver.
+ */
+static void __exit ipmmu_exit(void)
+{
+       /*
+        * platform_driver_unregister() returns void; the original
+        * 'return platform_driver_unregister(...)' returned a void
+        * expression from a void function, which C99 6.8.6.4 forbids.
+        */
+       platform_driver_unregister(&ipmmu_driver);
+}
+
+/*
+ * Registered at subsys_initcall level (earlier than module_init) —
+ * presumably so the IOMMU is set up before master device drivers probe;
+ * confirm against the platform's init ordering.
+ */
+subsys_initcall(ipmmu_init);
+module_exit(ipmmu_exit);
+
+MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_LICENSE("GPL v2");
index 08ba4972da9d2543a8ec9ad9f312451647bcd1e9..61def7cb52633c04b4ea764f4948de69d12656f4 100644 (file)
@@ -127,13 +127,12 @@ static void msm_iommu_reset(void __iomem *base, int ncb)
 
 static int msm_iommu_probe(struct platform_device *pdev)
 {
-       struct resource *r, *r2;
+       struct resource *r;
        struct clk *iommu_clk;
        struct clk *iommu_pclk;
        struct msm_iommu_drvdata *drvdata;
        struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
        void __iomem *regs_base;
-       resource_size_t len;
        int ret, irq, par;
 
        if (pdev->id == -1) {
@@ -178,35 +177,16 @@ static int msm_iommu_probe(struct platform_device *pdev)
                iommu_clk = NULL;
 
        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
-
-       if (!r) {
-               ret = -ENODEV;
-               goto fail_clk;
-       }
-
-       len = resource_size(r);
-
-       r2 = request_mem_region(r->start, len, r->name);
-       if (!r2) {
-               pr_err("Could not request memory region: start=%p, len=%d\n",
-                                                       (void *) r->start, len);
-               ret = -EBUSY;
+       regs_base = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(regs_base)) {
+               ret = PTR_ERR(regs_base);
                goto fail_clk;
        }
 
-       regs_base = ioremap(r2->start, len);
-
-       if (!regs_base) {
-               pr_err("Could not ioremap: start=%p, len=%d\n",
-                        (void *) r2->start, len);
-               ret = -EBUSY;
-               goto fail_mem;
-       }
-
        irq = platform_get_irq_byname(pdev, "secure_irq");
        if (irq < 0) {
                ret = -ENODEV;
-               goto fail_io;
+               goto fail_clk;
        }
 
        msm_iommu_reset(regs_base, iommu_dev->ncb);
@@ -222,14 +202,14 @@ static int msm_iommu_probe(struct platform_device *pdev)
        if (!par) {
                pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
                ret = -ENODEV;
-               goto fail_io;
+               goto fail_clk;
        }
 
        ret = request_irq(irq, msm_iommu_fault_handler, 0,
                        "msm_iommu_secure_irpt_handler", drvdata);
        if (ret) {
                pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
-               goto fail_io;
+               goto fail_clk;
        }
 
 
@@ -250,10 +230,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
        clk_disable(iommu_pclk);
 
        return 0;
-fail_io:
-       iounmap(regs_base);
-fail_mem:
-       release_mem_region(r->start, len);
 fail_clk:
        if (iommu_clk) {
                clk_disable(iommu_clk);
index 7fcbfc498fa93c2527968191e6658be99eaa7b2d..895af06a667fefc404967b1a83375727fb09d41f 100644 (file)
@@ -34,6 +34,9 @@
 #include "omap-iopgtable.h"
 #include "omap-iommu.h"
 
+#define to_iommu(dev)                                                  \
+       ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
+
 #define for_each_iotlb_cr(obj, n, __i, cr)                             \
        for (__i = 0;                                                   \
             (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);   \
@@ -391,6 +394,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
                                __func__, start, da, bytes);
                        iotlb_load_cr(obj, &cr);
                        iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
+                       break;
                }
        }
        pm_runtime_put_sync(obj->dev);
@@ -1037,19 +1041,18 @@ static void iopte_cachep_ctor(void *iopte)
        clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
 }
 
-static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
-                                  u32 flags)
+static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
 {
        memset(e, 0, sizeof(*e));
 
        e->da           = da;
        e->pa           = pa;
-       e->valid        = 1;
+       e->valid        = MMU_CAM_V;
        /* FIXME: add OMAP1 support */
-       e->pgsz         = flags & MMU_CAM_PGSZ_MASK;
-       e->endian       = flags & MMU_RAM_ENDIAN_MASK;
-       e->elsz         = flags & MMU_RAM_ELSZ_MASK;
-       e->mixed        = flags & MMU_RAM_MIXED_MASK;
+       e->pgsz         = pgsz;
+       e->endian       = MMU_RAM_ENDIAN_LITTLE;
+       e->elsz         = MMU_RAM_ELSZ_8;
+       e->mixed        = 0;
 
        return iopgsz_to_bytes(e->pgsz);
 }
@@ -1062,9 +1065,8 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
        struct device *dev = oiommu->dev;
        struct iotlb_entry e;
        int omap_pgsz;
-       u32 ret, flags;
+       u32 ret;
 
-       /* we only support mapping a single iommu page for now */
        omap_pgsz = bytes_to_iopgsz(bytes);
        if (omap_pgsz < 0) {
                dev_err(dev, "invalid size to map: %d\n", bytes);
@@ -1073,9 +1075,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 
        dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
 
-       flags = omap_pgsz | prot;
-
-       iotlb_init_entry(&e, da, pa, flags);
+       iotlb_init_entry(&e, da, pa, omap_pgsz);
 
        ret = omap_iopgtable_store_entry(oiommu, &e);
        if (ret)
@@ -1248,12 +1248,6 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
        return ret;
 }
 
-static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
-                                   unsigned long cap)
-{
-       return 0;
-}
-
 static int omap_iommu_add_device(struct device *dev)
 {
        struct omap_iommu_arch_data *arch_data;
@@ -1305,7 +1299,6 @@ static struct iommu_ops omap_iommu_ops = {
        .map            = omap_iommu_map,
        .unmap          = omap_iommu_unmap,
        .iova_to_phys   = omap_iommu_iova_to_phys,
-       .domain_has_cap = omap_iommu_domain_has_cap,
        .add_device     = omap_iommu_add_device,
        .remove_device  = omap_iommu_remove_device,
        .pgsize_bitmap  = OMAP_IOMMU_PGSIZES,
index b6f9a51746caad5956442e7d99a1b5450cf72f0b..f891683e3f05af915738151a1a7574268fe84b3b 100644 (file)
@@ -93,6 +93,3 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 /* to find an entry in the second-level page table. */
 #define iopte_index(da)                (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
 #define iopte_offset(iopgd, da)        (iopgd_page_vaddr(iopgd) + iopte_index(da))
-
-#define to_iommu(dev)                                                  \
-       (platform_get_drvdata(to_platform_device(dev)))
index e3bc2e19b6dd8f2cd7bd91b30120ecbba477abe3..bd97adecb1fd28fb1419db63c261a1fadd7cf4f9 100644 (file)
@@ -94,11 +94,6 @@ static int ipmmu_probe(struct platform_device *pdev)
        struct resource *res;
        struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "cannot get platform resources\n");
-               return -ENOENT;
-       }
        ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL);
        if (!ipmmu) {
                dev_err(&pdev->dev, "cannot allocate device data\n");
@@ -106,19 +101,18 @@ static int ipmmu_probe(struct platform_device *pdev)
        }
        spin_lock_init(&ipmmu->flush_lock);
        ipmmu->dev = &pdev->dev;
-       ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
-                                               resource_size(res));
-       if (!ipmmu->ipmmu_base) {
-               dev_err(&pdev->dev, "ioremap_nocache failed\n");
-               return -ENOMEM;
-       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ipmmu->ipmmu_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(ipmmu->ipmmu_base))
+               return PTR_ERR(ipmmu->ipmmu_base);
+
        ipmmu->dev_names = pdata->dev_names;
        ipmmu->num_dev_names = pdata->num_dev_names;
        platform_set_drvdata(pdev, ipmmu);
        ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */
        ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */
-       ipmmu_iommu_init(ipmmu);
-       return 0;
+       return ipmmu_iommu_init(ipmmu);
 }
 
 static struct platform_driver ipmmu_driver = {
index 784695d22fde1acaaf11acd78c7263438c04648e..53b213226c015ae29cd636c033a0c088776029aa 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
-#include <linux/percpu.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
        struct bvec_iter iter_out;
        sector_t cc_sector;
        atomic_t cc_pending;
+       struct ablkcipher_request *req;
 };
 
 /*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
 
 /*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
-       struct ablkcipher_request *req;
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
+ * The fields in here must be read only after initialization.
  */
 struct crypt_config {
        struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
        sector_t iv_offset;
        unsigned int iv_size;
 
-       /*
-        * Duplicated per cpu state. Access through
-        * per_cpu_ptr() only.
-        */
-       struct crypt_cpu __percpu *cpu;
-
        /* ESSIV: struct crypto_cipher *essiv_tfm */
        void *iv_private;
        struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 
-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
-       return this_cpu_ptr(cc->cpu);
-}
-
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 static void crypt_alloc_req(struct crypt_config *cc,
                            struct convert_context *ctx)
 {
-       struct crypt_cpu *this_cc = this_crypt_config(cc);
        unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
-       if (!this_cc->req)
-               this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+       if (!ctx->req)
+               ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-       ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
-       ablkcipher_request_set_callback(this_cc->req,
+       ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+       ablkcipher_request_set_callback(ctx->req,
            CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-           kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+           kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 /*
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
 static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
 {
-       struct crypt_cpu *this_cc = this_crypt_config(cc);
        int r;
 
        atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
 
                atomic_inc(&ctx->cc_pending);
 
-               r = crypt_convert_block(cc, ctx, this_cc->req);
+               r = crypt_convert_block(cc, ctx, ctx->req);
 
                switch (r) {
                /* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
                        reinit_completion(&ctx->restart);
                        /* fall through*/
                case -EINPROGRESS:
-                       this_cc->req = NULL;
+                       ctx->req = NULL;
                        ctx->cc_sector++;
                        continue;
 
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
        io->sector = sector;
        io->error = 0;
        io->base_io = NULL;
+       io->ctx.req = NULL;
        atomic_set(&io->io_pending, 0);
 
        return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
        if (!atomic_dec_and_test(&io->io_pending))
                return;
 
+       if (io->ctx.req)
+               mempool_free(io->ctx.req, cc->req_pool);
        mempool_free(io, cc->io_pool);
 
        if (likely(!base_io))
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
 static void crypt_dtr(struct dm_target *ti)
 {
        struct crypt_config *cc = ti->private;
-       struct crypt_cpu *cpu_cc;
-       int cpu;
 
        ti->private = NULL;
 
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
        if (cc->crypt_queue)
                destroy_workqueue(cc->crypt_queue);
 
-       if (cc->cpu)
-               for_each_possible_cpu(cpu) {
-                       cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-                       if (cpu_cc->req)
-                               mempool_free(cpu_cc->req, cc->req_pool);
-               }
-
        crypt_free_tfms(cc);
 
        if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
        if (cc->dev)
                dm_put_device(ti, cc->dev);
 
-       if (cc->cpu)
-               free_percpu(cc->cpu);
-
        kzfree(cc->cipher);
        kzfree(cc->cipher_string);
 
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
        if (tmp)
                DMWARN("Ignoring unexpected additional cipher options");
 
-       cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
-                                __alignof__(struct crypt_cpu));
-       if (!cc->cpu) {
-               ti->error = "Cannot allocate per cpu state";
-               goto bad_mem;
-       }
-
        /*
         * For compatibility with the original dm-crypt mapping format, if
         * only the cipher name is supplied, use cbc-plain.
index aa009e86587189a20a4ccf4734ad9d999cb5701f..fa0f6cbd6a41283d21b1286558eb4764e2c06b4b 100644 (file)
@@ -1566,8 +1566,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
                }
                if (m->pg_init_required)
                        __pg_init_all_paths(m);
-               spin_unlock_irqrestore(&m->lock, flags);
                dm_table_run_md_queue_async(m->ti->table);
+               spin_unlock_irqrestore(&m->lock, flags);
        }
 
        return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
index 13abade76ad9bbd65c83c67d35f075506b17b63f..2e71de8e0048c9eb1e1815d79fbedc8aaedf772a 100644 (file)
@@ -27,6 +27,7 @@
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
+#define NO_SPACE_TIMEOUT (HZ * 60)
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
                "A percentage of time allocated for copy on write");
@@ -175,6 +176,7 @@ struct pool {
        struct workqueue_struct *wq;
        struct work_struct worker;
        struct delayed_work waker;
+       struct delayed_work no_space_timeout;
 
        unsigned long last_commit_jiffies;
        unsigned ref_count;
@@ -935,7 +937,7 @@ static int commit(struct pool *pool)
 {
        int r;
 
-       if (get_pool_mode(pool) != PM_WRITE)
+       if (get_pool_mode(pool) >= PM_READ_ONLY)
                return -EINVAL;
 
        r = dm_pool_commit_metadata(pool->pmd);
@@ -1590,6 +1592,20 @@ static void do_waker(struct work_struct *ws)
        queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
+/*
+ * We're holding onto IO to allow userland time to react.  After the
+ * timeout either the pool will have been resized (and thus back in
+ * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ */
+static void do_no_space_timeout(struct work_struct *ws)
+{
+       struct pool *pool = container_of(to_delayed_work(ws), struct pool,
+                                        no_space_timeout);
+
+       if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
+               set_pool_mode(pool, PM_READ_ONLY);
+}
+
 /*----------------------------------------------------------------*/
 
 struct noflush_work {
@@ -1715,6 +1731,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                pool->process_discard = process_discard;
                pool->process_prepared_mapping = process_prepared_mapping;
                pool->process_prepared_discard = process_prepared_discard_passdown;
+
+               if (!pool->pf.error_if_no_space)
+                       queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
                break;
 
        case PM_WRITE:
@@ -2100,6 +2119,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
        INIT_WORK(&pool->worker, do_worker);
        INIT_DELAYED_WORK(&pool->waker, do_waker);
+       INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
        spin_lock_init(&pool->lock);
        bio_list_init(&pool->deferred_flush_bios);
        INIT_LIST_HEAD(&pool->prepared_mappings);
@@ -2662,6 +2682,7 @@ static void pool_postsuspend(struct dm_target *ti)
        struct pool *pool = pt->pool;
 
        cancel_delayed_work(&pool->waker);
+       cancel_delayed_work(&pool->no_space_timeout);
        flush_workqueue(pool->wq);
        (void) commit(pool);
 }
index 8fda38d23e3847aa4d96ecd147e996514a5a4af7..237b7e0ddc7ae2617af41cb1fb9dc0cc195f44ab 100644 (file)
@@ -8516,7 +8516,8 @@ static int md_notify_reboot(struct notifier_block *this,
                if (mddev_trylock(mddev)) {
                        if (mddev->pers)
                                __md_stop_writes(mddev);
-                       mddev->safemode = 2;
+                       if (mddev->persistent)
+                               mddev->safemode = 2;
                        mddev_unlock(mddev);
                }
                need_delay = 1;
index 33fc408e5eacef0a1dce55fd5c0d578fc244b663..cb882aae9e20d4f7032a400884f45f50de57528f 100644 (file)
@@ -1172,6 +1172,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
        int max_sectors;
        int sectors;
 
+       /*
+        * Register the new request and wait if the reconstruction
+        * thread has put up a bar for new requests.
+        * Continue immediately if no resync is active currently.
+        */
+       wait_barrier(conf);
+
        sectors = bio_sectors(bio);
        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            bio->bi_iter.bi_sector < conf->reshape_progress &&
@@ -1552,12 +1559,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
 
        md_write_start(mddev, bio);
 
-       /*
-        * Register the new request and wait if the reconstruction
-        * thread has put up a bar for new requests.
-        * Continue immediately if no resync is active currently.
-        */
-       wait_barrier(conf);
 
        do {
 
index e8a1ce204036f45cca1d1d495f29c8c7b72942d5..cdd7c1b7259b008e873cc711294a3f0cecba422a 100644 (file)
@@ -1109,7 +1109,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
         * windows that fall outside that.
         */
        for (i = 0; i < n_win_sizes; i++) {
-               struct ov7670_win_size *win = &info->devtype->win_sizes[index];
+               struct ov7670_win_size *win = &info->devtype->win_sizes[i];
                if (info->min_width && win->width < info->min_width)
                        continue;
                if (info->min_height && win->height < info->min_height)
index a4459301b5f829efcae8563241ef1108933194d7..ee0f57e01b5677df58c9f74565767bcaf57d54af 100644 (file)
@@ -1616,7 +1616,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
        if (ret < 0)
                return -EINVAL;
 
-       node_ep = v4l2_of_get_next_endpoint(node, NULL);
+       node_ep = of_graph_get_next_endpoint(node, NULL);
        if (!node_ep) {
                dev_warn(dev, "no endpoint defined for node: %s\n",
                                                node->full_name);
index d5a7a135f75d39d5bc2f6fb3366639dd6dc7ee34..703560fa5e73b456cbc0951ccb61957bdfecebac 100644 (file)
@@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev,
        struct media_entity *ent;
        struct media_entity_desc u_ent;
 
+       memset(&u_ent, 0, sizeof(u_ent));
        if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
                return -EFAULT;
 
index b4f12d00be059c3f381b31295d435074ee291dab..65670825296209917cc33546d1d43abb60af5ed5 100644 (file)
@@ -372,18 +372,32 @@ static int vpbe_stop_streaming(struct vb2_queue *vq)
 {
        struct vpbe_fh *fh = vb2_get_drv_priv(vq);
        struct vpbe_layer *layer = fh->layer;
+       struct vpbe_display *disp = fh->disp_dev;
+       unsigned long flags;
 
        if (!vb2_is_streaming(vq))
                return 0;
 
        /* release all active buffers */
+       spin_lock_irqsave(&disp->dma_queue_lock, flags);
+       if (layer->cur_frm == layer->next_frm) {
+               vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR);
+       } else {
+               if (layer->cur_frm != NULL)
+                       vb2_buffer_done(&layer->cur_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+               if (layer->next_frm != NULL)
+                       vb2_buffer_done(&layer->next_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+       }
+
        while (!list_empty(&layer->dma_queue)) {
                layer->next_frm = list_entry(layer->dma_queue.next,
                                                struct vpbe_disp_buffer, list);
                list_del(&layer->next_frm->list);
                vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
        }
-
+       spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
        return 0;
 }
 
index d762246eabf5a3b78a8c348e7a5e5f149580e143..0379cb9f9a9c25c7b0a4efb52e741d4723ddcdc4 100644 (file)
@@ -734,6 +734,8 @@ static int vpfe_release(struct file *file)
                }
                vpfe_dev->io_usrs = 0;
                vpfe_dev->numbuffers = config_params.numbuffers;
+               videobuf_stop(&vpfe_dev->buffer_queue);
+               videobuf_mmap_free(&vpfe_dev->buffer_queue);
        }
 
        /* Decrement device usrs counter */
index 756da78bac23109dbd3e7da464243da381354231..8dea0b84a3ad66788ab29b2437303ebd3bc38d6f 100644 (file)
@@ -358,8 +358,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
 
        common = &ch->common[VPIF_VIDEO_INDEX];
 
+       /* Disable channel as per its device type and channel id */
+       if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
+               enable_channel0(0);
+               channel0_intr_enable(0);
+       }
+       if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
+               (2 == common->started)) {
+               enable_channel1(0);
+               channel1_intr_enable(0);
+       }
+       common->started = 0;
+
        /* release all active buffers */
        spin_lock_irqsave(&common->irqlock, flags);
+       if (common->cur_frm == common->next_frm) {
+               vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+       } else {
+               if (common->cur_frm != NULL)
+                       vb2_buffer_done(&common->cur_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+               if (common->next_frm != NULL)
+                       vb2_buffer_done(&common->next_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+       }
+
        while (!list_empty(&common->dma_queue)) {
                common->next_frm = list_entry(common->dma_queue.next,
                                                struct vpif_cap_buffer, list);
@@ -933,17 +956,6 @@ static int vpif_release(struct file *filep)
        if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
                /* Reset io_usrs member of channel object */
                common->io_usrs = 0;
-               /* Disable channel as per its device type and channel id */
-               if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
-                       enable_channel0(0);
-                       channel0_intr_enable(0);
-               }
-               if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
-                   (2 == common->started)) {
-                       enable_channel1(0);
-                       channel1_intr_enable(0);
-               }
-               common->started = 0;
                /* Free buffers allocated */
                vb2_queue_release(&common->buffer_queue);
                vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
index 0ac841e35aa48dd59bdf17b32f97afbe94b561b5..aed41edd050102e89248cecbb2607a4f45e6eb12 100644 (file)
@@ -320,8 +320,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
 
        common = &ch->common[VPIF_VIDEO_INDEX];
 
+       /* Disable channel */
+       if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+               enable_channel2(0);
+               channel2_intr_enable(0);
+       }
+       if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
+               (2 == common->started)) {
+               enable_channel3(0);
+               channel3_intr_enable(0);
+       }
+       common->started = 0;
+
        /* release all active buffers */
        spin_lock_irqsave(&common->irqlock, flags);
+       if (common->cur_frm == common->next_frm) {
+               vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+       } else {
+               if (common->cur_frm != NULL)
+                       vb2_buffer_done(&common->cur_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+               if (common->next_frm != NULL)
+                       vb2_buffer_done(&common->next_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+       }
+
        while (!list_empty(&common->dma_queue)) {
                common->next_frm = list_entry(common->dma_queue.next,
                                                struct vpif_disp_buffer, list);
@@ -773,18 +796,6 @@ static int vpif_release(struct file *filep)
        if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
                /* Reset io_usrs member of channel object */
                common->io_usrs = 0;
-               /* Disable channel */
-               if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
-                       enable_channel2(0);
-                       channel2_intr_enable(0);
-               }
-               if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
-                   (2 == common->started)) {
-                       enable_channel3(0);
-                       channel3_intr_enable(0);
-               }
-               common->started = 0;
-
                /* Free buffers allocated */
                vb2_queue_release(&common->buffer_queue);
                vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
index da2fc86cc52433bd8f1c6b32a898baa87c2a1064..25dbf5b05a96186527cf520f568f03721b181a80 100644 (file)
@@ -122,7 +122,7 @@ static struct fimc_fmt fimc_formats[] = {
        }, {
                .name           = "YUV 4:2:2 planar, Y/Cb/Cr",
                .fourcc         = V4L2_PIX_FMT_YUV422P,
-               .depth          = { 12 },
+               .depth          = { 16 },
                .color          = FIMC_FMT_YCBYCR422,
                .memplanes      = 1,
                .colplanes      = 3,
index 3aecaf4650942429eba75ee89cc33180aac4ae07..f0c9c42867de2e8cad570229f8592a6b445f1b47 100644 (file)
@@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe)
 
        f_ref = 2UL * priv->cfg->clock / r_val;
        n_val = div_u64_rem(f_vco, f_ref, &k_val);
-       k_val_reg = 1UL * k_val * (1 << 20) / f_ref;
+       k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref);
 
        ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff));
        if (ret < 0)
@@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe)
        if (ret < 0)
                goto err;
 
-       ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \
-                       fc2580_if_filter_lut[i].mul / 1000000000);
+       ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock *
+                       fc2580_if_filter_lut[i].mul, 1000000000));
        if (ret < 0)
                goto err;
 
index be38a9e637e08d20f673cb0c8de85fea608442d8..646c994521361ba7579466d814a8e7d2422397b5 100644 (file)
@@ -22,6 +22,7 @@
 #define FC2580_PRIV_H
 
 #include "fc2580.h"
+#include <linux/math64.h>
 
 struct fc2580_reg_val {
        u8 reg;
index 7407b8338ccfa33ce6a4179e5b9e99632a3f6ebf..bc38f03394cda0e1a397b390f354b620fe486a73 100644 (file)
@@ -41,4 +41,3 @@ ccflags-y += -I$(srctree)/drivers/media/dvb-core
 ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
 ccflags-y += -I$(srctree)/drivers/media/tuners
 ccflags-y += -I$(srctree)/drivers/media/common
-ccflags-y += -I$(srctree)/drivers/staging/media/rtl2832u_sdr
index 61d196e8b3abde6dc0d97e26fe3ca7cae9292957..dcbd392e6efc8f38265d8b7fd805c4b38eff9410 100644 (file)
@@ -24,7 +24,6 @@
 
 #include "rtl2830.h"
 #include "rtl2832.h"
-#include "rtl2832_sdr.h"
 
 #include "qt1010.h"
 #include "mt2060.h"
 #include "tua9001.h"
 #include "r820t.h"
 
+/*
+ * RTL2832_SDR module is in staging. That logic is added in order to avoid any
+ * hard dependency to drivers/staging/ directory as we want compile mainline
+ * driver even whole staging directory is missing.
+ */
+#include <media/v4l2-subdev.h>
+
+#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR)
+struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+       struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+       struct v4l2_subdev *sd);
+#else
+static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+       struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+       struct v4l2_subdev *sd)
+{
+       return NULL;
+}
+#endif
+
+#ifdef CONFIG_MEDIA_ATTACH
+#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
+       void *__r = NULL; \
+       typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
+       if (__a) { \
+               __r = (void *) __a(ARGS); \
+               if (__r == NULL) \
+                       symbol_put(FUNCTION); \
+       } \
+       __r; \
+})
+
+#else
+#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
+       FUNCTION(ARGS); \
+})
+
+#endif
+
 static int rtl28xxu_disable_rc;
 module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644);
 MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller");
@@ -908,7 +946,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
                                adap->fe[0]->ops.tuner_ops.get_rf_strength;
 
                /* attach SDR */
-               dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+               dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
                                &rtl28xxu_rtl2832_fc0012_config, NULL);
                break;
        case TUNER_RTL2832_FC0013:
@@ -920,7 +958,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
                                adap->fe[0]->ops.tuner_ops.get_rf_strength;
 
                /* attach SDR */
-               dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+               dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
                                &rtl28xxu_rtl2832_fc0013_config, NULL);
                break;
        case TUNER_RTL2832_E4000: {
@@ -951,7 +989,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
                        i2c_set_adapdata(i2c_adap_internal, d);
 
                        /* attach SDR */
-                       dvb_attach(rtl2832_sdr_attach, adap->fe[0],
+                       dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0],
                                        i2c_adap_internal,
                                        &rtl28xxu_rtl2832_e4000_config, sd);
                }
@@ -982,7 +1020,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
                                adap->fe[0]->ops.tuner_ops.get_rf_strength;
 
                /* attach SDR */
-               dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+               dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
                                &rtl28xxu_rtl2832_r820t_config, NULL);
                break;
        case TUNER_RTL2832_R828D:
index 7277dbd2afcdb8c88629aafc2c9013b38cd79390..ecbcb39feb71ad21f7b765c36ac20858234d01f6 100644 (file)
@@ -1430,10 +1430,8 @@ static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
        {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
        {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
-#if !IS_ENABLED(CONFIG_USB_SN9C102)
        {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
        {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
-#endif
        {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */
        {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
        {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
index 04b2daf567bec232d384337491543151d890c69a..7e2411c36419c394b3d23a81eb5bdd4b5fee88c2 100644 (file)
@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
 
 static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
 {
+       if (get_user(kp->type, &up->type))
+               return -EFAULT;
+
        switch (kp->type) {
        case V4L2_BUF_TYPE_VIDEO_CAPTURE:
        case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
 
 static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
 {
-       if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
-                       get_user(kp->type, &up->type))
-                       return -EFAULT;
+       if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+               return -EFAULT;
        return __get_v4l2_format32(kp, up);
 }
 
 static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
 {
        if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
-           copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
-                       return -EFAULT;
+           copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
+               return -EFAULT;
        return __get_v4l2_format32(&kp->format, &up->format);
 }
 
index 110c03627051cb749ef92d853e47322714b1dd51..b59a17fb7c3e3f3eff43faecac3610f58df1dbd8 100644 (file)
@@ -108,8 +108,19 @@ static int devbus_set_timing_params(struct devbus *devbus,
                        node->full_name);
                return err;
        }
-       /* Convert bit width to byte width */
-       r.bus_width /= 8;
+
+       /*
+        * The bus width is encoded into the register as 0 for 8 bits,
+        * and 1 for 16 bits, so we do the necessary conversion here.
+        */
+       if (r.bus_width == 8)
+               r.bus_width = 0;
+       else if (r.bus_width == 16)
+               r.bus_width = 1;
+       else {
+               dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width);
+               return -EINVAL;
+       }
 
        err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
                                 &r.badr_skew);
index 9f69e818b0009db7881b3f8c862393836e5a604b..93580a47cc548851a36d1ff4cb45e88636565a3e 100644 (file)
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
 }
 
 /* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                     bool strict_match);
 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
 static void rlb_src_unlink(struct bonding *bond, u32 index);
 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
        bond->alb_info.rlb_promisc_timeout_counter = 0;
 
-       alb_send_learning_packets(bond->curr_active_slave, addr);
+       alb_send_learning_packets(bond->curr_active_slave, addr, true);
 }
 
 /* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 /*********************** tlb/rlb shared functions *********************/
 
 static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
-                           u16 vid)
+                           __be16 vlan_proto, u16 vid)
 {
        struct learning_pkt pkt;
        struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        skb->dev = slave->dev;
 
        if (vid) {
-               skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+               skb = vlan_put_tag(skb, vlan_proto, vid);
                if (!skb) {
                        pr_err("%s: Error: failed to insert VLAN tag\n",
                               slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        dev_queue_xmit(skb);
 }
 
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                     bool strict_match)
 {
        struct bonding *bond = bond_get_bond_by_slave(slave);
        struct net_device *upper;
        struct list_head *iter;
 
        /* send untagged */
-       alb_send_lp_vid(slave, mac_addr, 0);
+       alb_send_lp_vid(slave, mac_addr, 0, 0);
 
        /* loop through vlans and send one packet for each */
        rcu_read_lock();
        netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-               if (upper->priv_flags & IFF_802_1Q_VLAN)
-                       alb_send_lp_vid(slave, mac_addr,
-                                       vlan_dev_vlan_id(upper));
+               if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+                       if (strict_match &&
+                           ether_addr_equal_64bits(mac_addr,
+                                                   upper->dev_addr)) {
+                               alb_send_lp_vid(slave, mac_addr,
+                                               vlan_dev_vlan_proto(upper),
+                                               vlan_dev_vlan_id(upper));
+                       } else if (!strict_match) {
+                               alb_send_lp_vid(slave, upper->dev_addr,
+                                               vlan_dev_vlan_proto(upper),
+                                               vlan_dev_vlan_id(upper));
+                       }
+               }
        }
        rcu_read_unlock();
 }
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 
        /* fasten the change in the switch */
        if (SLAVE_IS_OK(slave1)) {
-               alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+               alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
                         * has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
        }
 
        if (SLAVE_IS_OK(slave2)) {
-               alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+               alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
                         * has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
 
        /* send learning packets */
        if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+               bool strict_match;
+
                /* change of curr_active_slave involves swapping of mac addresses.
                 * in order to avoid this swapping from happening while
                 * sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work)
                 */
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave_rcu(bond, slave, iter)
-                       alb_send_learning_packets(slave, slave->dev->dev_addr);
+               bond_for_each_slave_rcu(bond, slave, iter) {
+                       /* If updating current_active, use all currently
+                        * user mac addreses (!strict_match).  Otherwise, only
+                        * use mac of the slave device.
+                        */
+                       strict_match = (slave != bond->curr_active_slave);
+                       alb_send_learning_packets(slave, slave->dev->dev_addr,
+                                                 strict_match);
+               }
 
                read_unlock(&bond->curr_slave_lock);
 
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        } else {
                /* set the new_slave to the bond mac address */
                alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-               alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+               alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+                                         false);
        }
 
        write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
                alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
                read_lock(&bond->lock);
-               alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+               alb_send_learning_packets(bond->curr_active_slave,
+                                         bond_dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform clients mac address has changed */
                        rlb_req_update_slave_clients(bond, bond->curr_active_slave);
index 69aff72c895716fe6c579d2bf7f46c79ddca2a36..d3a67896d43541f0b8ebc0b4e193db945087bc56 100644 (file)
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
  */
 static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                          __be32 dest_ip, __be32 src_ip,
-                         struct bond_vlan_tag *inner,
-                         struct bond_vlan_tag *outer)
+                         struct bond_vlan_tag *tags)
 {
        struct sk_buff *skb;
+       int i;
 
        pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
                 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                net_err_ratelimited("ARP packet allocation failed\n");
                return;
        }
-       if (outer->vlan_id) {
-               if (inner->vlan_id) {
-                       pr_debug("inner tag: proto %X vid %X\n",
-                                ntohs(inner->vlan_proto), inner->vlan_id);
-                       skb = __vlan_put_tag(skb, inner->vlan_proto,
-                                            inner->vlan_id);
-                       if (!skb) {
-                               net_err_ratelimited("failed to insert inner VLAN tag\n");
-                               return;
-                       }
-               }
 
-               pr_debug("outer reg: proto %X vid %X\n",
-                        ntohs(outer->vlan_proto), outer->vlan_id);
-               skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+       /* Go through all the tags backwards and add them to the packet */
+       for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+               if (!tags[i].vlan_id)
+                       continue;
+
+               pr_debug("inner tag: proto %X vid %X\n",
+                        ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+               skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+                                    tags[i].vlan_id);
+               if (!skb) {
+                       net_err_ratelimited("failed to insert inner VLAN tag\n");
+                       return;
+               }
+       }
+       /* Set the outer tag */
+       if (tags[0].vlan_id) {
+               pr_debug("outer tag: proto %X vid %X\n",
+                        ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+               skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
                if (!skb) {
                        net_err_ratelimited("failed to insert outer VLAN tag\n");
                        return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
        arp_xmit(skb);
 }
 
+/* Validate the device path between the @start_dev and the @end_dev.
+ * The path is valid if the @end_dev is reachable through device
+ * stacking.
+ * When the path is validated, collect any vlan information in the
+ * path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+                                   struct net_device *end_dev,
+                                   struct bond_vlan_tag *tags)
+{
+       struct net_device *upper;
+       struct list_head  *iter;
+       int  idx;
+
+       if (start_dev == end_dev)
+               return true;
+
+       netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+               if (bond_verify_device_path(upper, end_dev, tags)) {
+                       if (is_vlan_dev(upper)) {
+                               idx = vlan_get_encap_level(upper);
+                               if (idx >= BOND_MAX_VLAN_ENCAP)
+                                       return false;
+
+                               tags[idx].vlan_proto =
+                                                   vlan_dev_vlan_proto(upper);
+                               tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+                       }
+                       return true;
+               }
+       }
+
+       return false;
+}
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-       struct net_device *upper, *vlan_upper;
-       struct list_head *iter, *vlan_iter;
        struct rtable *rt;
-       struct bond_vlan_tag inner, outer;
+       struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
        __be32 *targets = bond->params.arp_targets, addr;
        int i;
+       bool ret;
 
        for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
                pr_debug("basa: target %pI4\n", &targets[i]);
-               inner.vlan_proto = 0;
-               inner.vlan_id = 0;
-               outer.vlan_proto = 0;
-               outer.vlan_id = 0;
+               memset(tags, 0, sizeof(tags));
 
                /* Find out through which dev should the packet go */
                rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                                net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
                                                     bond->dev->name,
                                                     &targets[i]);
-                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+                                     0, tags);
                        continue;
                }
 
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                        goto found;
 
                rcu_read_lock();
-               /* first we search only for vlan devices. for every vlan
-                * found we verify its upper dev list, searching for the
-                * rt->dst.dev. If found we save the tag of the vlan and
-                * proceed to send the packet.
-                */
-               netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
-                                                 vlan_iter) {
-                       if (!is_vlan_dev(vlan_upper))
-                               continue;
-
-                       if (vlan_upper == rt->dst.dev) {
-                               outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                               outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                               rcu_read_unlock();
-                               goto found;
-                       }
-                       netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
-                                                         iter) {
-                               if (upper == rt->dst.dev) {
-                                       /* If the upper dev is a vlan dev too,
-                                        *  set the vlan tag to inner tag.
-                                        */
-                                       if (is_vlan_dev(upper)) {
-                                               inner.vlan_proto = vlan_dev_vlan_proto(upper);
-                                               inner.vlan_id = vlan_dev_vlan_id(upper);
-                                       }
-                                       outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                                       outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                                       rcu_read_unlock();
-                                       goto found;
-                               }
-                       }
-               }
-
-               /* if the device we're looking for is not on top of any of
-                * our upper vlans, then just search for any dev that
-                * matches, and in case it's a vlan - save the id
-                */
-               netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-                       if (upper == rt->dst.dev) {
-                               rcu_read_unlock();
-                               goto found;
-                       }
-               }
+               ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
                rcu_read_unlock();
 
+               if (ret)
+                       goto found;
+
                /* Not our device - skip */
                pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
                         bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
                addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
                ip_rt_put(rt);
                bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-                             addr, &inner, &outer);
+                             addr, tags);
        }
 }
 
index 724e30fa20b9fa70166b5d9b25ed9029fab6db73..832070298446458483cdf1132cc46b63ba323a7d 100644 (file)
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
 static const struct bond_opt_value bond_intmax_tbl[] = {
        { "off",     0,       BOND_VALFLAG_DEFAULT},
        { "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+       { NULL,      -1,      0}
 };
 
 static const struct bond_opt_value bond_lacp_rate_tbl[] = {
index b8bdd0acc8f334ac97bca2ddfea602f473c3272f..00bea320e3b50c1eaa75a712f7afbbe65ac146fb 100644 (file)
@@ -36,6 +36,7 @@
 
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
+#define BOND_MAX_VLAN_ENCAP    2
 #define BOND_MAX_ARP_TARGETS   16
 
 #define BOND_DEFAULT_MIIMON    100
index 8ab7103d4f44ea616ae8cb945eda8ea84aa3059b..61ffc12d8fd8e4e01056b06fd3ae73be7abe513e 100644 (file)
@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM
          SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
          boards like am335x, dm814x, dm813x and dm811x.
 
-config CAN_C_CAN_STRICT_FRAME_ORDERING
-       bool "Force a strict RX CAN frame order (may cause frame loss)"
-       ---help---
-         The RX split buffer prevents packet reordering but can cause packet
-         loss. Only enable this option when you accept to lose CAN frames
-         in favour of getting the received CAN frames in the correct order.
-
 config CAN_C_CAN_PCI
        tristate "Generic PCI Bus based C_CAN/D_CAN driver"
        depends on PCI
index a2ca820b5373841d2083d8d5de02fa9fce6c0ecf..95e04e2002daec774d394d0f0912ca7e952f280f 100644 (file)
@@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend)
 static inline void c_can_rx_object_get(struct net_device *dev,
                                       struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       if (obj < C_CAN_MSG_RX_LOW_LAST)
-               c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
-       else
-#endif
                c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
 }
 
 static inline void c_can_rx_finalize(struct net_device *dev,
                                     struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       if (obj < C_CAN_MSG_RX_LOW_LAST)
-               priv->rxmasked |= BIT(obj - 1);
-       else if (obj == C_CAN_MSG_RX_LOW_LAST) {
-               priv->rxmasked = 0;
-               /* activate all lower message objects */
-               c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
-       }
-#endif
        if (priv->type != BOSCH_D_CAN)
                c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
 }
@@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
 {
        u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
 
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       pend &= ~priv->rxmasked;
-#endif
        return pend;
 }
 
@@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
  * has arrived. To work-around this issue, we keep two groups of message
  * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
  *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
- *
- * To ensure in-order frame reception we use the following
- * approach while re-activating a message object to receive further
- * frames:
- * - if the current message object number is lower than
- *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
- *   the INTPND bit.
- * - if the current message object number is equal to
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
- *   receive message objects.
- * - if the current message object number is greater than
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
- *   only this message object.
- *
- * This can cause packet loss!
- *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
- *
  * We clear the newdat bit right away.
  *
  * This can result in packet reordering when the readout is slow.
index c540e3d12e3d826260dbb590e8045c92fa4efb2b..564933ae218c78848dfba1e166f219e9de994e79 100644 (file)
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct sja1000_priv *priv;
        struct peak_pci_chan *chan;
-       struct net_device *dev;
+       struct net_device *dev, *prev_dev;
        void __iomem *cfg_base, *reg_base;
        u16 sub_sys_id, icr;
        int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
        writew(0x0, cfg_base + PITA_ICR + 2);
 
        chan = NULL;
-       for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
-               unregister_sja1000dev(dev);
-               free_sja1000dev(dev);
+       for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
                priv = netdev_priv(dev);
                chan = priv->priv;
+               prev_dev = chan->prev_dev;
+
+               unregister_sja1000dev(dev);
+               free_sja1000dev(dev);
        }
 
        /* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
 
        /* Loop over all registered devices */
        while (1) {
+               struct net_device *prev_dev = chan->prev_dev;
+
                dev_info(&pdev->dev, "removing device %s\n", dev->name);
                unregister_sja1000dev(dev);
                free_sja1000dev(dev);
-               dev = chan->prev_dev;
+               dev = prev_dev;
 
                if (!dev) {
                        /* do that only for first channel */
index 39b26fe28d1051ff916faceb747da7a64dac711f..d7401017a3f10940f3a662bebc555d835be3ce4b 100644 (file)
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
+
+config CX_ECAT
+       tristate "Beckhoff CX5020 EtherCAT master support"
+       depends on PCI
+       ---help---
+         Driver for EtherCAT master module located on CCAT FPGA
+         that can be found on Beckhoff CX5020, and possibly other of CX
+         Beckhoff CX series industrial PCs.
+
+         To compile this driver as a module, choose M here. The module
+         will be called ec_bhf.
+
 source "drivers/net/ethernet/davicom/Kconfig"
 
 config DNET
index 545d0b3b9cb422b2fefa7122b074cd869a9085c2..35190e36c4568e6803279f878a6aa866685ca1be 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_CX_ECAT) += ec_bhf.o
 obj-$(CONFIG_DM9000) += davicom/
 obj-$(CONFIG_DNET) += dnet.o
 obj-$(CONFIG_NET_VENDOR_DEC) += dec/
index d4a187e453698bbe96589921f97d5ea38bc3d5fb..3eff2fd3997e36ef128983fcaf080f595a693715 100644 (file)
@@ -5,3 +5,4 @@
 obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
 altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
 altera_msgdma.o altera_sgdma.o altera_utils.o
+ccflags-y += -D__CHECK_ENDIAN__
index 4d1f2fdd5c3275c3c9952cd118bda99b544ebd55..0fb986ba32905a5dac78c926333c9765ec4678de 100644 (file)
@@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
 void msgdma_reset(struct altera_tse_private *priv)
 {
        int counter;
-       struct msgdma_csr *txcsr =
-               (struct msgdma_csr *)priv->tx_dma_csr;
-       struct msgdma_csr *rxcsr =
-               (struct msgdma_csr *)priv->rx_dma_csr;
 
        /* Reset Rx mSGDMA */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
-       iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
+               msgdma_csroffs(status));
+       csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
+               msgdma_csroffs(control));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(&rxcsr->status,
+               if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
                                     MSGDMA_CSR_STAT_RESETTING))
                        break;
                udelay(1);
@@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
                           "TSE Rx mSGDMA resetting bit never cleared!\n");
 
        /* clear all status bits */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
 
        /* Reset Tx mSGDMA */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
-       iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
+               msgdma_csroffs(status));
+
+       csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
+               msgdma_csroffs(control));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(&txcsr->status,
+               if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
                                     MSGDMA_CSR_STAT_RESETTING))
                        break;
                udelay(1);
@@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
                           "TSE Tx mSGDMA resetting bit never cleared!\n");
 
        /* clear all status bits */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_disable_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+                     MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+                   MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_disable_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+                     MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+                   MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+       csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_clear_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+       csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 /* return 0 to indicate transmit is pending */
 int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-       struct msgdma_extended_desc *desc = priv->tx_dma_desc;
-
-       iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
-       iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
-       iowrite32(0, &desc->write_addr_lo);
-       iowrite32(0, &desc->write_addr_hi);
-       iowrite32(buffer->len, &desc->len);
-       iowrite32(0, &desc->burst_seq_num);
-       iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
-       iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+       csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+               msgdma_descroffs(read_addr_lo));
+       csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+               msgdma_descroffs(read_addr_hi));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
+       csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
+       csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
+               msgdma_descroffs(stride));
+       csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
+               msgdma_descroffs(control));
        return 0;
 }
 
@@ -138,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
        u32 ready = 0;
        u32 inuse;
        u32 status;
-       struct msgdma_csr *txcsr =
-               (struct msgdma_csr *)priv->tx_dma_csr;
 
        /* Get number of sent descriptors */
-       inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+       inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
+                       & 0xffff;
 
        if (inuse) { /* Tx FIFO is not empty */
                ready = priv->tx_prod - priv->tx_cons - inuse - 1;
        } else {
                /* Check for buffered last packet */
-               status = ioread32(&txcsr->status);
+               status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
                if (status & MSGDMA_CSR_STAT_BUSY)
                        ready = priv->tx_prod - priv->tx_cons - 1;
                else
@@ -162,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 void msgdma_add_rx_desc(struct altera_tse_private *priv,
                        struct tse_buffer *rxbuffer)
 {
-       struct msgdma_extended_desc *desc = priv->rx_dma_desc;
        u32 len = priv->rx_dma_buf_sz;
        dma_addr_t dma_addr = rxbuffer->dma_addr;
        u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -172,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv,
                        | MSGDMA_DESC_CTL_TR_ERR_IRQ
                        | MSGDMA_DESC_CTL_GO);
 
-       iowrite32(0, &desc->read_addr_lo);
-       iowrite32(0, &desc->read_addr_hi);
-       iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
-       iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
-       iowrite32(len, &desc->len);
-       iowrite32(0, &desc->burst_seq_num);
-       iowrite32(0x00010001, &desc->stride);
-       iowrite32(control, &desc->control);
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
+       csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
+               msgdma_descroffs(write_addr_lo));
+       csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
+               msgdma_descroffs(write_addr_hi));
+       csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
+       csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
+       csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
 }
 
 /* status is returned on upper 16 bits,
@@ -190,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
        u32 rxstatus = 0;
        u32 pktlength;
        u32 pktstatus;
-       struct msgdma_csr *rxcsr =
-               (struct msgdma_csr *)priv->rx_dma_csr;
-       struct msgdma_response *rxresp =
-               (struct msgdma_response *)priv->rx_dma_resp;
-
-       if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
-               pktlength = ioread32(&rxresp->bytes_transferred);
-               pktstatus = ioread32(&rxresp->status);
+
+       if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
+           & 0xffff) {
+               pktlength = csrrd32(priv->rx_dma_resp,
+                                   msgdma_respoffs(bytes_transferred));
+               pktstatus = csrrd32(priv->rx_dma_resp,
+                                   msgdma_respoffs(status));
                rxstatus = pktstatus;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
index d7b59ba4019c1fe1914b2b5c0643e464fcdff62d..e335626e1b6b5288c4f3d4cc8f45d79dcd56655d 100644 (file)
 #ifndef __ALTERA_MSGDMAHW_H__
 #define __ALTERA_MSGDMAHW_H__
 
-/* mSGDMA standard descriptor format
- */
-struct msgdma_desc {
-       u32 read_addr;  /* data buffer source address */
-       u32 write_addr; /* data buffer destination address */
-       u32 len;        /* the number of bytes to transfer per descriptor */
-       u32 control;    /* characteristics of the transfer */
-};
-
 /* mSGDMA extended descriptor format
  */
 struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
        u32 status;
 };
 
+#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
+#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
+#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
+
 /* mSGDMA response register bit definitions
  */
 #define MSGDMA_RESP_EARLY_TERM BIT(8)
index 9ce8630692b6e7c3a380745ec77afe0e005f1723..99cc56f451cf4886fbf0a5bca044dfa5cf033a2c 100644 (file)
@@ -20,8 +20,8 @@
 #include "altera_sgdmahw.h"
 #include "altera_sgdma.h"
 
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-                               struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                               struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
                                int wfixed);
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-                             struct sgdma_descrip *desc);
+                             struct sgdma_descrip __iomem *desc);
 
 static int sgdma_async_read(struct altera_tse_private *priv);
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc);
+                struct sgdma_descrip __iomem *desc);
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc);
+                struct sgdma_descrip __iomem *desc);
 
 static int sgdma_txbusy(struct altera_tse_private *priv);
 
@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
        priv->rxdescphys = (dma_addr_t) 0;
        priv->txdescphys = (dma_addr_t) 0;
 
-       priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+       priv->rxdescphys = dma_map_single(priv->device,
+                                         (void __force *)priv->rx_dma_desc,
                                          priv->rxdescmem, DMA_BIDIRECTIONAL);
 
        if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
                return -EINVAL;
        }
 
-       priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+       priv->txdescphys = dma_map_single(priv->device,
+                                         (void __force *)priv->tx_dma_desc,
                                          priv->txdescmem, DMA_TO_DEVICE);
 
        if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
        }
 
        /* Initialize descriptor memory to all 0's, sync memory to cache */
-       memset(priv->tx_dma_desc, 0, priv->txdescmem);
-       memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+       memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->txdescmem, DMA_TO_DEVICE);
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
  */
 void sgdma_reset(struct altera_tse_private *priv)
 {
-       u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
-       u32 txdescriplen   = priv->txdescmem;
-       u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
-       u32 rxdescriplen   = priv->rxdescmem;
-       struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
-       struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
-
        /* Initialize descriptor memory to 0 */
-       memset(ptxdescripmem, 0, txdescriplen);
-       memset(prxdescripmem, 0, rxdescriplen);
+       memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
-       iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
-       iowrite32(0, &ptxsgdma->control);
+       csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
 
-       iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
-       iowrite32(0, &prxsgdma->control);
+       csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
 }
 
 /* For SGDMA, interrupts remain enabled after initially enabling,
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
 
 void sgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+       tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+                   SGDMA_CTRLREG_CLRINT);
 }
 
 void sgdma_clear_txirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+       tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+                   SGDMA_CTRLREG_CLRINT);
 }
 
 /* transmits buffer through SGDMA. Returns number of buffers
@@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
  */
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-       int pktstx = 0;
-       struct sgdma_descrip *descbase =
-               (struct sgdma_descrip *)priv->tx_dma_desc;
+       struct sgdma_descrip __iomem *descbase =
+               (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
 
-       struct sgdma_descrip *cdesc = &descbase[0];
-       struct sgdma_descrip *ndesc = &descbase[1];
+       struct sgdma_descrip __iomem *cdesc = &descbase[0];
+       struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
        /* wait 'til the tx sgdma is ready for the next transmit request */
        if (sgdma_txbusy(priv))
@@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
                            0,                          /* read fixed */
                            SGDMA_CONTROL_WR_FIXED);    /* Generate SOP */
 
-       pktstx = sgdma_async_write(priv, cdesc);
+       sgdma_async_write(priv, cdesc);
 
        /* enqueue the request to the pending transmit queue */
        queue_tx(priv, buffer);
@@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 u32 sgdma_tx_completions(struct altera_tse_private *priv)
 {
        u32 ready = 0;
-       struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
 
        if (!sgdma_txbusy(priv) &&
-           ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+           ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+            & SGDMA_CONTROL_HW_OWNED) == 0) &&
            (dequeue_tx(priv))) {
                ready = 1;
        }
@@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
  */
 u32 sgdma_rx_status(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
-       struct sgdma_descrip *desc = NULL;
-       int pktsrx;
-       unsigned int rxstatus = 0;
-       unsigned int pktlength = 0;
-       unsigned int pktstatus = 0;
+       struct sgdma_descrip __iomem *base =
+               (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+       struct sgdma_descrip __iomem *desc = NULL;
        struct tse_buffer *rxbuffer = NULL;
+       unsigned int rxstatus = 0;
 
-       u32 sts = ioread32(&csr->status);
+       u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
 
        desc = &base[0];
        if (sts & SGDMA_STSREG_EOP) {
+               unsigned int pktlength = 0;
+               unsigned int pktstatus = 0;
                dma_sync_single_for_cpu(priv->device,
                                        priv->rxdescphys,
                                        priv->sgdmadesclen,
                                        DMA_FROM_DEVICE);
 
-               pktlength = desc->bytes_xferred;
-               pktstatus = desc->status & 0x3f;
-               rxstatus = pktstatus;
+               pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+               pktstatus = csrrd8(desc, sgdma_descroffs(status));
+               rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
 
                if (rxstatus) {
-                       desc->status = 0;
+                       csrwr8(0, desc, sgdma_descroffs(status));
 
                        rxbuffer = dequeue_rx(priv);
                        if (rxbuffer == NULL)
@@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
                                            "sgdma rx and rx queue empty!\n");
 
                        /* Clear control */
-                       iowrite32(0, &csr->control);
+                       csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
                        /* clear status */
-                       iowrite32(0xf, &csr->status);
+                       csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
 
                        /* kick the rx sgdma after reaping this descriptor */
-                       pktsrx = sgdma_async_read(priv);
+                       sgdma_async_read(priv);
 
                } else {
                        /* If the SGDMA indicated an end of packet on recv,
@@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
                         */
                        netdev_err(priv->dev,
                                   "SGDMA RX Error Info: %x, %x, %x\n",
-                                  sts, desc->status, rxstatus);
+                                  sts, csrrd8(desc, sgdma_descroffs(status)),
+                                  rxstatus);
                }
        } else if (sts == 0) {
-               pktsrx = sgdma_async_read(priv);
+               sgdma_async_read(priv);
        }
 
        return rxstatus;
@@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 
 
 /* Private functions */
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-                               struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                               struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
@@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
                                int wfixed)
 {
        /* Clear the next descriptor as not owned by hardware */
-       u32 ctrl = ndesc->control;
+
+       u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
        ctrl &= ~SGDMA_CONTROL_HW_OWNED;
-       ndesc->control = ctrl;
+       csrwr8(ctrl, ndesc, sgdma_descroffs(control));
 
-       ctrl = 0;
        ctrl = SGDMA_CONTROL_HW_OWNED;
        ctrl |= generate_eop;
        ctrl |= rfixed;
        ctrl |= wfixed;
 
        /* Channel is implicitly zero, initialized to 0 by default */
-
-       desc->raddr = raddr;
-       desc->waddr = waddr;
-       desc->next = lower_32_bits(ndesc_phys);
-       desc->control = ctrl;
-       desc->status = 0;
-       desc->rburst = 0;
-       desc->wburst = 0;
-       desc->bytes = length;
-       desc->bytes_xferred = 0;
+       csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+       csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+
+       csrwr32(0, desc, sgdma_descroffs(pad1));
+       csrwr32(0, desc, sgdma_descroffs(pad2));
+       csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+
+       csrwr8(ctrl, desc, sgdma_descroffs(control));
+       csrwr8(0, desc, sgdma_descroffs(status));
+       csrwr8(0, desc, sgdma_descroffs(wburst));
+       csrwr8(0, desc, sgdma_descroffs(rburst));
+       csrwr16(length, desc, sgdma_descroffs(bytes));
+       csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
 }
 
 /* If hardware is busy, don't restart async read.
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
  */
 static int sgdma_async_read(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       struct sgdma_descrip *descbase =
-               (struct sgdma_descrip *)priv->rx_dma_desc;
+       struct sgdma_descrip __iomem *descbase =
+               (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
 
-       struct sgdma_descrip *cdesc = &descbase[0];
-       struct sgdma_descrip *ndesc = &descbase[1];
+       struct sgdma_descrip __iomem *cdesc = &descbase[0];
+       struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
        struct tse_buffer *rxbuffer = NULL;
 
@@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv)
                                           priv->sgdmadesclen,
                                           DMA_TO_DEVICE);
 
-               iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
-                         &csr->next_descrip);
+               csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+                       priv->rx_dma_csr,
+                       sgdma_csroffs(next_descrip));
 
-               iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
-                         &csr->control);
+               csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+                       priv->rx_dma_csr,
+                       sgdma_csroffs(control));
 
                return 1;
        }
@@ -395,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 }
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-                            struct sgdma_descrip *desc)
+                            struct sgdma_descrip __iomem *desc)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-
        if (sgdma_txbusy(priv))
                return 0;
 
        /* clear control and status */
-       iowrite32(0, &csr->control);
-       iowrite32(0x1f, &csr->status);
+       csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
 
        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->sgdmadesclen, DMA_TO_DEVICE);
 
-       iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
-                 &csr->next_descrip);
+       csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+               priv->tx_dma_csr,
+               sgdma_csroffs(next_descrip));
 
-       iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
-                 &csr->control);
+       csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+               priv->tx_dma_csr,
+               sgdma_csroffs(control));
 
        return 1;
 }
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc)
+                struct sgdma_descrip __iomem *desc)
 {
        dma_addr_t paddr = priv->txdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc)
+                struct sgdma_descrip __iomem *desc)
 {
        dma_addr_t paddr = priv->rxdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
  */
 static int sgdma_rxbusy(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+       return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+                      & SGDMA_STSREG_BUSY;
 }
 
 /* waits for the tx sgdma to finish it's current operation, returns 0
@@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
 static int sgdma_txbusy(struct altera_tse_private *priv)
 {
        int delay = 0;
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
 
        /* if DMA is busy, wait for current transactino to finish */
-       while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+       while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+               & SGDMA_STSREG_BUSY) && (delay++ < 100))
                udelay(1);
 
-       if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+       if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+           & SGDMA_STSREG_BUSY) {
                netdev_err(priv->dev, "timeout waiting for tx dma\n");
                return 1;
        }
index ba3334f353836cd0441d537ba5fda8a28bea171e..85bc33b218d946f557647d1a73f9699e3189827b 100644 (file)
 
 /* SGDMA descriptor structure */
 struct sgdma_descrip {
-       unsigned int    raddr; /* address of data to be read */
-       unsigned int    pad1;
-       unsigned int    waddr;
-       unsigned int    pad2;
-       unsigned int    next;
-       unsigned int    pad3;
-       unsigned short  bytes;
-       unsigned char   rburst;
-       unsigned char   wburst;
-       unsigned short  bytes_xferred;  /* 16 bits, bytes xferred */
+       u32     raddr; /* address of data to be read */
+       u32     pad1;
+       u32     waddr;
+       u32     pad2;
+       u32     next;
+       u32     pad3;
+       u16     bytes;
+       u8      rburst;
+       u8      wburst;
+       u16     bytes_xferred;  /* 16 bits, bytes xferred */
 
        /* bit 0: error
         * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
         * bit 6: reserved
         * bit 7: status eop for recv case
         */
-       unsigned char   status;
+       u8      status;
 
        /* bit 0: eop
         * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
         * bits 3,4,5,6: Channel (always 0)
         * bit 7: hardware owned
         */
-       unsigned char   control;
+       u8      control;
 } __packed;
 
 
@@ -101,6 +101,8 @@ struct sgdma_csr {
        u32     pad3[3];
 };
 
+#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
+#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
 
 #define SGDMA_STSREG_ERR       BIT(0) /* Error */
 #define SGDMA_STSREG_EOP       BIT(1) /* EOP */
index 465c4aabebbd49d299cb266544c3469acd0f3b6b..2adb24d4523c915d3b7d87f1294ead36757cea50 100644 (file)
@@ -357,6 +357,8 @@ struct altera_tse_mac {
        u32 reserved5[42];
 };
 
+#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
+
 /* Transmit and Receive Command Registers Bit Definitions
  */
 #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC                BIT(17)
@@ -487,4 +489,49 @@ struct altera_tse_private {
  */
 void altera_tse_set_ethtool_ops(struct net_device *);
 
+static inline
+u32 csrrd32(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readl(paddr);
+}
+
+static inline
+u16 csrrd16(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readw(paddr);
+}
+
+static inline
+u8 csrrd8(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readb(paddr);
+}
+
+static inline
+void csrwr32(u32 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writel(val, paddr);
+}
+
+static inline
+void csrwr16(u16 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writew(val, paddr);
+}
+
+static inline
+void csrwr8(u8 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writeb(val, paddr);
+}
+
 #endif /* __ALTERA_TSE_H__ */
index 76133caffa78eb3b1e385525fdfc7fda142e77e1..54c25eff795272661e2caabfd554ffcc7dd24e00 100644 (file)
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
                           u64 *buf)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        u64 ext;
 
-       buf[0] = ioread32(&mac->frames_transmitted_ok);
-       buf[1] = ioread32(&mac->frames_received_ok);
-       buf[2] = ioread32(&mac->frames_check_sequence_errors);
-       buf[3] = ioread32(&mac->alignment_errors);
+       buf[0] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_transmitted_ok));
+       buf[1] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_received_ok));
+       buf[2] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_check_sequence_errors));
+       buf[3] = csrrd32(priv->mac_dev,
+                        tse_csroffs(alignment_errors));
 
        /* Extended aOctetsTransmittedOK counter */
-       ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
-       ext |= ioread32(&mac->octets_transmitted_ok);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_octets_transmitted_ok)) << 32;
+
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(octets_transmitted_ok));
        buf[4] = ext;
 
        /* Extended aOctetsReceivedOK counter */
-       ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
-       ext |= ioread32(&mac->octets_received_ok);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_octets_received_ok)) << 32;
+
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(octets_received_ok));
        buf[5] = ext;
 
-       buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
-       buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
-       buf[8] = ioread32(&mac->if_in_errors);
-       buf[9] = ioread32(&mac->if_out_errors);
-       buf[10] = ioread32(&mac->if_in_ucast_pkts);
-       buf[11] = ioread32(&mac->if_in_multicast_pkts);
-       buf[12] = ioread32(&mac->if_in_broadcast_pkts);
-       buf[13] = ioread32(&mac->if_out_discards);
-       buf[14] = ioread32(&mac->if_out_ucast_pkts);
-       buf[15] = ioread32(&mac->if_out_multicast_pkts);
-       buf[16] = ioread32(&mac->if_out_broadcast_pkts);
-       buf[17] = ioread32(&mac->ether_stats_drop_events);
+       buf[6] = csrrd32(priv->mac_dev,
+                        tse_csroffs(tx_pause_mac_ctrl_frames));
+       buf[7] = csrrd32(priv->mac_dev,
+                        tse_csroffs(rx_pause_mac_ctrl_frames));
+       buf[8] = csrrd32(priv->mac_dev,
+                        tse_csroffs(if_in_errors));
+       buf[9] = csrrd32(priv->mac_dev,
+                        tse_csroffs(if_out_errors));
+       buf[10] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_ucast_pkts));
+       buf[11] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_multicast_pkts));
+       buf[12] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_broadcast_pkts));
+       buf[13] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_discards));
+       buf[14] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_ucast_pkts));
+       buf[15] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_multicast_pkts));
+       buf[16] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_broadcast_pkts));
+       buf[17] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_drop_events));
 
        /* Extended etherStatsOctets counter */
-       ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
-       ext |= ioread32(&mac->ether_stats_octets);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_ether_stats_octets)) << 32;
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(ether_stats_octets));
        buf[18] = ext;
 
-       buf[19] = ioread32(&mac->ether_stats_pkts);
-       buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
-       buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
-       buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
-       buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
-       buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
-       buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
-       buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
-       buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
-       buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
-       buf[29] = ioread32(&mac->ether_stats_jabbers);
-       buf[30] = ioread32(&mac->ether_stats_fragments);
+       buf[19] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts));
+       buf[20] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_undersize_pkts));
+       buf[21] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_oversize_pkts));
+       buf[22] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_64_octets));
+       buf[23] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_65to127_octets));
+       buf[24] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_128to255_octets));
+       buf[25] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_256to511_octets));
+       buf[26] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_512to1023_octets));
+       buf[27] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_1024to1518_octets));
+       buf[28] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_1519tox_octets));
+       buf[29] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_jabbers));
+       buf[30] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_fragments));
 }
 
 static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 {
        int i;
        struct altera_tse_private *priv = netdev_priv(dev);
-       u32 *tse_mac_regs = (u32 *)priv->mac_dev;
        u32 *buf = regbuf;
 
        /* Set version to a known value, so ethtool knows
@@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
        regs->version = 1;
 
        for (i = 0; i < TSE_NUM_REGS; i++)
-               buf[i] = ioread32(&tse_mac_regs[i]);
+               buf[i] = csrrd32(priv->mac_dev, i * 4);
 }
 
 static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index e44a4aeb970142a6622d77c52b30a27c58a6e786..7330681574d20ccd02855beabedc9b01c8ae2b48 100644 (file)
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  */
 static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-       struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-       unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
-       u32 data;
+       struct net_device *ndev = bus->priv;
+       struct altera_tse_private *priv = netdev_priv(ndev);
 
        /* set MDIO address */
-       iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+       csrwr32((mii_id & 0x1f), priv->mac_dev,
+               tse_csroffs(mdio_phy0_addr));
 
        /* get the data */
-       data = ioread32(&mdio_regs[regnum]) & 0xffff;
-       return data;
+       return csrrd32(priv->mac_dev,
+                      tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
 }
 
 static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                                 u16 value)
 {
-       struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-       unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+       struct net_device *ndev = bus->priv;
+       struct altera_tse_private *priv = netdev_priv(ndev);
 
        /* set MDIO address */
-       iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+       csrwr32((mii_id & 0x1f), priv->mac_dev,
+               tse_csroffs(mdio_phy0_addr));
 
        /* write the data */
-       iowrite32((u32) value, &mdio_regs[regnum]);
+       csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
        return 0;
 }
 
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                mdio->irq[i] = PHY_POLL;
 
-       mdio->priv = priv->mac_dev;
+       mdio->priv = dev;
        mdio->parent = priv->device;
 
        ret = of_mdiobus_register(mdio, mdio_node);
@@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int nopaged_len = skb_headlen(skb);
        enum netdev_tx ret = NETDEV_TX_OK;
        dma_addr_t dma_addr;
-       int txcomplete = 0;
 
        spin_lock_bh(&priv->tx_lock);
 
@@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dma_sync_single_for_device(priv->device, buffer->dma_addr,
                                   buffer->len, DMA_TO_DEVICE);
 
-       txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+       priv->dmaops->tx_buffer(priv, buffer);
 
        skb_tx_timestamp(skb);
 
@@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
        struct altera_tse_private *priv = netdev_priv(dev);
        struct phy_device *phydev = NULL;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-       int ret;
 
        if (priv->phy_addr != POLL_PHY) {
                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
                        netdev_err(dev, "Could not attach to PHY\n");
 
        } else {
+               int ret;
                phydev = phy_find_first(priv->mdio);
                if (phydev == NULL) {
                        netdev_err(dev, "No PHY found\n");
@@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev)
 
 static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
        u32 msb;
        u32 lsb;
 
@@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
        lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 
        /* Set primary MAC address */
-       iowrite32(msb, &mac->mac_addr_0);
-       iowrite32(lsb, &mac->mac_addr_1);
+       csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
+       csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 }
 
 /* MAC software reset.
@@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
  */
 static int reset_mac(struct altera_tse_private *priv)
 {
-       void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
        int counter;
        u32 dat;
 
-       dat = ioread32(cmd_cfg_reg);
+       dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
        dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
        dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
-       iowrite32(dat, cmd_cfg_reg);
+       csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+               if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
+                                    MAC_CMDCFG_SW_RESET))
                        break;
                udelay(1);
        }
 
        if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               dat = ioread32(cmd_cfg_reg);
+               dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
                dat &= ~MAC_CMDCFG_SW_RESET;
-               iowrite32(dat, cmd_cfg_reg);
+               csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
                return -1;
        }
        return 0;
@@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv)
 */
 static int init_mac(struct altera_tse_private *priv)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
        unsigned int cmd = 0;
        u32 frm_length;
 
        /* Setup Rx FIFO */
-       iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
-                 &mac->rx_section_empty);
-       iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
-       iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
-       iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+       csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+               priv->mac_dev, tse_csroffs(rx_section_empty));
+
+       csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
+               tse_csroffs(rx_section_full));
+
+       csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
+               tse_csroffs(rx_almost_empty));
+
+       csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
+               tse_csroffs(rx_almost_full));
 
        /* Setup Tx FIFO */
-       iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
-                 &mac->tx_section_empty);
-       iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
-       iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
-       iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+       csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+               priv->mac_dev, tse_csroffs(tx_section_empty));
+
+       csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
+               tse_csroffs(tx_section_full));
+
+       csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
+               tse_csroffs(tx_almost_empty));
+
+       csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
+               tse_csroffs(tx_almost_full));
 
        /* MAC Address Configuration */
        tse_update_mac_addr(priv, priv->dev->dev_addr);
 
        /* MAC Function Configuration */
        frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
-       iowrite32(frm_length, &mac->frm_length);
-       iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+       csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
+
+       csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
+               tse_csroffs(tx_ipg_length));
 
        /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
         * start address
         */
-       tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
-       tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
-                                        ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+       tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
+                   ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+
+       tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
+                     ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+                     ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 
        /* Set the MAC options */
-       cmd = ioread32(&mac->command_config);
+       cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
        cmd &= ~MAC_CMDCFG_PAD_EN;      /* No padding Removal on Receive */
        cmd &= ~MAC_CMDCFG_CRC_FWD;     /* CRC Removal */
        cmd |= MAC_CMDCFG_RX_ERR_DISC;  /* Automatically discard frames
@@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv)
        cmd &= ~MAC_CMDCFG_ETH_SPEED;
        cmd &= ~MAC_CMDCFG_ENA_10;
 
-       iowrite32(cmd, &mac->command_config);
+       csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
 
-       iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta);
+       csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
+               tse_csroffs(pause_quanta));
 
        if (netif_msg_hw(priv))
                dev_dbg(priv->device,
@@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
  */
 static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
-       u32 value = ioread32(&mac->command_config);
+       u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 
        if (enable)
                value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
        else
                value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 
-       iowrite32(value, &mac->command_config);
+       csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 }
 
 /* Change the MTU
@@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
 static void altera_tse_set_mcfilter(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        int i;
        struct netdev_hw_addr *ha;
 
        /* clear the hash filter */
        for (i = 0; i < 64; i++)
-               iowrite32(0, &(mac->hash_table[i]));
+               csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 
        netdev_for_each_mc_addr(ha, dev) {
                unsigned int hash = 0;
@@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 
                        hash = (hash << 1) | xor_bit;
                }
-               iowrite32(1, &(mac->hash_table[hash]));
+               csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
        }
 }
 
@@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 static void altera_tse_set_mcfilterall(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        int i;
 
        /* set the hash filter */
        for (i = 0; i < 64; i++)
-               iowrite32(1, &(mac->hash_table[i]));
+               csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 }
 
 /* Set or clear the multicast filter for this adaptor
@@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
 static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
 
        spin_lock(&priv->mac_cfg_lock);
 
        if (dev->flags & IFF_PROMISC)
-               tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+                           MAC_CMDCFG_PROMIS_EN);
 
        if (dev->flags & IFF_ALLMULTI)
                altera_tse_set_mcfilterall(dev);
@@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 static void tse_set_rx_mode(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
 
        spin_lock(&priv->mac_cfg_lock);
 
        if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
            !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
-               tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+                           MAC_CMDCFG_PROMIS_EN);
        else
-               tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
+                             MAC_CMDCFG_PROMIS_EN);
 
        spin_unlock(&priv->mac_cfg_lock);
 }
@@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
                of_property_read_bool(pdev->dev.of_node,
                                      "altr,has-hash-multicast-filter");
 
+       /* Set hash filter to not set for now until the
+        * multicast filter receive issue is debugged
+        */
+       priv->hash_filter = 0;
+
        /* get supplemental address settings for this instance */
        priv->added_unicast =
                of_property_read_bool(pdev->dev.of_node,
@@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct altera_dmaops altera_dtype_sgdma = {
+static const struct altera_dmaops altera_dtype_sgdma = {
        .altera_dtype = ALTERA_DTYPE_SGDMA,
        .dmamask = 32,
        .reset_dma = sgdma_reset,
@@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = {
        .start_rxdma = sgdma_start_rxdma,
 };
 
-struct altera_dmaops altera_dtype_msgdma = {
+static const struct altera_dmaops altera_dtype_msgdma = {
        .altera_dtype = ALTERA_DTYPE_MSGDMA,
        .dmamask = 64,
        .reset_dma = msgdma_reset,
index 70fa13f486b2fc4ef1d6fc45bb82977d13ea1930..d7eeb1713ad2b85721a533fbcc469d22db5740ea 100644 (file)
 #include "altera_tse.h"
 #include "altera_utils.h"
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        value |= bit_mask;
-       iowrite32(value, ioaddr);
+       csrwr32(value, ioaddr, offs);
 }
 
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        value &= ~bit_mask;
-       iowrite32(value, ioaddr);
+       csrwr32(value, ioaddr, offs);
 }
 
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        return (value & bit_mask) ? 1 : 0;
 }
 
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        return (value & bit_mask) ? 0 : 1;
 }
index ce1db36d35832a974f9f958f0b57c298343c0967..baf100ccf5872c7c18ac365ddfe314308b78c9b5 100644 (file)
@@ -19,9 +19,9 @@
 #ifndef __ALTERA_UTILS_H__
 #define __ALTERA_UTILS_H__
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
 
 #endif /* __ALTERA_UTILS_H__*/
index b260913db23609ba34daf1f0e7d2165a7c2cc867..3b0d43154e677bfe8fd1f66aac7d3c0a6afbcfd5 100644 (file)
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BCM_5710_UNDI_FW_MF_MAJOR      (0x07)
 #define BCM_5710_UNDI_FW_MF_MINOR      (0x08)
 #define BCM_5710_UNDI_FW_MF_VERS       (0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p)     (0x1a150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f)     (0x1a184c + ((f) << 4))
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
 static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
 {
        u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
        /* Reset should be performed after BRB is emptied */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
                u32 timer_count = 1000;
+               bool need_write = true;
 
                /* Close the MAC Rx to prevent BRB from filling up */
                bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                         * cleaning methods - might be redundant but harmless.
                         */
                        if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
-                               bnx2x_prev_unload_undi_mf(bp);
+                               if (need_write) {
+                                       bnx2x_prev_unload_undi_mf(bp);
+                                       need_write = false;
+                               }
                        } else if (prev_undi) {
                                /* If UNDI resides in memory,
                                 * manually increment it
index 81cc2d9831c2192edcf414fa90dd4aa5962285aa..b8078d50261bf0944f456acc466c73c138b03306 100644 (file)
@@ -2695,7 +2695,7 @@ out:
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
        }
 
-       return 0;
+       return rc;
 }
 
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
index 0c067e8564dd4e15e43a7e22f3e064091234b99a..784c7155b98a1977f2b809efe4ee5a45b23dc44b 100644 (file)
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
 out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-       return 0;
+       return rc;
 }
 
 /* request pf to config rss table for vf queues*/
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644 (file)
index 0000000..4884205
--- /dev/null
@@ -0,0 +1,706 @@
+ /*
+ * drivers/net/ethernet/beckhoff/ec_bhf.c
+ *
+ * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This is a driver for EtherCAT master module present on CCAT FPGA.
+ * Those can be found on Beckhoff CX50xx industrial PCs.
+ */
+
+#if 0
+#define DEBUG
+#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+
+#define TIMER_INTERVAL_NSEC    20000
+
+#define INFO_BLOCK_SIZE                0x10
+#define INFO_BLOCK_TYPE                0x0
+#define INFO_BLOCK_REV         0x2
+#define INFO_BLOCK_BLK_CNT     0x4
+#define INFO_BLOCK_TX_CHAN     0x4
+#define INFO_BLOCK_RX_CHAN     0x5
+#define INFO_BLOCK_OFFSET      0x8
+
+#define EC_MII_OFFSET          0x4
+#define EC_FIFO_OFFSET         0x8
+#define EC_MAC_OFFSET          0xc
+
+#define MAC_FRAME_ERR_CNT      0x0
+#define MAC_RX_ERR_CNT         0x1
+#define MAC_CRC_ERR_CNT                0x2
+#define MAC_LNK_LST_ERR_CNT    0x3
+#define MAC_TX_FRAME_CNT       0x10
+#define MAC_RX_FRAME_CNT       0x14
+#define MAC_TX_FIFO_LVL                0x20
+#define MAC_DROPPED_FRMS       0x28
+#define MAC_CONNECTED_CCAT_FLAG        0x78
+
+#define MII_MAC_ADDR           0x8
+#define MII_MAC_FILT_FLAG      0xe
+#define MII_LINK_STATUS                0xf
+
+#define FIFO_TX_REG            0x0
+#define FIFO_TX_RESET          0x8
+#define FIFO_RX_REG            0x10
+#define FIFO_RX_ADDR_VALID     (1u << 31)
+#define FIFO_RX_RESET          0x18
+
+#define DMA_CHAN_OFFSET                0x1000
+#define DMA_CHAN_SIZE          0x8
+
+#define DMA_WINDOW_SIZE_MASK   0xfffffffc
+
+static struct pci_device_id ids[] = {
+       { PCI_DEVICE(0x15ec, 0x5000), },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+struct rx_header {
+#define RXHDR_NEXT_ADDR_MASK   0xffffffu
+#define RXHDR_NEXT_VALID       (1u << 31)
+       __le32 next;
+#define RXHDR_NEXT_RECV_FLAG   0x1
+       __le32 recv;
+#define RXHDR_LEN_MASK         0xfffu
+       __le16 len;
+       __le16 port;
+       __le32 reserved;
+       u8 timestamp[8];
+} __packed;
+
+#define PKT_PAYLOAD_SIZE       0x7e8
+struct rx_desc {
+       struct rx_header header;
+       u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+struct tx_header {
+       __le16 len;
+#define TX_HDR_PORT_0          0x1
+#define TX_HDR_PORT_1          0x2
+       u8 port;
+       u8 ts_enable;
+#define TX_HDR_SENT            0x1
+       __le32 sent;
+       u8 timestamp[8];
+} __packed;
+
+struct tx_desc {
+       struct tx_header header;
+       u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+#define FIFO_SIZE              64
+
+static long polling_frequency = TIMER_INTERVAL_NSEC;
+
+struct bhf_dma {
+       u8 *buf;
+       size_t len;
+       dma_addr_t buf_phys;
+
+       u8 *alloc;
+       size_t alloc_len;
+       dma_addr_t alloc_phys;
+};
+
+struct ec_bhf_priv {
+       struct net_device *net_dev;
+
+       struct pci_dev *dev;
+
+       void * __iomem io;
+       void * __iomem dma_io;
+
+       struct hrtimer hrtimer;
+
+       int tx_dma_chan;
+       int rx_dma_chan;
+       void * __iomem ec_io;
+       void * __iomem fifo_io;
+       void * __iomem mii_io;
+       void * __iomem mac_io;
+
+       struct bhf_dma rx_buf;
+       struct rx_desc *rx_descs;
+       int rx_dnext;
+       int rx_dcount;
+
+       struct bhf_dma tx_buf;
+       struct tx_desc *tx_descs;
+       int tx_dcount;
+       int tx_dnext;
+
+       u64 stat_rx_bytes;
+       u64 stat_tx_bytes;
+};
+
+#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
+
+#define ETHERCAT_MASTER_ID     0x14
+
+static void ec_bhf_print_status(struct ec_bhf_priv *priv)
+{
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       dev_dbg(dev, "Frame error counter: %d\n",
+               ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
+       dev_dbg(dev, "RX error counter: %d\n",
+               ioread8(priv->mac_io + MAC_RX_ERR_CNT));
+       dev_dbg(dev, "CRC error counter: %d\n",
+               ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
+       dev_dbg(dev, "TX frame counter: %d\n",
+               ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
+       dev_dbg(dev, "RX frame counter: %d\n",
+               ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
+       dev_dbg(dev, "TX fifo level: %d\n",
+               ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
+       dev_dbg(dev, "Dropped frames: %d\n",
+               ioread8(priv->mac_io + MAC_DROPPED_FRMS));
+       dev_dbg(dev, "Connected with CCAT slot: %d\n",
+               ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
+       dev_dbg(dev, "Link status: %d\n",
+               ioread8(priv->mii_io + MII_LINK_STATUS));
+}
+
+static void ec_bhf_reset(struct ec_bhf_priv *priv)
+{
+       iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
+       iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
+       iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
+       iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
+
+       iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
+       iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
+
+       iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
+}
+
+static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
+{
+       u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
+       u32 addr = (u8 *)desc - priv->tx_buf.buf;
+
+       iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
+
+       dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
+}
+
+static int ec_bhf_desc_sent(struct tx_desc *desc)
+{
+       return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
+}
+
+static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
+{
+       if (unlikely(netif_queue_stopped(priv->net_dev))) {
+               /* Make sure that we perceive changes to tx_dnext. */
+               smp_rmb();
+
+               if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
+                       netif_wake_queue(priv->net_dev);
+       }
+}
+
+static int ec_bhf_pkt_received(struct rx_desc *desc)
+{
+       return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
+}
+
+static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
+{
+       iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
+                 priv->fifo_io + FIFO_RX_REG);
+}
+
+static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
+{
+       struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       while (ec_bhf_pkt_received(desc)) {
+               int pkt_size = (le16_to_cpu(desc->header.len) &
+                              RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
+               u8 *data = desc->data;
+               struct sk_buff *skb;
+
+               skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
+               dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
+
+               if (skb) {
+                       memcpy(skb_put(skb, pkt_size), data, pkt_size);
+                       skb->protocol = eth_type_trans(skb, priv->net_dev);
+                       dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
+
+                       priv->stat_rx_bytes += pkt_size;
+
+                       netif_rx(skb);
+               } else {
+                       dev_err_ratelimited(dev,
+                               "Couldn't allocate a skb_buff for a packet of size %u\n",
+                               pkt_size);
+               }
+
+               desc->header.recv = 0;
+
+               ec_bhf_add_rx_desc(priv, desc);
+
+               priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
+               desc = &priv->rx_descs[priv->rx_dnext];
+       }
+
+}
+
+static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
+{
+       struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
+                                               hrtimer);
+       ec_bhf_process_rx(priv);
+       ec_bhf_process_tx(priv);
+
+       if (!netif_running(priv->net_dev))
+               return HRTIMER_NORESTART;
+
+       hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+       return HRTIMER_RESTART;
+}
+
+static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
+{
+       struct device *dev = PRIV_TO_DEV(priv);
+       unsigned block_count, i;
+       void * __iomem ec_info;
+
+       dev_dbg(dev, "Info block:\n");
+       dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
+       dev_dbg(dev, "Revision of function: %x\n",
+               (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
+
+       block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
+       dev_dbg(dev, "Number of function blocks: %x\n", block_count);
+
+       for (i = 0; i < block_count; i++) {
+               u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
+                                   INFO_BLOCK_TYPE);
+               if (type == ETHERCAT_MASTER_ID)
+                       break;
+       }
+       if (i == block_count) {
+               dev_err(dev, "EtherCAT master with DMA block not found\n");
+               return -ENODEV;
+       }
+       dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
+
+       ec_info = priv->io + i * INFO_BLOCK_SIZE;
+       dev_dbg(dev, "EtherCAT master revision: %d\n",
+               ioread16(ec_info + INFO_BLOCK_REV));
+
+       priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
+       dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
+               priv->tx_dma_chan);
+
+       priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
+       dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
+                priv->rx_dma_chan);
+
+       priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
+       priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
+       priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
+       priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
+
+       dev_dbg(dev,
+               "EtherCAT block address: %p, fifo address: %p, mii address: %p, mac address: %p\n",
+               priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
+
+       return 0;
+}
+
+static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
+                                    struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct tx_desc *desc;
+       unsigned len;
+
+       dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
+
+       desc = &priv->tx_descs[priv->tx_dnext];
+
+       skb_copy_and_csum_dev(skb, desc->data);
+       len = skb->len;
+
+       memset(&desc->header, 0, sizeof(desc->header));
+       desc->header.len = cpu_to_le16(len);
+       desc->header.port = TX_HDR_PORT_0;
+
+       ec_bhf_send_packet(priv, desc);
+
+       priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
+
+       if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
+               /* Make sure that updates to tx_dnext are perceived
+                * by timer routine.
+                */
+               smp_wmb();
+
+               netif_stop_queue(net_dev);
+
+               dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
+               ec_bhf_print_status(priv);
+       }
+
+       priv->stat_tx_bytes += len;
+
+       dev_kfree_skb(skb);
+
+       return NETDEV_TX_OK;
+}
+
+static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
+                               struct bhf_dma *buf,
+                               int channel,
+                               int size)
+{
+       int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
+       struct device *dev = PRIV_TO_DEV(priv);
+       u32 mask;
+
+       iowrite32(0xffffffff, priv->dma_io + offset);
+
+       mask = ioread32(priv->dma_io + offset);
+       mask &= DMA_WINDOW_SIZE_MASK;
+       dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
+
+       /* We want to allocate a chunk of memory that is:
+        * - aligned to the mask we just read
+        * - is of size 2^mask bytes (at most)
+        * In order to ensure that we will allocate buffer of
+        * 2 * 2^mask bytes.
+        */
+       buf->len = min_t(int, ~mask + 1, size);
+       buf->alloc_len = 2 * buf->len;
+
+       dev_dbg(dev, "Allocating %d bytes for channel %d",
+               (int)buf->alloc_len, channel);
+       buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
+                                       GFP_KERNEL);
+       if (buf->alloc == NULL) {
+               dev_info(dev, "Failed to allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
+       buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
+
+       iowrite32(0, priv->dma_io + offset + 4);
+       iowrite32(buf->buf_phys, priv->dma_io + offset);
+       dev_dbg(dev, "Buffer: %x and read from dev: %x",
+               (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
+
+       return 0;
+}
+
+static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
+{
+       int i = 0;
+
+       priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
+       priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
+       priv->tx_dnext = 0;
+
+       for (i = 0; i < priv->tx_dcount; i++)
+               priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
+}
+
+static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
+{
+       int i;
+
+       priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
+       priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
+       priv->rx_dnext = 0;
+
+       for (i = 0; i < priv->rx_dcount; i++) {
+               struct rx_desc *desc = &priv->rx_descs[i];
+               u32 next;
+
+               if (i != priv->rx_dcount - 1)
+                       next = (u8 *)(desc + 1) - priv->rx_buf.buf;
+               else
+                       next = 0;
+               next |= RXHDR_NEXT_VALID;
+               desc->header.next = cpu_to_le32(next);
+               desc->header.recv = 0;
+               ec_bhf_add_rx_desc(priv, desc);
+       }
+}
+
+static int ec_bhf_open(struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct device *dev = PRIV_TO_DEV(priv);
+       int err = 0;
+
+       dev_info(dev, "Opening device\n");
+
+       ec_bhf_reset(priv);
+
+       err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
+                                  FIFO_SIZE * sizeof(struct rx_desc));
+       if (err) {
+               dev_err(dev, "Failed to allocate rx buffer\n");
+               goto out;
+       }
+       ec_bhf_setup_rx_descs(priv);
+
+       dev_info(dev, "RX buffer allocated, address: %x\n",
+                (unsigned)priv->rx_buf.buf_phys);
+
+       err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
+                                  FIFO_SIZE * sizeof(struct tx_desc));
+       if (err) {
+               dev_err(dev, "Failed to allocate tx buffer\n");
+               goto error_rx_free;
+       }
+       dev_dbg(dev, "TX buffer allocated, address: %x\n",
+               (unsigned)priv->tx_buf.buf_phys);
+
+       iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
+
+       ec_bhf_setup_tx_descs(priv);
+
+       netif_start_queue(net_dev);
+
+       hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       priv->hrtimer.function = ec_bhf_timer_fun;
+       hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
+                     HRTIMER_MODE_REL);
+
+       dev_info(PRIV_TO_DEV(priv), "Device open\n");
+
+       ec_bhf_print_status(priv);
+
+       return 0;
+
+error_rx_free:
+       dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
+                         priv->rx_buf.alloc_phys);
+out:
+       return err;
+}
+
+static int ec_bhf_stop(struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       hrtimer_cancel(&priv->hrtimer);
+
+       ec_bhf_reset(priv);
+
+       netif_tx_disable(net_dev);
+
+       dma_free_coherent(dev, priv->tx_buf.alloc_len,
+                         priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
+       dma_free_coherent(dev, priv->rx_buf.alloc_len,
+                         priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
+
+       return 0;
+}
+
+static struct rtnl_link_stats64 *
+ec_bhf_get_stats(struct net_device *net_dev,
+                struct rtnl_link_stats64 *stats)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+       stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
+                               ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
+                               ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
+       stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
+       stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
+       stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
+
+       stats->tx_bytes = priv->stat_tx_bytes;
+       stats->rx_bytes = priv->stat_rx_bytes;
+
+       return stats;
+}
+
+static const struct net_device_ops ec_bhf_netdev_ops = {
+       .ndo_start_xmit         = ec_bhf_start_xmit,
+       .ndo_open               = ec_bhf_open,
+       .ndo_stop               = ec_bhf_stop,
+       .ndo_get_stats64        = ec_bhf_get_stats,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = eth_mac_addr
+};
+
+static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+       struct net_device *net_dev;
+       struct ec_bhf_priv *priv;
+       void * __iomem dma_io;
+       void * __iomem io;
+       int err = 0;
+
+       err = pci_enable_device(dev);
+       if (err)
+               return err;
+
+       pci_set_master(dev);
+
+       err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&dev->dev,
+                       "Required dma mask not supported, failed to initialize device\n");
+               err = -EIO;
+               goto err_disable_dev;
+       }
+
+       err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&dev->dev,
+                       "Required dma mask not supported, failed to initialize device\n");
+               goto err_disable_dev;
+       }
+
+       err = pci_request_regions(dev, "ec_bhf");
+       if (err) {
+               dev_err(&dev->dev, "Failed to request pci memory regions\n");
+               goto err_disable_dev;
+       }
+
+       io = pci_iomap(dev, 0, 0);
+       if (!io) {
+               dev_err(&dev->dev, "Failed to map pci card memory bar 0");
+               err = -EIO;
+               goto err_release_regions;
+       }
+
+       dma_io = pci_iomap(dev, 2, 0);
+       if (!dma_io) {
+               dev_err(&dev->dev, "Failed to map pci card memory bar 2");
+               err = -EIO;
+               goto err_unmap;
+       }
+
+       net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
+       if (net_dev == 0) {
+               err = -ENOMEM;
+               goto err_unmap_dma_io;
+       }
+
+       pci_set_drvdata(dev, net_dev);
+       SET_NETDEV_DEV(net_dev, &dev->dev);
+
+       net_dev->features = 0;
+       net_dev->flags |= IFF_NOARP;
+
+       net_dev->netdev_ops = &ec_bhf_netdev_ops;
+
+       priv = netdev_priv(net_dev);
+       priv->net_dev = net_dev;
+       priv->io = io;
+       priv->dma_io = dma_io;
+       priv->dev = dev;
+
+       err = ec_bhf_setup_offsets(priv);
+       if (err < 0)
+               goto err_free_net_dev;
+
+       memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+
+       dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
+               net_dev->dev_addr);
+
+       err = register_netdev(net_dev);
+       if (err < 0)
+               goto err_free_net_dev;
+
+       return 0;
+
+err_free_net_dev:
+       free_netdev(net_dev);
+err_unmap_dma_io:
+       pci_iounmap(dev, dma_io);
+err_unmap:
+       pci_iounmap(dev, io);
+err_release_regions:
+       pci_release_regions(dev);
+err_disable_dev:
+       pci_clear_master(dev);
+       pci_disable_device(dev);
+
+       return err;
+}
+
+static void ec_bhf_remove(struct pci_dev *dev)
+{
+       struct net_device *net_dev = pci_get_drvdata(dev);
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+       unregister_netdev(net_dev);
+       free_netdev(net_dev);
+
+       pci_iounmap(dev, priv->dma_io);
+       pci_iounmap(dev, priv->io);
+       pci_release_regions(dev);
+       pci_clear_master(dev);
+       pci_disable_device(dev);
+}
+
+static struct pci_driver pci_driver = {
+       .name           = "ec_bhf",
+       .id_table       = ids,
+       .probe          = ec_bhf_probe,
+       .remove         = ec_bhf_remove,
+};
+
+static int __init ec_bhf_init(void)
+{
+       return pci_register_driver(&pci_driver);
+}
+
+static void __exit ec_bhf_exit(void)
+{
+       pci_unregister_driver(&pci_driver);
+}
+
+module_init(ec_bhf_init);
+module_exit(ec_bhf_exit);
+
+module_param(polling_frequency, long, S_IRUGO);
+MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
index a18645407d2152b43353a50b76ccf6317ef90151..dc19bc5dec7732c02220e009ad08ffc393aa1c2c 100644 (file)
@@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
        if (status)
                goto err;
 
+       /* On some BE3 FW versions, after a HW reset,
+        * interrupts will remain disabled for each function.
+        * So, explicitly enable interrupts
+        */
+       be_intr_set(adapter, true);
+
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
index b0c6050479eb460ae306cccaa93926f738e64c2e..b78378cea5e39b6e18627b9f39ac5249c40293fd 100644 (file)
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
        return idx;
 }
 
-static void
+static int
 jme_fill_tx_map(struct pci_dev *pdev,
                struct txdesc *txdesc,
                struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
                                len,
                                PCI_DMA_TODEVICE);
 
+       if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+               return -EINVAL;
+
        pci_dma_sync_single_for_device(pdev,
                                       dmaaddr,
                                       len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
 
        txbi->mapping = dmaaddr;
        txbi->len = len;
+       return 0;
 }
 
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
+{
+       struct jme_ring *txring = &(jme->txring[0]);
+       struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+       int mask = jme->tx_ring_mask;
+       int j;
+
+       for (j = 0 ; j < count ; j++) {
+               ctxbi = txbi + ((startidx + j + 2) & (mask));
+               pci_unmap_page(jme->pdev,
+                               ctxbi->mapping,
+                               ctxbi->len,
+                               PCI_DMA_TODEVICE);
+
+                               ctxbi->mapping = 0;
+                               ctxbi->len = 0;
+       }
+
+}
+
+static int
 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
        struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        int mask = jme->tx_ring_mask;
        const struct skb_frag_struct *frag;
        u32 len;
+       int ret = 0;
 
        for (i = 0 ; i < nr_frags ; ++i) {
                frag = &skb_shinfo(skb)->frags[i];
                ctxdesc = txdesc + ((idx + i + 2) & (mask));
                ctxbi = txbi + ((idx + i + 2) & (mask));
 
-               jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+               ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
                                skb_frag_page(frag),
                                frag->page_offset, skb_frag_size(frag), hidma);
+               if (ret) {
+                       jme_drop_tx_map(jme, idx, i);
+                       goto out;
+               }
+
        }
 
        len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
        ctxdesc = txdesc + ((idx + 1) & (mask));
        ctxbi = txbi + ((idx + 1) & (mask));
-       jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+       ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
                        offset_in_page(skb->data), len, hidma);
+       if (ret)
+               jme_drop_tx_map(jme, idx, i);
+
+out:
+       return ret;
 
 }
 
+
 static int
 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
 {
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        struct txdesc *txdesc;
        struct jme_buffer_info *txbi;
        u8 flags;
+       int ret = 0;
 
        txdesc = (struct txdesc *)txring->desc + idx;
        txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
                jme_tx_csum(jme, skb, &flags);
        jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
-       jme_map_tx_skb(jme, skb, idx);
+       ret = jme_map_tx_skb(jme, skb, idx);
+       if (ret)
+               return ret;
+
        txdesc->desc1.flags = flags;
        /*
         * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_BUSY;
        }
 
-       jme_fill_tx_desc(jme, skb, idx);
+       if (jme_fill_tx_desc(jme, skb, idx))
+               return NETDEV_TX_OK;
 
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
index 78099eab767374319c7e258bfa1f0d6df4c64fa3..92d3249f63f19a71b5ab9ed2e661cb4a8048134a 100644 (file)
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
        },
        {
                .opcode = MLX4_CMD_UPDATE_QP,
-               .has_inbox = false,
+               .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_CMD_EPERM_wrapper
+               .wrapper = mlx4_UPDATE_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_GET_OP_REQ,
index f9c46510196341a6089b0a23d7b53455dad69ae5..212cea440f90c73424d777f94c71fe61a0c48543 100644 (file)
@@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd);
 
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+
 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
index 61d64ebffd56e64b0fa8bf2d0fb69308e3d02c49..fbd32af89c7c0c2b94a36faec71f4e004048050d 100644 (file)
@@ -389,6 +389,41 @@ err_icm:
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                  enum mlx4_update_qp_attr attr,
+                  struct mlx4_update_qp_params *params)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_update_qp_context *cmd;
+       u64 pri_addr_path_mask = 0;
+       int err = 0;
+
+       if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+               return -EINVAL;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+
+       if (attr & MLX4_UPDATE_QP_SMAC) {
+               pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+               cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+       }
+
+       cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+
+       err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+                      MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
        struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
index 1c3fdd4a1f7df3fe84847ae7ff278d652db13463..8f1254a79832c24b96b227de0c30ba3ed0116520 100644 (file)
@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 
 }
 
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd_info)
+{
+       int err;
+       u32 qpn = vhcr->in_modifier & 0xffffff;
+       struct res_qp *rqp;
+       u64 mac;
+       unsigned port;
+       u64 pri_addr_path_mask;
+       struct mlx4_update_qp_context *cmd;
+       int smac_index;
+
+       cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+       pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+       if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+           (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+               return -EPERM;
+
+       /* Just change the smac for the QP */
+       err = get_res(dev, slave, qpn, RES_QP, &rqp);
+       if (err) {
+               mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+               return err;
+       }
+
+       port = (rqp->sched_queue >> 6 & 1) + 1;
+       smac_index = cmd->qp_context.pri_path.grh_mylmc;
+       err = mac_find_smac_ix_in_slave(dev, slave, port,
+                                       smac_index, &mac);
+       if (err) {
+               mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+                        qpn, smac_index);
+               goto err_mac;
+       }
+
+       err = mlx4_cmd(dev, inbox->dma,
+                      vhcr->in_modifier, 0,
+                      MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+       if (err) {
+               mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+               goto err_mac;
+       }
+
+err_mac:
+       put_res(dev, slave, qpn, RES_QP);
+       return err;
+}
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_vhcr *vhcr,
                                         struct mlx4_cmd_mailbox *inbox,
index 7b52a88923ef2e53fadf9aca0185e2af492aa7be..f785d01c7d123dde875a774ebf1027800af33425 100644 (file)
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
                                tx_ring->producer;
 }
 
-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
-                                            struct net_device *netdev)
-{
-       int err;
-
-       netdev->num_tx_queues = adapter->drv_tx_rings;
-       netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
-       err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-       if (err)
-               netdev_err(netdev, "failed to set %d Tx queues\n",
-                          adapter->drv_tx_rings);
-
-       return err;
-}
-
 struct qlcnic_nic_template {
        int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
        int (*config_led) (struct qlcnic_adapter *, u32, u32);
index 0bc914859e38be2c52e070edb82061fa2a6a1e48..7e55e88a81bf26ede0928225fa85998254280195 100644 (file)
@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
        ahw->max_uc_count = count;
 }
 
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+                                     u8 tx_queues, u8 rx_queues)
+{
+       struct net_device *netdev = adapter->netdev;
+       int err = 0;
+
+       if (tx_queues) {
+               err = netif_set_real_num_tx_queues(netdev, tx_queues);
+               if (err) {
+                       netdev_err(netdev, "failed to set %d Tx queues\n",
+                                  tx_queues);
+                       return err;
+               }
+       }
+
+       if (rx_queues) {
+               err = netif_set_real_num_rx_queues(netdev, rx_queues);
+               if (err)
+                       netdev_err(netdev, "failed to set %d Rx queues\n",
+                                  rx_queues);
+       }
+
+       return err;
+}
+
 int
 qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
                    int pci_using_dac)
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->irq = adapter->msix_entries[0].vector;
 
-       err = qlcnic_set_real_num_queues(adapter, netdev);
+       err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+                                        adapter->drv_sds_rings);
        if (err)
                return err;
 
@@ -2943,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
                            tx_ring->tx_stats.xmit_called,
                            tx_ring->tx_stats.xmit_on,
                            tx_ring->tx_stats.xmit_off);
+
+               if (tx_ring->crb_intr_mask)
+                       netdev_info(netdev, "crb_intr_mask=%d\n",
+                                   readl(tx_ring->crb_intr_mask));
+
                netdev_info(netdev,
-                           "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
-                           readl(tx_ring->crb_intr_mask),
+                           "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
                            readl(tx_ring->crb_cmd_producer),
                            tx_ring->producer, tx_ring->sw_consumer,
                            le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3978,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       u8 tx_rings, rx_rings;
        int err;
 
        if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return -EBUSY;
 
+       tx_rings = adapter->drv_tss_rings;
+       rx_rings = adapter->drv_rss_rings;
+
        netif_device_detach(netdev);
+
+       err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+       if (err)
+               goto done;
+
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
@@ -4003,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
                return err;
        }
 
-       netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+       /* Check if we need to update real_num_{tx|rx}_queues because
+        * qlcnic_setup_intr() may change Tx/Rx rings size
+        */
+       if ((tx_rings != adapter->drv_tx_rings) ||
+           (rx_rings != adapter->drv_sds_rings)) {
+               err = qlcnic_set_real_num_queues(adapter,
+                                                adapter->drv_tx_rings,
+                                                adapter->drv_sds_rings);
+               if (err)
+                       goto done;
+       }
 
        if (qlcnic_83xx_check(adapter)) {
                qlcnic_83xx_initialize_nic(adapter, 1);
index 32d969e857f7befc79bf4a6f18cb153c350b374b..89b83e59e1dc601898ddd60bd0fa704fdd7b6d43 100644 (file)
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
        efx->net_dev->rx_cpu_rmap = NULL;
 #endif
 
-       /* Disable MSI/MSI-X interrupts */
-       efx_for_each_channel(channel, efx)
-               free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
-       /* Disable legacy interrupt */
-       if (efx->legacy_irq)
+       if (EFX_INT_MODE_USE_MSI(efx)) {
+               /* Disable MSI/MSI-X interrupts */
+               efx_for_each_channel(channel, efx)
+                       free_irq(channel->irq,
+                                &efx->msi_context[channel->channel]);
+       } else {
+               /* Disable legacy interrupt */
                free_irq(efx->legacy_irq, efx);
+       }
 }
 
 /* Register dump */
index d940034acdd4aa465153f80ae0d5881cb648d6a8..0f4841d2e8dc9b556bde072a1786697ea7041460 100644 (file)
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
                if (ret) {
                        pr_err("%s: Cannot attach to PHY (error: %d)\n",
                               __func__, ret);
-                       goto phy_error;
+                       return ret;
                }
        }
 
@@ -1779,8 +1779,6 @@ init_error:
 dma_desc_error:
        if (priv->phydev)
                phy_disconnect(priv->phydev);
-phy_error:
-       clk_disable_unprepare(priv->stmmac_clk);
 
        return ret;
 }
index df8d383acf48ed0da087bb19d144499484ac0376..b9ac20f42651bd90e33e5fcb3da38db5401117a2 100644 (file)
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
        int i;
 
        for (i = 0; i < N_TX_RINGS; i++)
-               spin_lock(&cp->tx_lock[i]);
+               spin_lock_nested(&cp->tx_lock[i], i);
 }
 
 static inline void cas_lock_all(struct cas *cp)
index 36aa109416c4c3a387a440795a8d3611cf3d4ded..c331b7ebc8124585f989d6fb3dcdbaf8a3d7d3ed 100644 (file)
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
                phyid = be32_to_cpup(parp+1);
                mdio = of_find_device_by_node(mdio_node);
-
-               if (strncmp(mdio->name, "gpio", 4) == 0) {
-                       /* GPIO bitbang MDIO driver attached */
-                       struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, bus->id, phyid);
-               } else {
-                       /* davinci MDIO driver attached */
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, mdio->name, phyid);
+               of_node_put(mdio_node);
+               if (!mdio) {
+                       pr_err("Missing mdio platform device\n");
+                       return -EINVAL;
                }
+               snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+                        PHY_ID_FMT, mdio->name, phyid);
 
                mac_addr = of_get_mac_address(slave_node);
                if (mac_addr)
index b0e2865a6810782e115d61287ed23578646a855e..d53e299ae1d97ca39c4f3af511b97c5dacb32827 100644 (file)
@@ -458,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
 
-       if (change & IFF_ALLMULTI)
-               dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+       if (dev->flags & IFF_UP) {
+               if (change & IFF_ALLMULTI)
+                       dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+       }
 }
 
 static void macvlan_set_mac_lists(struct net_device *dev)
@@ -515,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+       return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
 static void macvlan_set_lockdep_class_one(struct net_device *dev,
                                          struct netdev_queue *txq,
                                          void *_unused)
@@ -525,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
 
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
-       lockdep_set_class(&dev->addr_list_lock,
-                         &macvlan_netdev_addr_lock_key);
+       lockdep_set_class_and_subclass(&dev->addr_list_lock,
+                                      &macvlan_netdev_addr_lock_key,
+                                      macvlan_get_nest_level(dev));
        netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
 }
 
@@ -721,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_fdb_add            = macvlan_fdb_add,
        .ndo_fdb_del            = macvlan_fdb_del,
        .ndo_fdb_dump           = ndo_dflt_fdb_dump,
+       .ndo_get_lock_subclass  = macvlan_get_nest_level,
 };
 
 void macvlan_common_setup(struct net_device *dev)
@@ -849,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        vlan->dev      = dev;
        vlan->port     = port;
        vlan->set_features = MACVLAN_FEATURES;
+       vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
 
        vlan->mode     = MACVLAN_MODE_VEPA;
        if (data && data[IFLA_MACVLAN_MODE])
index 9c4defdec67b09299f38f1b06bf8eacbccd007d1..5f1a2250018fec5ba01a2a1a1a11f2d736f544a9 100644 (file)
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
        if (pdev->dev.of_node) {
                pdata = mdio_gpio_of_get_data(pdev);
                bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+               if (bus_id < 0) {
+                       dev_warn(&pdev->dev, "failed to get alias id\n");
+                       bus_id = 0;
+               }
        } else {
                pdata = dev_get_platdata(&pdev->dev);
                bus_id = pdev->id;
index a972056b22498c15596a88f6b348e0f066ddb85b..3bc079a67a3dc85a222e2b784f7f65ec55dc6b59 100644 (file)
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
        struct delayed_work *dwork = to_delayed_work(work);
        struct phy_device *phydev =
                        container_of(dwork, struct phy_device, state_queue);
-       int needs_aneg = 0, do_suspend = 0;
+       bool needs_aneg = false, do_suspend = false, do_resume = false;
        int err = 0;
 
        mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
        case PHY_PENDING:
                break;
        case PHY_UP:
-               needs_aneg = 1;
+               needs_aneg = true;
 
                phydev->link_timeout = PHY_AN_TIMEOUT;
 
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
                        phydev->adjust_link(phydev->attached_dev);
 
                } else if (0 == phydev->link_timeout--)
-                       needs_aneg = 1;
+                       needs_aneg = true;
                break;
        case PHY_NOLINK:
                err = phy_read_status(phydev);
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
                        netif_carrier_on(phydev->attached_dev);
                } else {
                        if (0 == phydev->link_timeout--)
-                               needs_aneg = 1;
+                               needs_aneg = true;
                }
 
                phydev->adjust_link(phydev->attached_dev);
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
                        phydev->link = 0;
                        netif_carrier_off(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);
-                       do_suspend = 1;
+                       do_suspend = true;
                }
                break;
        case PHY_RESUMING:
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
                        }
                        phydev->adjust_link(phydev->attached_dev);
                }
+               do_resume = true;
                break;
        }
 
@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
 
        if (needs_aneg)
                err = phy_start_aneg(phydev);
-
-       if (do_suspend)
+       else if (do_suspend)
                phy_suspend(phydev);
+       else if (do_resume)
+               phy_resume(phydev);
 
        if (err < 0)
                phy_error(phydev);
index 0ce606624296a80492b18d89a27634614aad5497..4987a1c6dc52e63220edb6fc94eea925a9fdf122 100644 (file)
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
        err = phy_init_hw(phydev);
        if (err)
                phy_detach(phydev);
-
-       phy_resume(phydev);
+       else
+               phy_resume(phydev);
 
        return err;
 }
index c9f3281506af568e534a47789418b466a0a77adc..2e025ddcef210bc888dd8a8ff81473a32c3dd154 100644 (file)
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
        cdc_ncm_unbind(dev, intf);
 }
 
+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+       switch (proto) {
+       case htons(ETH_P_IP):
+       case htons(ETH_P_IPV6):
+               return true;
+       }
+       return false;
+}
 
 static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
        struct cdc_ncm_ctx *ctx = info->ctx;
        __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
        u16 tci = 0;
+       bool is_ip;
        u8 *c;
 
        if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                if (skb->len <= ETH_HLEN)
                        goto error;
 
+               /* Some applications using e.g. packet sockets will
+                * bypass the VLAN acceleration and create tagged
+                * ethernet frames directly.  We primarily look for
+                * the accelerated out-of-band tag, but fall back if
+                * required
+                */
+               skb_reset_mac_header(skb);
+               if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+                   __vlan_get_tag(skb, &tci) == 0) {
+                       is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+                       skb_pull(skb, VLAN_ETH_HLEN);
+               } else {
+                       is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+                       skb_pull(skb, ETH_HLEN);
+               }
+
                /* mapping VLANs to MBIM sessions:
                 *   no tag     => IPS session <0>
                 *   1 - 255    => IPS session <vlanid>
                 *   256 - 511  => DSS session <vlanid - 256>
                 *   512 - 4095 => unsupported, drop
                 */
-               vlan_get_tag(skb, &tci);
-
                switch (tci & 0x0f00) {
                case 0x0000: /* VLAN ID 0 - 255 */
-                       /* verify that datagram is IPv4 or IPv6 */
-                       skb_reset_mac_header(skb);
-                       switch (eth_hdr(skb)->h_proto) {
-                       case htons(ETH_P_IP):
-                       case htons(ETH_P_IPV6):
-                               break;
-                       default:
+                       if (!is_ip)
                                goto error;
-                       }
                        c = (u8 *)&sign;
                        c[3] = tci;
                        break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                                  "unsupported tci=0x%04x\n", tci);
                        goto error;
                }
-               skb_pull(skb, ETH_HLEN);
        }
 
        spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
                return;
 
        /* need to send the NA on the VLAN dev, if any */
-       if (tci)
+       rcu_read_lock();
+       if (tci) {
                netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
                                              tci);
-       else
+               if (!netdev) {
+                       rcu_read_unlock();
+                       return;
+               }
+       } else {
                netdev = dev->net;
-       if (!netdev)
-               return;
+       }
+       dev_hold(netdev);
+       rcu_read_unlock();
 
        in6_dev = in6_dev_get(netdev);
        if (!in6_dev)
-               return;
+               goto out;
        is_router = !!in6_dev->cnf.forwarding;
        in6_dev_put(in6_dev);
 
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
                                 true /* solicited */,
                                 false /* override */,
                                 true /* inc_opt */);
+out:
+       dev_put(netdev);
 }
 
 static bool is_neigh_solicit(u8 *buf, size_t len)
index f46cd0250e488217ca4aa517be12924e3b61c6a8..5627917c5ff761137061467e1b9d71f281ce0652 100644 (file)
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 
        if ((vif->type == NL80211_IFTYPE_AP ||
             vif->type == NL80211_IFTYPE_MESH_POINT) &&
-           bss_conf->enable_beacon)
+           bss_conf->enable_beacon) {
                priv->reconfig_beacon = true;
+               priv->rearm_ani = true;
+       }
 
        if (bss_conf->assoc) {
                priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 
        ath9k_htc_ps_wakeup(priv);
 
+       ath9k_htc_stop_ani(priv);
        del_timer_sync(&priv->tx.cleanup_timer);
        ath9k_htc_tx_drain(priv);
 
index afb3d15e38ff0379a99c5e2c534be23c57b94e38..be1985296bdc75e725cdc161aa4101cea4a19476 100644 (file)
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
        if (!err) {
                /* only set 2G bandwidth using bw_cap command */
                band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
-               band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+               band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
                err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
                                               sizeof(band_bwcap));
        } else {
index fa858d548d13c0bd794b98dc4da2053893b460dc..0489314425cbdf4a4b782867644ee9a0a0ca4b63 100644 (file)
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
 
        if (IWL_MVM_BT_COEX_CORUNNING) {
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-                                                   BT_VALID_CORUN_LUT_40);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+                                                    BT_VALID_CORUN_LUT_40);
                bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
        }
 
        if (IWL_MVM_BT_COEX_MPLUT) {
                bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
        }
 
        if (mvm->cfg->bt_shared_single_ant)
index 9426905de6b283dc0230cf51d5a694478da7797a..d73a89ecd78aa0963c40a268fc7dd8e3d7fbe29d 100644 (file)
@@ -183,9 +183,9 @@ enum iwl_scan_type {
  *     this number of packets were received (typically 1)
  * @passive2active: is auto switching from passive to active during scan allowed
  * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
  * @suspend_time: how long to pause scan when returning to service channel:
- *     bits 0-19: beacon interal in usecs (suspend before executing)
+ *     bits 0-19: beacon interal in TUs (suspend before executing)
  *     bits 20-23: reserved
  *     bits 24-31: number of beacons (suspend between channels)
  * @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
  * @quiet_plcp_th:     quiet channel num of packets threshold
  * @good_CRC_th:       passive to active promotion threshold
  * @rx_chain:          RXON rx chain.
- * @max_out_time:      max uSec to be out of assoceated channel
- * @suspend_time:      pause scan this long when returning to service channel
+ * @max_out_time:      max TUs to be out of assoceated channel
+ * @suspend_time:      pause scan this TUs when returning to service channel
  * @flags:             RXON flags
  * @filter_flags:      RXONfilter
  * @tx_cmd:            tx command for active scan; for 2GHz and for 5GHz.
index f0cebf12c7b8415a3c787d0cc77a9b2b1c2a15ef..b41dc84e9431e6c625d55a28d5cd3ca7a885cf08 100644 (file)
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
        memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
        len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
        if (ret)
                IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
        if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
                return;
 
-       ieee80211_iterate_active_interfaces(
+       ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_mc_iface_iterator, &iter_data);
 }
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       if (!iwl_mvm_is_idle(mvm)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        switch (mvm->scan_status) {
        case IWL_MVM_SCAN_OS:
                IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
index d564233a65da6157c1aaf16a099ddf94b3be933e..f1ec0986c3c912865f0d51e1056ec03c786d8cc2 100644 (file)
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
        return mvmvif->low_latency;
 }
 
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
index 9f52c5b3f0ec0e9b2da5949f2af88bbcd13d89ce..e1c838899363b373d176a71bc32d02282d56c018 100644 (file)
@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
                return;
        }
 
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
        /* Disable last tx check if we are debugging with fixed rate */
        if (lq_sta->dbg_fixed_rate) {
                IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
index c91dc8498852c46653cc43fddb57c382d3d7f3f0..c28de54c75d400d551a0be87d4501cc98fb5967a 100644 (file)
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_scan_condition_iterator,
                                            &global_bound);
-       /*
-        * Under low latency traffic passive scan is fragmented meaning
-        * that dwell on a particular channel will be fragmented. Each fragment
-        * dwell time is 20ms and fragments period is 105ms. Skipping to next
-        * channel will be delayed by the same period - 105ms. So suspend_time
-        * parameter describing both fragments and channels skipping periods is
-        * set to 105ms. This value is chosen so that overall passive scan
-        * duration will not be too long. Max_out_time in this case is set to
-        * 70ms, so for active scanning operating channel will be left for 70ms
-        * while for passive still for 20ms (fragment dwell).
-        */
-       if (global_bound) {
-               if (!iwl_mvm_low_latency(mvm)) {
-                       params->suspend_time = ieee80211_tu_to_usec(100);
-                       params->max_out_time = ieee80211_tu_to_usec(600);
-               } else {
-                       params->suspend_time = ieee80211_tu_to_usec(105);
-                       /* P2P doesn't support fragmented passive scan, so
-                        * configure max_out_time to be at least longest dwell
-                        * time for passive scan.
-                        */
-                       if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
-                               params->max_out_time = ieee80211_tu_to_usec(70);
-                               params->passive_fragmented = true;
-                       } else {
-                               u32 passive_dwell;
 
-                               /*
-                                * Use band G so that passive channel dwell time
-                                * will be assigned with maximum value.
-                                */
-                               band = IEEE80211_BAND_2GHZ;
-                               passive_dwell = iwl_mvm_get_passive_dwell(band);
-                               params->max_out_time =
-                                       ieee80211_tu_to_usec(passive_dwell);
-                       }
-               }
+       if (!global_bound)
+               goto not_bound;
+
+       params->suspend_time = 100;
+       params->max_out_time = 600;
+
+       if (iwl_mvm_low_latency(mvm)) {
+               params->suspend_time = 250;
+               params->max_out_time = 250;
        }
 
+not_bound:
+
        for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-               if (params->passive_fragmented)
-                       params->dwell[band].passive = 20;
-               else
-                       params->dwell[band].passive =
-                               iwl_mvm_get_passive_dwell(band);
+               params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
                params->dwell[band].active = iwl_mvm_get_active_dwell(band,
                                                                      n_ssids);
        }
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
        int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
        int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
        int head = 0;
-       int tail = band_2ghz + band_5ghz;
+       int tail = band_2ghz + band_5ghz - 1;
        u32 ssid_bitmap;
        int cmd_len;
        int ret;
index d619851745a19ba6d3bf605555fcdbd5a09f8341..2180902266ae6636513346df888c9d68abf0a57e 100644 (file)
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
 
        return result;
 }
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+       bool *idle = _data;
+
+       if (!vif->bss_conf.idle)
+               *idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+       bool idle = true;
+
+       ieee80211_iterate_active_interfaces_atomic(
+                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                       iwl_mvm_idle_iter, &idle);
+
+       return idle;
+}
index dcfd6d866d095081d7001795c4ec802c3044926f..2365553f1ef79d59c3e8598335e48eb43e5cfdcb 100644 (file)
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
         * PCI Tx retries from interfering with C3 CPU state */
        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
+       trans->dev = &pdev->dev;
+       trans_pcie->pci_dev = pdev;
+       iwl_disable_interrupts(trans);
+
        err = pci_enable_msi(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        }
 
-       trans->dev = &pdev->dev;
-       trans_pcie->pci_dev = pdev;
        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                goto out_pci_disable_msi;
        }
 
-       trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
        if (iwl_pcie_alloc_ict(trans))
                goto out_free_cmd_pool;
 
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                goto out_free_ict;
        }
 
+       trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
        return trans;
 
 out_free_ict:
index 630a3fcf65bc8113fd6b67ce587f26fbf46c9774..0d4a285cbd7edb45408360a83952ab288debf62d 100644 (file)
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
                              grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
 
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
index ef05c5c49d413d5bb23a3e4adbff88cb0c9ad7cd..20e9defa10606d7d54a0a5412eb93365ce748f5c 100644 (file)
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
        work_done = xenvif_tx_action(vif, budget);
 
        if (work_done < budget) {
-               int more_to_do = 0;
-               unsigned long flags;
-
-               /* It is necessary to disable IRQ before calling
-                * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
-                * lose event from the frontend.
-                *
-                * Consider:
-                *   RING_HAS_UNCONSUMED_REQUESTS
-                *   <frontend generates event to trigger napi_schedule>
-                *   __napi_complete
-                *
-                * This handler is still in scheduled state so the
-                * event has no effect at all. After __napi_complete
-                * this handler is descheduled and cannot get
-                * scheduled again. We lose event in this case and the ring
-                * will be completely stalled.
-                */
-
-               local_irq_save(flags);
-
-               RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-               if (!more_to_do)
-                       __napi_complete(napi);
-
-               local_irq_restore(flags);
+               napi_complete(napi);
+               xenvif_napi_schedule_or_enable_events(vif);
        }
 
        return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
        enable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                enable_irq(vif->rx_irq);
-       xenvif_check_rx_xenvif(vif);
+       xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
index 76665405c5aac32d57eeadf355007cbb2b50cbec..7367208ee8cdd8b324ce661b48aa69c1d884855b 100644 (file)
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 
 /* Find the containing VIF's structure from a pointer in pending_tx_info array
  */
-static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
 {
        u16 pending_idx = ubuf->desc;
        struct pending_tx_info *temp =
@@ -322,6 +322,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
        }
 }
 
+/*
+ * Find the grant ref for a given frag in a chain of struct ubuf_info's
+ * skb: the skb itself
+ * i: the frag's number
+ * ubuf: a pointer to an element in the chain. It should not be NULL
+ *
+ * Returns a pointer to the element in the chain where the page was found. If
+ * not found, returns NULL.
+ * See the definition of callback_struct in common.h for more details about
+ * the chain.
+ */
+static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
+                                               const int i,
+                                               const struct ubuf_info *ubuf)
+{
+       struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+
+       do {
+               u16 pending_idx = ubuf->desc;
+
+               if (skb_shinfo(skb)->frags[i].page.p ==
+                   foreign_vif->mmap_pages[pending_idx])
+                       break;
+               ubuf = (struct ubuf_info *) ubuf->ctx;
+       } while (ubuf);
+
+       return ubuf;
+}
+
 /*
  * Prepare an SKB to be transmitted to the frontend.
  *
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        int head = 1;
        int old_meta_prod;
        int gso_type;
-       struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-       grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
-       struct xenvif *foreign_vif = NULL;
+       const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+       const struct ubuf_info *const head_ubuf = ubuf;
 
        old_meta_prod = npo->meta_prod;
 
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        npo->copy_off = 0;
        npo->copy_gref = req->gref;
 
-       if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-                (ubuf->callback == &xenvif_zerocopy_callback)) {
-               int i = 0;
-               foreign_vif = ubuf_to_vif(ubuf);
-
-               do {
-                       u16 pending_idx = ubuf->desc;
-                       foreign_grefs[i++] =
-                               foreign_vif->pending_tx_info[pending_idx].req.gref;
-                       ubuf = (struct ubuf_info *) ubuf->ctx;
-               } while (ubuf);
-       }
-
        data = skb->data;
        while (data < skb_tail_pointer(skb)) {
                unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        }
 
        for (i = 0; i < nr_frags; i++) {
+               /* This variable also signals whether foreign_gref has a real
+                * value or not.
+                */
+               struct xenvif *foreign_vif = NULL;
+               grant_ref_t foreign_gref;
+
+               if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+                       (ubuf->callback == &xenvif_zerocopy_callback)) {
+                       const struct ubuf_info *const startpoint = ubuf;
+
+                       /* Ideally ubuf points to the chain element which
+                        * belongs to this frag. Or if frags were removed from
+                        * the beginning, then shortly before it.
+                        */
+                       ubuf = xenvif_find_gref(skb, i, ubuf);
+
+                       /* Try again from the beginning of the list, if we
+                        * haven't tried from there. This only makes sense in
+                        * the unlikely event of reordering the original frags.
+                        * For injected local pages it's an unnecessary second
+                        * run.
+                        */
+                       if (unlikely(!ubuf) && startpoint != head_ubuf)
+                               ubuf = xenvif_find_gref(skb, i, head_ubuf);
+
+                       if (likely(ubuf)) {
+                               u16 pending_idx = ubuf->desc;
+
+                               foreign_vif = ubuf_to_vif(ubuf);
+                               foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+                               /* Just a safety measure. If this was the last
+                                * element on the list, the for loop will
+                                * iterate again if a local page was added to
+                                * the end. Using head_ubuf here prevents the
+                                * second search on the chain. Or the original
+                                * frags changed order, but that's less likely.
+                                * In any case, ubuf shouldn't be NULL.
+                                */
+                               ubuf = ubuf->ctx ?
+                                       (struct ubuf_info *) ubuf->ctx :
+                                       head_ubuf;
+                       } else
+                               /* This frag was a local page, added to the
+                                * array after the skb left netback.
+                                */
+                               ubuf = head_ubuf;
+               }
                xenvif_gop_frag_copy(vif, skb, npo,
                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                     skb_shinfo(skb)->frags[i].page_offset,
                                     &head,
                                     foreign_vif,
-                                    foreign_grefs[i]);
+                                    foreign_vif ? foreign_gref : UINT_MAX);
        }
 
        return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
                notify_remote_via_irq(vif->rx_irq);
 }
 
-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
 {
        int more_to_do;
 
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
 {
        struct xenvif *vif = (struct xenvif *)data;
        tx_add_credit(vif);
-       xenvif_check_rx_xenvif(vif);
+       xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_tx_err(struct xenvif *vif,
index 6d4ee22708c93791d53860c0243d6a9258678283..32e969d9531909e575a37b80fb0debd3f4ae73d2 100644 (file)
@@ -1831,6 +1831,10 @@ int of_update_property(struct device_node *np, struct property *newprop)
        if (!found)
                return -ENODEV;
 
+       /* At early boot, bail out and defer setup to of_init() */
+       if (!of_kset)
+               return found ? 0 : -ENODEV;
+
        /* Update the sysfs attribute */
        sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
        __of_add_property_sysfs(np, newprop);
index d3d1cfd51e095f058404d96f063df76d227bd652..e384e2534594731a95eb7524fb75583fce208541 100644 (file)
@@ -293,6 +293,58 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
        return PCIBIOS_SUCCESSFUL;
 }
 
+/*
+ * Remove windows, starting from the largest ones to the smallest
+ * ones.
+ */
+static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
+                                  phys_addr_t base, size_t size)
+{
+       while (size) {
+               size_t sz = 1 << (fls(size) - 1);
+
+               mvebu_mbus_del_window(base, sz);
+               base += sz;
+               size -= sz;
+       }
+}
+
+/*
+ * MBus windows can only have a power of two size, but PCI BARs do not
+ * have this constraint. Therefore, we have to split the PCI BAR into
+ * areas each having a power of two size. We start from the largest
+ * one (i.e highest order bit set in the size).
+ */
+static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
+                                  unsigned int target, unsigned int attribute,
+                                  phys_addr_t base, size_t size,
+                                  phys_addr_t remap)
+{
+       size_t size_mapped = 0;
+
+       while (size) {
+               size_t sz = 1 << (fls(size) - 1);
+               int ret;
+
+               ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+                                                       sz, remap);
+               if (ret) {
+                       dev_err(&port->pcie->pdev->dev,
+                               "Could not create MBus window at 0x%x, size 0x%x: %d\n",
+                               base, sz, ret);
+                       mvebu_pcie_del_windows(port, base - size_mapped,
+                                              size_mapped);
+                       return;
+               }
+
+               size -= sz;
+               size_mapped += sz;
+               base += sz;
+               if (remap != MVEBU_MBUS_NO_REMAP)
+                       remap += sz;
+       }
+}
+
 static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
 {
        phys_addr_t iobase;
@@ -304,8 +356,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
 
                /* If a window was configured, remove it */
                if (port->iowin_base) {
-                       mvebu_mbus_del_window(port->iowin_base,
-                                             port->iowin_size);
+                       mvebu_pcie_del_windows(port, port->iowin_base,
+                                              port->iowin_size);
                        port->iowin_base = 0;
                        port->iowin_size = 0;
                }
@@ -331,11 +383,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
        port->iowin_base = port->pcie->io.start + iobase;
        port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
                            (port->bridge.iolimitupper << 16)) -
-                           iobase);
+                           iobase) + 1;
 
-       mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr,
-                                         port->iowin_base, port->iowin_size,
-                                         iobase);
+       mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
+                              port->iowin_base, port->iowin_size,
+                              iobase);
 }
 
 static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
@@ -346,8 +398,8 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
 
                /* If a window was configured, remove it */
                if (port->memwin_base) {
-                       mvebu_mbus_del_window(port->memwin_base,
-                                             port->memwin_size);
+                       mvebu_pcie_del_windows(port, port->memwin_base,
+                                              port->memwin_size);
                        port->memwin_base = 0;
                        port->memwin_size = 0;
                }
@@ -364,10 +416,11 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
        port->memwin_base  = ((port->bridge.membase & 0xFFF0) << 16);
        port->memwin_size  =
                (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
-               port->memwin_base;
+               port->memwin_base + 1;
 
-       mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr,
-                                   port->memwin_base, port->memwin_size);
+       mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
+                              port->memwin_base, port->memwin_size,
+                              MVEBU_MBUS_NO_REMAP);
 }
 
 /*
@@ -743,14 +796,21 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
 
        /*
         * On the PCI-to-PCI bridge side, the I/O windows must have at
-        * least a 64 KB size and be aligned on their size, and the
-        * memory windows must have at least a 1 MB size and be
-        * aligned on their size
+        * least a 64 KB size and the memory windows must have at
+        * least a 1 MB size. Moreover, MBus windows need to have a
+        * base address aligned on their size, and their size must be
+        * a power of two. This means that if the BAR doesn't have a
+        * power of two size, several MBus windows will actually be
+        * created. We need to ensure that the biggest MBus window
+        * (which will be the first one) is aligned on its size, which
+        * explains the rounddown_pow_of_two() being done here.
         */
        if (res->flags & IORESOURCE_IO)
-               return round_up(start, max_t(resource_size_t, SZ_64K, size));
+               return round_up(start, max_t(resource_size_t, SZ_64K,
+                                            rounddown_pow_of_two(size)));
        else if (res->flags & IORESOURCE_MEM)
-               return round_up(start, max_t(resource_size_t, SZ_1M, size));
+               return round_up(start, max_t(resource_size_t, SZ_1M,
+                                            rounddown_pow_of_two(size)));
        else
                return start;
 }
index 58499277903a4ab4a6225982d88876de399120e2..6efc2ec5e4db0823758a409eb95c2d3054a8ba48 100644 (file)
@@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot)
                return WRONG_BUS_FREQUENCY;
        }
 
-       bsp = ctrl->pci_dev->bus->cur_bus_speed;
-       msp = ctrl->pci_dev->bus->max_bus_speed;
+       bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
+       msp = ctrl->pci_dev->subordinate->max_bus_speed;
 
        /* Check if there are other slots or devices on the same bus */
        if (!list_empty(&ctrl->pci_dev->subordinate->devices))
index 7325d43bf030ce65d5f386f6aeeeb3bfa4d5c482..759475ef6ff3206bd04103043cfed21092766493 100644 (file)
@@ -3067,7 +3067,8 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
        if (!pci_is_pcie(dev))
                return 1;
 
-       return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
+       return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
+                                   PCI_EXP_DEVSTA_TRPND);
 }
 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
 
@@ -3109,7 +3110,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
                return 0;
 
        /* Wait for Transaction Pending bit clean */
-       if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP))
+       if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
                goto clear;
 
        dev_err(&dev->dev, "transaction is not cleared; "
index 6963bdf5417593921122694d8ae425ff7a599f7e..6aea373547f65f3743faa7236b5035a83e178966 100644 (file)
@@ -6,6 +6,7 @@ menu "PTP clock support"
 
 config PTP_1588_CLOCK
        tristate "PTP clock support"
+       depends on NET
        select PPS
        select NET_PTP_CLASSIFY
        help
@@ -74,7 +75,7 @@ config DP83640_PHY
 config PTP_1588_CLOCK_PCH
        tristate "Intel PCH EG20T as PTP clock"
        depends on X86 || COMPILE_TEST
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && NET
        select PTP_1588_CLOCK
        help
          This driver adds support for using the PCH EG20T as a PTP
index bd628a6f981d8b550c7e651c845232fcaf5a57ff..e5f13c4310feb368b5855269a043c88bc698da74 100644 (file)
@@ -569,6 +569,9 @@ static int hym8563_probe(struct i2c_client *client,
        if (IS_ERR(hym8563->rtc))
                return PTR_ERR(hym8563->rtc);
 
+       /* the hym8563 alarm only supports a minute accuracy */
+       hym8563->rtc->uie_unsupported = 1;
+
 #ifdef CONFIG_COMMON_CLK
        hym8563_clkout_register_clk(hym8563);
 #endif
index 1b681427dde045cdad8f2455c07467fba4f077f1..c341f855fadcd433a6730db0d941317615a7fe9e 100644 (file)
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
        list_del(&rphy->list);
        mutex_unlock(&sas_host->lock);
 
-       sas_bsg_remove(shost, rphy);
-
        transport_destroy_device(dev);
 
        put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
        }
 
        sas_rphy_unlink(rphy);
+       sas_bsg_remove(NULL, rphy);
        transport_remove_device(dev);
        device_del(dev);
 }
index fc67f564f02cf77eec2350f4435836d409020314..788ed9b59b4e3f04c3a485fefe6d31dfdb6b681f 100644 (file)
@@ -1,10 +1,12 @@
 #
 # Makefile for the SuperH specific drivers.
 #
-obj-y  := intc/
+obj-$(CONFIG_SUPERH)                   += intc/
+obj-$(CONFIG_ARCH_SHMOBILE_LEGACY)     += intc/
+ifneq ($(CONFIG_COMMON_CLK),y)
+obj-$(CONFIG_HAVE_CLK)                 += clk/
+endif
+obj-$(CONFIG_MAPLE)                    += maple/
+obj-$(CONFIG_SUPERHYWAY)               += superhyway/
 
-obj-$(CONFIG_HAVE_CLK)         += clk/
-obj-$(CONFIG_MAPLE)            += maple/
-obj-$(CONFIG_SUPERHYWAY)       += superhyway/
-
-obj-y                          += pm_runtime.o
+obj-y                                  += pm_runtime.o
index 8afa5a4589f2dd03771acaac8be585447e8033c0..10c65eb51f8587ca79b15291a75c04c16e4604a2 100644 (file)
@@ -50,8 +50,25 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
        .con_ids = { NULL, },
 };
 
+static bool default_pm_on;
+
 static int __init sh_pm_runtime_init(void)
 {
+       if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
+               if (!of_machine_is_compatible("renesas,emev2") &&
+                   !of_machine_is_compatible("renesas,r7s72100") &&
+                   !of_machine_is_compatible("renesas,r8a73a4") &&
+                   !of_machine_is_compatible("renesas,r8a7740") &&
+                   !of_machine_is_compatible("renesas,r8a7778") &&
+                   !of_machine_is_compatible("renesas,r8a7779") &&
+                   !of_machine_is_compatible("renesas,r8a7790") &&
+                   !of_machine_is_compatible("renesas,r8a7791") &&
+                   !of_machine_is_compatible("renesas,sh7372") &&
+                   !of_machine_is_compatible("renesas,sh73a0"))
+                       return 0;
+       }
+
+       default_pm_on = true;
        pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
        return 0;
 }
@@ -59,7 +76,8 @@ core_initcall(sh_pm_runtime_init);
 
 static int __init sh_pm_runtime_late_init(void)
 {
-       pm_genpd_poweroff_unused();
+       if (default_pm_on)
+               pm_genpd_poweroff_unused();
        return 0;
 }
 late_initcall(sh_pm_runtime_late_init);
index 713af4806f265e10b87dcfade6b6989055c2f283..f6759dc0153b4a8c45fb83f7660e34be2a47641b 100644 (file)
@@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
        struct sg_table *sgt;
        void *buf, *pbuf;
 
-       /*
-        * Some DMA controllers have problems transferring buffers that are
-        * not multiple of 4 bytes. So we truncate the transfer so that it
-        * is suitable for such controllers, and handle the trailing bytes
-        * manually after the DMA completes.
-        *
-        * REVISIT: It would be better if this information could be
-        * retrieved directly from the DMA device in a similar way than
-        * ->copy_align etc. is done.
-        */
-       len = ALIGN(drv_data->len, 4);
-
        if (dir == DMA_TO_DEVICE) {
                dmadev = drv_data->tx_chan->device->dev;
                sgt = &drv_data->tx_sgt;
@@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
                if (!error) {
                        pxa2xx_spi_unmap_dma_buffers(drv_data);
 
-                       /* Handle the last bytes of unaligned transfer */
                        drv_data->tx += drv_data->tx_map_len;
-                       drv_data->write(drv_data);
-
                        drv_data->rx += drv_data->rx_map_len;
-                       drv_data->read(drv_data);
 
                        msg->actual_length += drv_data->len;
                        msg->state = pxa2xx_spi_next_transfer(drv_data);
index b032e8885e2435b3585810f1266bca9aa3fce6a8..78c66e3c53ed5f88d8d79ac3c16cdb4800af71f0 100644 (file)
@@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev)
        int ret;
 
        ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
index 4eb9bf02996cf179cf3e6365421867aaf8a10593..939edf473235dca2fb7692fe0fec6dbdd649bbd0 100644 (file)
@@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
                spi->master->set_cs(spi, !enable);
 }
 
+#ifdef CONFIG_HAS_DMA
 static int spi_map_buf(struct spi_master *master, struct device *dev,
                       struct sg_table *sgt, void *buf, size_t len,
                       enum dma_data_direction dir)
@@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
        }
 }
 
-static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
 {
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
-       void *tmp;
-       unsigned int max_tx, max_rx;
        int ret;
 
-       if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
-               max_tx = 0;
-               max_rx = 0;
-
-               list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-                       if ((master->flags & SPI_MASTER_MUST_TX) &&
-                           !xfer->tx_buf)
-                               max_tx = max(xfer->len, max_tx);
-                       if ((master->flags & SPI_MASTER_MUST_RX) &&
-                           !xfer->rx_buf)
-                               max_rx = max(xfer->len, max_rx);
-               }
-
-               if (max_tx) {
-                       tmp = krealloc(master->dummy_tx, max_tx,
-                                      GFP_KERNEL | GFP_DMA);
-                       if (!tmp)
-                               return -ENOMEM;
-                       master->dummy_tx = tmp;
-                       memset(tmp, 0, max_tx);
-               }
-
-               if (max_rx) {
-                       tmp = krealloc(master->dummy_rx, max_rx,
-                                      GFP_KERNEL | GFP_DMA);
-                       if (!tmp)
-                               return -ENOMEM;
-                       master->dummy_rx = tmp;
-               }
-
-               if (max_tx || max_rx) {
-                       list_for_each_entry(xfer, &msg->transfers,
-                                           transfer_list) {
-                               if (!xfer->tx_buf)
-                                       xfer->tx_buf = master->dummy_tx;
-                               if (!xfer->rx_buf)
-                                       xfer->rx_buf = master->dummy_rx;
-                       }
-               }
-       }
-
        if (!master->can_dma)
                return 0;
 
@@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 
        return 0;
 }
+#else /* !CONFIG_HAS_DMA */
+static inline int __spi_map_msg(struct spi_master *master,
+                               struct spi_message *msg)
+{
+       return 0;
+}
+
+static inline int spi_unmap_msg(struct spi_master *master,
+                               struct spi_message *msg)
+{
+       return 0;
+}
+#endif /* !CONFIG_HAS_DMA */
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+       struct spi_transfer *xfer;
+       void *tmp;
+       unsigned int max_tx, max_rx;
+
+       if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+               max_tx = 0;
+               max_rx = 0;
+
+               list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+                       if ((master->flags & SPI_MASTER_MUST_TX) &&
+                           !xfer->tx_buf)
+                               max_tx = max(xfer->len, max_tx);
+                       if ((master->flags & SPI_MASTER_MUST_RX) &&
+                           !xfer->rx_buf)
+                               max_rx = max(xfer->len, max_rx);
+               }
+
+               if (max_tx) {
+                       tmp = krealloc(master->dummy_tx, max_tx,
+                                      GFP_KERNEL | GFP_DMA);
+                       if (!tmp)
+                               return -ENOMEM;
+                       master->dummy_tx = tmp;
+                       memset(tmp, 0, max_tx);
+               }
+
+               if (max_rx) {
+                       tmp = krealloc(master->dummy_rx, max_rx,
+                                      GFP_KERNEL | GFP_DMA);
+                       if (!tmp)
+                               return -ENOMEM;
+                       master->dummy_rx = tmp;
+               }
+
+               if (max_tx || max_rx) {
+                       list_for_each_entry(xfer, &msg->transfers,
+                                           transfer_list) {
+                               if (!xfer->tx_buf)
+                                       xfer->tx_buf = master->dummy_tx;
+                               if (!xfer->rx_buf)
+                                       xfer->rx_buf = master->dummy_rx;
+                       }
+               }
+       }
+
+       return __spi_map_msg(master, msg);
+}
 
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
@@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master)
 {
        int ret;
 
-       master->queued = true;
        master->transfer = spi_queued_transfer;
        if (!master->transfer_one_message)
                master->transfer_one_message = spi_transfer_one_message;
@@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master)
                dev_err(&master->dev, "problem initializing queue\n");
                goto err_init_queue;
        }
+       master->queued = true;
        ret = spi_start_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem starting queue\n");
@@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
        return 0;
 
 err_start_queue:
-err_init_queue:
        spi_destroy_queue(master);
+err_init_queue:
        return ret;
 }
 
@@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
  */
 int spi_setup(struct spi_device *spi)
 {
-       unsigned        bad_bits;
+       unsigned        bad_bits, ugly_bits;
        int             status = 0;
 
        /* check mode to prevent that DUAL and QUAD set at the same time
@@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi)
         * that aren't supported with their current master
         */
        bad_bits = spi->mode & ~spi->master->mode_bits;
+       ugly_bits = bad_bits &
+                   (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
+       if (ugly_bits) {
+               dev_warn(&spi->dev,
+                        "setup: ignoring unsupported mode bits %x\n",
+                        ugly_bits);
+               spi->mode &= ~ugly_bits;
+               bad_bits &= ~ugly_bits;
+       }
        if (bad_bits) {
                dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
                        bad_bits);
index 4144a75e5f71bc6258d9d8dcdc1a02b3b2a2f035..c270c9ae6d27711d531ebb54a07b1cdc125be9da 100644 (file)
@@ -517,7 +517,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
                of_node_put(port);
                if (port == imx_crtc->port) {
                        ret = of_graph_parse_endpoint(ep, &endpoint);
-                       return ret ? ret : endpoint.id;
+                       return ret ? ret : endpoint.port;
                }
        } while (ep);
 
@@ -675,6 +675,11 @@ static int imx_drm_platform_probe(struct platform_device *pdev)
                        if (!remote || !of_device_is_available(remote)) {
                                of_node_put(remote);
                                continue;
+                       } else if (!of_device_is_available(remote->parent)) {
+                               dev_warn(&pdev->dev, "parent device of %s is not available\n",
+                                        remote->full_name);
+                               of_node_put(remote);
+                               continue;
                        }
 
                        ret = imx_drm_add_component(&pdev->dev, remote);
index 575533f4fd64fc7d53d38c4a0cb307fa429f9f68..a23f4f773146a8891925165622cb5167d30252df 100644 (file)
@@ -582,7 +582,7 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
        tve->dev = dev;
        spin_lock_init(&tve->lock);
 
-       ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0);
+       ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
        if (ddc_node) {
                tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
                of_node_put(ddc_node);
index 8c101cbbee97646d2b06166865381bf9144df2cb..acc8184c46cde0d85ebeccd41c000b67ae9da429 100644 (file)
@@ -1247,9 +1247,18 @@ static int vpfe_stop_streaming(struct vb2_queue *vq)
        struct vpfe_fh *fh = vb2_get_drv_priv(vq);
        struct vpfe_video_device *video = fh->video;
 
-       if (!vb2_is_streaming(vq))
-               return 0;
        /* release all active buffers */
+       if (video->cur_frm == video->next_frm) {
+               vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR);
+       } else {
+               if (video->cur_frm != NULL)
+                       vb2_buffer_done(&video->cur_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+               if (video->next_frm != NULL)
+                       vb2_buffer_done(&video->next_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+       }
+
        while (!list_empty(&video->dma_queue)) {
                video->next_frm = list_entry(video->dma_queue.next,
                                                struct vpfe_cap_buffer, list);
index b3d2cc729657df34e57bc300033bafc1baf2bdd6..4ba569258498b9d6248de4d58d13c63750c68561 100644 (file)
@@ -48,10 +48,8 @@ static const struct usb_device_id sn9c102_id_table[] = {
        { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
 /*     { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
        { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
-#endif
        { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
        { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
        { SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), },
        { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
        { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
index 57eca7a45672b94674677f88cfef688ca403c386..4fe751f7c2bf2438f0e49054731d911c4e84d239 100644 (file)
@@ -953,8 +953,6 @@ static int netdev_close(struct net_device *pnetdev)
 #endif /* CONFIG_8723AU_P2P */
 
        rtw_scan_abort23a(padapter);
-        /* set this at the end */
-       padapter->rtw_wdev->iftype = NL80211_IFTYPE_MONITOR;
 
        RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n"));
        DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup);
index c49160e477d8d0245392fea0db2a2d432c186b2f..07e542e5d1562b18fa3caad2903b0928dccdd1e2 100644 (file)
@@ -26,7 +26,7 @@ unsigned int ffaddr2pipehdl23a(struct dvobj_priv *pdvobj, u32 addr)
        if (addr == RECV_BULK_IN_ADDR) {
                pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
        } else if (addr == RECV_INT_IN_ADDR) {
-               pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]);
+               pipe = usb_rcvintpipe(pusbd, pdvobj->RtInPipe[1]);
        } else if (addr < HW_QUEUE_ENTRY) {
                ep_num = pdvobj->Queue2Pipe[addr];
                pipe = usb_sndbulkpipe(pusbd, ep_num);
index 78cab13bbb1be3796b0e00af4a0667329ed4a2d8..46588c85d39bd0ce206f213aebba9330db7e33b8 100644 (file)
@@ -1593,7 +1593,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         * Initiator is expecting a NopIN ping reply..
         */
        if (hdr->itt != RESERVED_ITT) {
-               BUG_ON(!cmd);
+               if (!cmd)
+                       return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+                                               (unsigned char *)hdr);
 
                spin_lock_bh(&conn->cmd_lock);
                list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
index 6960f22909ae2eeddd651d59217d1f09519e9154..302eb3b7871558bb2a1241fedc521e20efebf415 100644 (file)
@@ -775,6 +775,7 @@ struct iscsi_np {
        int                     np_ip_proto;
        int                     np_sock_type;
        enum np_thread_state_table np_thread_state;
+       bool                    enabled;
        enum iscsi_timer_flags_table np_login_timer_flags;
        u32                     np_exports;
        enum np_flags_table     np_flags;
index 8739b98f6f93539b8c6eb95f27d7fde3601b40d7..ca31fa1b8a4b69058290243bc61b0007d2c7d3cc 100644 (file)
@@ -436,7 +436,7 @@ static int iscsi_login_zero_tsih_s2(
                }
                off = mrdsl % PAGE_SIZE;
                if (!off)
-                       return 0;
+                       goto check_prot;
 
                if (mrdsl < PAGE_SIZE)
                        mrdsl = PAGE_SIZE;
@@ -452,6 +452,31 @@ static int iscsi_login_zero_tsih_s2(
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                        return -1;
                }
+               /*
+                * ISER currently requires that ImmediateData + Unsolicited
+                * Data be disabled when protection / signature MRs are enabled.
+                */
+check_prot:
+               if (sess->se_sess->sup_prot_ops &
+                  (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
+                   TARGET_PROT_DOUT_INSERT)) {
+
+                       sprintf(buf, "ImmediateData=No");
+                       if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+                               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                                                   ISCSI_LOGIN_STATUS_NO_RESOURCES);
+                               return -1;
+                       }
+
+                       sprintf(buf, "InitialR2T=Yes");
+                       if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+                               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                                                   ISCSI_LOGIN_STATUS_NO_RESOURCES);
+                               return -1;
+                       }
+                       pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
+                                " T10-PI enabled ISER session\n");
+               }
        }
 
        return 0;
@@ -984,6 +1009,7 @@ int iscsi_target_setup_login_socket(
        }
 
        np->np_transport = t;
+       np->enabled = true;
        return 0;
 }
 
index eb96b20dc09e13ffe32e226df38b73241f176a63..ca1811858afd01fa4b09ba6e1523f32bd0c3e1b2 100644 (file)
@@ -184,6 +184,7 @@ static void iscsit_clear_tpg_np_login_thread(
                return;
        }
 
+       tpg_np->tpg_np->enabled = false;
        iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
 }
 
index 65001e1336702966108081443d5a44f39988d5af..26416c15d65c25c1b915f6bdf00b03db341b9152 100644 (file)
@@ -798,10 +798,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
                pr_err("emulate_write_cache not supported for pSCSI\n");
                return -EINVAL;
        }
-       if (dev->transport->get_write_cache) {
-               pr_warn("emulate_write_cache cannot be changed when underlying"
-                       " HW reports WriteCacheEnabled, ignoring request\n");
-               return 0;
+       if (flag &&
+           dev->transport->get_write_cache) {
+               pr_err("emulate_write_cache not supported for this device\n");
+               return -EINVAL;
        }
 
        dev->dev_attrib.emulate_write_cache = flag;
@@ -936,6 +936,10 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
                return 0;
        }
        if (!dev->transport->init_prot || !dev->transport->free_prot) {
+               /* 0 is only allowed value for non-supporting backends */
+               if (flag == 0)
+                       return 0;
+
                pr_err("DIF protection not supported by backend: %s\n",
                       dev->transport->name);
                return -ENOSYS;
index d4b98690a73680244676b6e608ede6c85ff724cb..789aa9eb0a1e590b8853a49e8f0ae137f04d3241 100644 (file)
@@ -1113,6 +1113,7 @@ void transport_init_se_cmd(
        init_completion(&cmd->cmd_wait_comp);
        init_completion(&cmd->task_stop_comp);
        spin_lock_init(&cmd->t_state_lock);
+       kref_init(&cmd->cmd_kref);
        cmd->transport_state = CMD_T_DEV_ACTIVE;
 
        cmd->se_tfo = tfo;
@@ -2357,7 +2358,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
        unsigned long flags;
        int ret = 0;
 
-       kref_init(&se_cmd->cmd_kref);
        /*
         * Add a second kref if the fabric caller is expecting to handle
         * fabric acknowledgement that requires two target_put_sess_cmd()
index 01cf37f212c30724ed6a0addbe8c7cbe69dfd6a3..f5fd515b2bee266dd9c8279ea804bc7372d955f3 100644 (file)
@@ -90,18 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
 {
        struct fc_frame *fp;
        struct fc_lport *lport;
-       struct se_session *se_sess;
+       struct ft_sess *sess;
 
        if (!cmd)
                return;
-       se_sess = cmd->sess->se_sess;
+       sess = cmd->sess;
        fp = cmd->req_frame;
        lport = fr_dev(fp);
        if (fr_seq(fp))
                lport->tt.seq_release(fr_seq(fp));
        fc_frame_free(fp);
-       percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
-       ft_sess_put(cmd->sess); /* undo get from lookup at recv */
+       percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+       ft_sess_put(sess);      /* undo get from lookup at recv */
 }
 
 void ft_release_cmd(struct se_cmd *se_cmd)
index 96109a9972b6113cdb88d1861bf00353a00a0a93..84b4bfb843443ef934d2d80abc12131bb8616a5a 100644 (file)
@@ -66,7 +66,22 @@ static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
 static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
 static unsigned event_array_pages __read_mostly;
 
+/*
+ * sync_set_bit() and friends must be unsigned long aligned on non-x86
+ * platforms.
+ */
+#if !defined(CONFIG_X86) && BITS_PER_LONG > 32
+
+#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
+#define EVTCHN_FIFO_BIT(b, w) \
+    (((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b)
+
+#else
+
 #define BM(w) ((unsigned long *)(w))
+#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b
+
+#endif
 
 static inline event_word_t *event_word_from_port(unsigned port)
 {
@@ -161,33 +176,38 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
 static void evtchn_fifo_clear_pending(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word));
+       sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static void evtchn_fifo_set_pending(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       sync_set_bit(EVTCHN_FIFO_PENDING, BM(word));
+       sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static bool evtchn_fifo_is_pending(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word));
+       return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static bool evtchn_fifo_test_and_set_mask(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+       return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }
 
 static void evtchn_fifo_mask(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       sync_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+       sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }
 
+static bool evtchn_fifo_is_masked(unsigned port)
+{
+       event_word_t *word = event_word_from_port(port);
+       return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
+}
 /*
  * Clear MASKED, spinning if BUSY is set.
  */
@@ -211,7 +231,7 @@ static void evtchn_fifo_unmask(unsigned port)
        BUG_ON(!irqs_disabled());
 
        clear_masked(word);
-       if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) {
+       if (evtchn_fifo_is_pending(port)) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        }
@@ -243,7 +263,7 @@ static void handle_irq_for_port(unsigned port)
 
 static void consume_one_event(unsigned cpu,
                              struct evtchn_fifo_control_block *control_block,
-                             unsigned priority, uint32_t *ready)
+                             unsigned priority, unsigned long *ready)
 {
        struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
        uint32_t head;
@@ -273,10 +293,9 @@ static void consume_one_event(unsigned cpu,
         * copy of the ready word.
         */
        if (head == 0)
-               clear_bit(priority, BM(ready));
+               clear_bit(priority, ready);
 
-       if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))
-           && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word)))
+       if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
                handle_irq_for_port(port);
 
        q->head[priority] = head;
@@ -285,7 +304,7 @@ static void consume_one_event(unsigned cpu,
 static void evtchn_fifo_handle_events(unsigned cpu)
 {
        struct evtchn_fifo_control_block *control_block;
-       uint32_t ready;
+       unsigned long ready;
        unsigned q;
 
        control_block = per_cpu(cpu_control_block, cpu);
index 1c8c6cc6de3097ceab15a5a16307e0b569ee6a29..4b0eff6da6740552043679764f0ebc76a47917a0 100644 (file)
@@ -130,6 +130,15 @@ static void afs_cm_destructor(struct afs_call *call)
 {
        _enter("");
 
+       /* Break the callbacks here so that we do it after the final ACK is
+        * received.  The step number here must match the final number in
+        * afs_deliver_cb_callback().
+        */
+       if (call->unmarshall == 6) {
+               ASSERT(call->server && call->count && call->request);
+               afs_break_callbacks(call->server, call->count, call->request);
+       }
+
        afs_put_server(call->server);
        call->server = NULL;
        kfree(call->buffer);
@@ -272,6 +281,16 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
                _debug("trailer");
                if (skb->len != 0)
                        return -EBADMSG;
+
+               /* Record that the message was unmarshalled successfully so
+                * that the call destructor can know do the callback breaking
+                * work, even if the final ACK isn't received.
+                *
+                * If the step number changes, then afs_cm_destructor() must be
+                * updated also.
+                */
+               call->unmarshall++;
+       case 6:
                break;
        }
 
index be75b500005d0d4f68b2d6e3855e71660016353d..590b55f46d61dd1ca169ff78a3dbf4f5d32b813c 100644 (file)
@@ -75,7 +75,7 @@ struct afs_call {
        const struct afs_call_type *type;       /* type of call */
        const struct afs_wait_mode *wait_mode;  /* completion wait mode */
        wait_queue_head_t       waitq;          /* processes awaiting completion */
-       work_func_t             async_workfn;
+       void (*async_workfn)(struct afs_call *call); /* asynchronous work function */
        struct work_struct      async_work;     /* asynchronous work processor */
        struct work_struct      work;           /* actual work processor */
        struct sk_buff_head     rx_queue;       /* received packets */
index ef943df73b8cdee2c6964439b81417a9c1110b12..03a3beb170048df40c436dff51c41373104cf9e7 100644 (file)
@@ -25,7 +25,7 @@ static void afs_wake_up_call_waiter(struct afs_call *);
 static int afs_wait_for_call_to_complete(struct afs_call *);
 static void afs_wake_up_async_call(struct afs_call *);
 static int afs_dont_wait_for_call_to_complete(struct afs_call *);
-static void afs_process_async_call(struct work_struct *);
+static void afs_process_async_call(struct afs_call *);
 static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
 static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);
 
@@ -58,6 +58,13 @@ static void afs_collect_incoming_call(struct work_struct *);
 static struct sk_buff_head afs_incoming_calls;
 static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
 
+static void afs_async_workfn(struct work_struct *work)
+{
+       struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+       call->async_workfn(call);
+}
+
 /*
  * open an RxRPC socket and bind it to be a server for callback notifications
  * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
@@ -183,6 +190,28 @@ static void afs_free_call(struct afs_call *call)
        kfree(call);
 }
 
+/*
+ * End a call but do not free it
+ */
+static void afs_end_call_nofree(struct afs_call *call)
+{
+       if (call->rxcall) {
+               rxrpc_kernel_end_call(call->rxcall);
+               call->rxcall = NULL;
+       }
+       if (call->type->destructor)
+               call->type->destructor(call);
+}
+
+/*
+ * End a call and free it
+ */
+static void afs_end_call(struct afs_call *call)
+{
+       afs_end_call_nofree(call);
+       afs_free_call(call);
+}
+
 /*
  * allocate a call with flat request and reply buffers
  */
@@ -326,7 +355,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
               atomic_read(&afs_outstanding_calls));
 
        call->wait_mode = wait_mode;
-       INIT_WORK(&call->async_work, afs_process_async_call);
+       call->async_workfn = afs_process_async_call;
+       INIT_WORK(&call->async_work, afs_async_workfn);
 
        memset(&srx, 0, sizeof(srx));
        srx.srx_family = AF_RXRPC;
@@ -383,11 +413,8 @@ error_do_abort:
        rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
        while ((skb = skb_dequeue(&call->rx_queue)))
                afs_free_skb(skb);
-       rxrpc_kernel_end_call(rxcall);
-       call->rxcall = NULL;
 error_kill_call:
-       call->type->destructor(call);
-       afs_free_call(call);
+       afs_end_call(call);
        _leave(" = %d", ret);
        return ret;
 }
@@ -509,12 +536,8 @@ static void afs_deliver_to_call(struct afs_call *call)
        if (call->state >= AFS_CALL_COMPLETE) {
                while ((skb = skb_dequeue(&call->rx_queue)))
                        afs_free_skb(skb);
-               if (call->incoming) {
-                       rxrpc_kernel_end_call(call->rxcall);
-                       call->rxcall = NULL;
-                       call->type->destructor(call);
-                       afs_free_call(call);
-               }
+               if (call->incoming)
+                       afs_end_call(call);
        }
 
        _leave("");
@@ -564,10 +587,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
        }
 
        _debug("call complete");
-       rxrpc_kernel_end_call(call->rxcall);
-       call->rxcall = NULL;
-       call->type->destructor(call);
-       afs_free_call(call);
+       afs_end_call(call);
        _leave(" = %d", ret);
        return ret;
 }
@@ -603,11 +623,8 @@ static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
 /*
  * delete an asynchronous call
  */
-static void afs_delete_async_call(struct work_struct *work)
+static void afs_delete_async_call(struct afs_call *call)
 {
-       struct afs_call *call =
-               container_of(work, struct afs_call, async_work);
-
        _enter("");
 
        afs_free_call(call);
@@ -620,11 +637,8 @@ static void afs_delete_async_call(struct work_struct *work)
  * - on a multiple-thread workqueue this work item may try to run on several
  *   CPUs at the same time
  */
-static void afs_process_async_call(struct work_struct *work)
+static void afs_process_async_call(struct afs_call *call)
 {
-       struct afs_call *call =
-               container_of(work, struct afs_call, async_work);
-
        _enter("");
 
        if (!skb_queue_empty(&call->rx_queue))
@@ -637,10 +651,7 @@ static void afs_process_async_call(struct work_struct *work)
                call->reply = NULL;
 
                /* kill the call */
-               rxrpc_kernel_end_call(call->rxcall);
-               call->rxcall = NULL;
-               if (call->type->destructor)
-                       call->type->destructor(call);
+               afs_end_call_nofree(call);
 
                /* we can't just delete the call because the work item may be
                 * queued */
@@ -663,13 +674,6 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
        call->reply_size += len;
 }
 
-static void afs_async_workfn(struct work_struct *work)
-{
-       struct afs_call *call = container_of(work, struct afs_call, async_work);
-
-       call->async_workfn(work);
-}
-
 /*
  * accept the backlog of incoming calls
  */
@@ -790,10 +794,7 @@ void afs_send_empty_reply(struct afs_call *call)
                _debug("oom");
                rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
        default:
-               rxrpc_kernel_end_call(call->rxcall);
-               call->rxcall = NULL;
-               call->type->destructor(call);
-               afs_free_call(call);
+               afs_end_call(call);
                _leave(" [error]");
                return;
        }
@@ -823,17 +824,16 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
        call->state = AFS_CALL_AWAIT_ACK;
        n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
        if (n >= 0) {
+               /* Success */
                _leave(" [replied]");
                return;
        }
+
        if (n == -ENOMEM) {
                _debug("oom");
                rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
        }
-       rxrpc_kernel_end_call(call->rxcall);
-       call->rxcall = NULL;
-       call->type->destructor(call);
-       afs_free_call(call);
+       afs_end_call(call);
        _leave(" [error]");
 }
 
index 2ad7de94efef71e58af4e747cd05d3cb3aeeba40..2f6d7b13b5bdacaba4df505b57b1c3cd9417ee13 100644 (file)
@@ -3120,6 +3120,8 @@ process_slot:
                        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                                u64 skip = 0;
                                u64 trim = 0;
+                               u64 aligned_end = 0;
+
                                if (off > key.offset) {
                                        skip = off - key.offset;
                                        new_key.offset += skip;
@@ -3136,9 +3138,11 @@ process_slot:
                                size -= skip + trim;
                                datal -= skip + trim;
 
+                               aligned_end = ALIGN(new_key.offset + datal,
+                                                   root->sectorsize);
                                ret = btrfs_drop_extents(trans, root, inode,
                                                         new_key.offset,
-                                                        new_key.offset + datal,
+                                                        aligned_end,
                                                         1);
                                if (ret) {
                                        if (ret != -EOPNOTSUPP)
index eb6537a08c1bf4438f0bc90df319977cb964bee7..fd38b5053479cf62f3297d43038288027646ebe0 100644 (file)
@@ -1668,7 +1668,7 @@ static int get_first_ref(struct btrfs_root *root, u64 ino,
                goto out;
        }
 
-       if (key.type == BTRFS_INODE_REF_KEY) {
+       if (found_key.type == BTRFS_INODE_REF_KEY) {
                struct btrfs_inode_ref *iref;
                iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                      struct btrfs_inode_ref);
index aadc2b68678b7d70c0381d10c847c86624589c4c..a22d667f1069e5eb8485d121f241725de732b8a9 100644 (file)
@@ -1737,6 +1737,9 @@ cifs_inode_needs_reval(struct inode *inode)
        if (cifs_i->time == 0)
                return true;
 
+       if (!cifs_sb->actimeo)
+               return true;
+
        if (!time_in_range(jiffies, cifs_i->time,
                                cifs_i->time + cifs_sb->actimeo))
                return true;
index 476f3ebf437ef40ddd7432200080825b7e9e992c..238b7aa26f68ab538df0cc219073a3d26541cc11 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -657,10 +657,10 @@ int setup_arg_pages(struct linux_binprm *bprm,
        unsigned long rlim_stack;
 
 #ifdef CONFIG_STACK_GROWSUP
-       /* Limit stack size to 1GB */
+       /* Limit stack size */
        stack_base = rlimit_max(RLIMIT_STACK);
-       if (stack_base > (1 << 30))
-               stack_base = 1 << 30;
+       if (stack_base > STACK_SIZE_MAX)
+               stack_base = STACK_SIZE_MAX;
 
        /* Make sure we didn't let the argument array grow too large. */
        if (vma->vm_end - vma->vm_start > stack_base)
index e01ea4a14a014b3123dddf6d2ad1ecb9a6381736..5e9a80cfc3d8857c7cfe496eae627c921c47c6f2 100644 (file)
@@ -610,6 +610,7 @@ static void kernfs_put_open_node(struct kernfs_node *kn,
 static int kernfs_fop_open(struct inode *inode, struct file *file)
 {
        struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
+       struct kernfs_root *root = kernfs_root(kn);
        const struct kernfs_ops *ops;
        struct kernfs_open_file *of;
        bool has_read, has_write, has_mmap;
@@ -624,14 +625,16 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
        has_write = ops->write || ops->mmap;
        has_mmap = ops->mmap;
 
-       /* check perms and supported operations */
-       if ((file->f_mode & FMODE_WRITE) &&
-           (!(inode->i_mode & S_IWUGO) || !has_write))
-               goto err_out;
+       /* see the flag definition for details */
+       if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
+               if ((file->f_mode & FMODE_WRITE) &&
+                   (!(inode->i_mode & S_IWUGO) || !has_write))
+                       goto err_out;
 
-       if ((file->f_mode & FMODE_READ) &&
-           (!(inode->i_mode & S_IRUGO) || !has_read))
-               goto err_out;
+               if ((file->f_mode & FMODE_READ) &&
+                   (!(inode->i_mode & S_IRUGO) || !has_read))
+                       goto err_out;
+       }
 
        /* allocate a kernfs_open_file for the file */
        error = -ENOMEM;
index e663aeac579e5d8aaa2596a177114b17f262bd6c..e390bd9ae068696d4a5425057559d3037e6f4518 100644 (file)
@@ -389,18 +389,6 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
        fl->fl_ops = NULL;
        fl->fl_lmops = NULL;
 
-       /* Ensure that fl->fl_filp has compatible f_mode */
-       switch (l->l_type) {
-       case F_RDLCK:
-               if (!(filp->f_mode & FMODE_READ))
-                       return -EBADF;
-               break;
-       case F_WRLCK:
-               if (!(filp->f_mode & FMODE_WRITE))
-                       return -EBADF;
-               break;
-       }
-
        return assign_type(fl, l->l_type);
 }
 
@@ -2034,6 +2022,22 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
        return error;
 }
 
+/* Ensure that fl->fl_filp has compatible f_mode for F_SETLK calls */
+static int
+check_fmode_for_setlk(struct file_lock *fl)
+{
+       switch (fl->fl_type) {
+       case F_RDLCK:
+               if (!(fl->fl_file->f_mode & FMODE_READ))
+                       return -EBADF;
+               break;
+       case F_WRLCK:
+               if (!(fl->fl_file->f_mode & FMODE_WRITE))
+                       return -EBADF;
+       }
+       return 0;
+}
+
 /* Apply the lock described by l to an open file descriptor.
  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
  */
@@ -2071,6 +2075,10 @@ again:
        if (error)
                goto out;
 
+       error = check_fmode_for_setlk(file_lock);
+       if (error)
+               goto out;
+
        /*
         * If the cmd is requesting file-private locks, then set the
         * FL_OFDLCK flag and override the owner.
@@ -2206,6 +2214,10 @@ again:
        if (error)
                goto out;
 
+       error = check_fmode_for_setlk(file_lock);
+       if (error)
+               goto out;
+
        /*
         * If the cmd is requesting file-private locks, then set the
         * FL_OFDLCK flag and override the owner.
index 6f3f392d48af76d9b7bdb752f1d13eff9580be1b..f66c66b9f18285a4084114679d0e1d3e555a253c 100644 (file)
@@ -402,8 +402,10 @@ sort_pacl(struct posix_acl *pacl)
         * by uid/gid. */
        int i, j;
 
-       if (pacl->a_count <= 4)
-               return; /* no users or groups */
+       /* no users or groups */
+       if (!pacl || pacl->a_count <= 4)
+               return;
+
        i = 1;
        while (pacl->a_entries[i].e_tag == ACL_USER)
                i++;
@@ -530,13 +532,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
 
        /*
         * ACLs with no ACEs are treated differently in the inheritable
-        * and effective cases: when there are no inheritable ACEs, we
-        * set a zero-length default posix acl:
+        * and effective cases: when there are no inheritable ACEs,
+        * calls ->set_acl with a NULL ACL structure.
         */
-       if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
-               pacl = posix_acl_alloc(0, GFP_KERNEL);
-               return pacl ? pacl : ERR_PTR(-ENOMEM);
-       }
+       if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
+               return NULL;
+
        /*
         * When there are no effective ACEs, the following will end
         * up setting a 3-element effective posix ACL with all
@@ -589,7 +590,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
                add_to_mask(state, &state->groups->aces[i].perms);
        }
 
-       if (!state->users->n && !state->groups->n) {
+       if (state->users->n || state->groups->n) {
                pace++;
                pace->e_tag = ACL_MASK;
                low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);
index 3ba65979a3cde006e73ed2c310e2bc42f6988fb2..9a77a5a21557c4e16740196c16d7f4f218ce2643 100644 (file)
@@ -1078,6 +1078,18 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
                return NULL;
        }
        clp->cl_name.len = name.len;
+       INIT_LIST_HEAD(&clp->cl_sessions);
+       idr_init(&clp->cl_stateids);
+       atomic_set(&clp->cl_refcount, 0);
+       clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+       INIT_LIST_HEAD(&clp->cl_idhash);
+       INIT_LIST_HEAD(&clp->cl_openowners);
+       INIT_LIST_HEAD(&clp->cl_delegations);
+       INIT_LIST_HEAD(&clp->cl_lru);
+       INIT_LIST_HEAD(&clp->cl_callbacks);
+       INIT_LIST_HEAD(&clp->cl_revoked);
+       spin_lock_init(&clp->cl_lock);
+       rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
        return clp;
 }
 
@@ -1095,6 +1107,7 @@ free_client(struct nfs4_client *clp)
                WARN_ON_ONCE(atomic_read(&ses->se_ref));
                free_session(ses);
        }
+       rpc_destroy_wait_queue(&clp->cl_cb_waitq);
        free_svc_cred(&clp->cl_cred);
        kfree(clp->cl_name.data);
        idr_destroy(&clp->cl_stateids);
@@ -1347,7 +1360,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
        if (clp == NULL)
                return NULL;
 
-       INIT_LIST_HEAD(&clp->cl_sessions);
        ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
        if (ret) {
                spin_lock(&nn->client_lock);
@@ -1355,20 +1367,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
                spin_unlock(&nn->client_lock);
                return NULL;
        }
-       idr_init(&clp->cl_stateids);
-       atomic_set(&clp->cl_refcount, 0);
-       clp->cl_cb_state = NFSD4_CB_UNKNOWN;
-       INIT_LIST_HEAD(&clp->cl_idhash);
-       INIT_LIST_HEAD(&clp->cl_openowners);
-       INIT_LIST_HEAD(&clp->cl_delegations);
-       INIT_LIST_HEAD(&clp->cl_lru);
-       INIT_LIST_HEAD(&clp->cl_callbacks);
-       INIT_LIST_HEAD(&clp->cl_revoked);
-       spin_lock_init(&clp->cl_lock);
        nfsd4_init_callback(&clp->cl_cb_null);
        clp->cl_time = get_seconds();
        clear_bit(0, &clp->cl_cb_slot_busy);
-       rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
        copy_verf(clp, verf);
        rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
        gen_confirm(clp);
@@ -3716,9 +3717,16 @@ out:
 static __be32
 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
 {
-       if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
+       struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
+
+       if (check_for_locks(stp->st_file, lo))
                return nfserr_locks_held;
-       release_lock_stateid(stp);
+       /*
+        * Currently there's a 1-1 lock stateid<->lockowner
+        * correspondance, and we have to delete the lockowner when we
+        * delete the lock stateid:
+        */
+       unhash_lockowner(lo);
        return nfs_ok;
 }
 
@@ -4158,6 +4166,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
 
        if (!same_owner_str(&lo->lo_owner, owner, clid))
                return false;
+       if (list_empty(&lo->lo_owner.so_stateids)) {
+               WARN_ON_ONCE(1);
+               return false;
+       }
        lst = list_first_entry(&lo->lo_owner.so_stateids,
                               struct nfs4_ol_stateid, st_perstateowner);
        return lst->st_file->fi_inode == inode;
index af3f7aa73e13a007d06fd528d8c8d101d2553087..ee1f88419cb0640d38203eb3a16c7b03094f7c56 100644 (file)
@@ -472,11 +472,15 @@ bail:
 
 void dlm_destroy_master_caches(void)
 {
-       if (dlm_lockname_cache)
+       if (dlm_lockname_cache) {
                kmem_cache_destroy(dlm_lockname_cache);
+               dlm_lockname_cache = NULL;
+       }
 
-       if (dlm_lockres_cache)
+       if (dlm_lockres_cache) {
                kmem_cache_destroy(dlm_lockres_cache);
+               dlm_lockres_cache = NULL;
+       }
 }
 
 static void dlm_lockres_release(struct kref *kref)
index 28cc1acd5439bf8caeb6d93e8bf68804ad738b1b..e9ef59b3abb1e5552cdc2a8880df37b85093d249 100644 (file)
@@ -47,12 +47,13 @@ static int sysfs_kf_seq_show(struct seq_file *sf, void *v)
        ssize_t count;
        char *buf;
 
-       /* acquire buffer and ensure that it's >= PAGE_SIZE */
+       /* acquire buffer and ensure that it's >= PAGE_SIZE and clear */
        count = seq_get_buf(sf, &buf);
        if (count < PAGE_SIZE) {
                seq_commit(sf, -1);
                return 0;
        }
+       memset(buf, 0, PAGE_SIZE);
 
        /*
         * Invoke show().  Control may reach here via seq file lseek even
index a66ad6196f59cca2f61a31c4a966d41b92497ad8..8794423f7efbe2c90408ba9dfa6cbb7e55e6c3d9 100644 (file)
@@ -63,7 +63,8 @@ int __init sysfs_init(void)
 {
        int err;
 
-       sysfs_root = kernfs_create_root(NULL, 0, NULL);
+       sysfs_root = kernfs_create_root(NULL, KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
+                                       NULL);
        if (IS_ERR(sysfs_root))
                return PTR_ERR(sysfs_root);
 
index 1399e187d425dc7af0f8b5062afaf312fe5a0927..753e467aa1a5991d0175087284ff9cde35591c40 100644 (file)
@@ -237,7 +237,7 @@ xfs_fs_nfs_commit_metadata(
 
        if (!lsn)
                return 0;
-       return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
+       return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
 }
 
 const struct export_operations xfs_export_operations = {
index 951a2321ee010f35c1d3395c09d0830d74197cfd..830c1c937b8888e7adba5557997d8d30dfc91713 100644 (file)
@@ -155,7 +155,7 @@ xfs_dir_fsync(
 
        if (!lsn)
                return 0;
-       return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
+       return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
 }
 
 STATIC int
@@ -295,7 +295,7 @@ xfs_file_aio_read(
                xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 
                if (inode->i_mapping->nrpages) {
-                       ret = -filemap_write_and_wait_range(
+                       ret = filemap_write_and_wait_range(
                                                        VFS_I(ip)->i_mapping,
                                                        pos, -1);
                        if (ret) {
@@ -837,7 +837,7 @@ xfs_file_fallocate(
                unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
 
                if (offset & blksize_mask || len & blksize_mask) {
-                       error = -EINVAL;
+                       error = EINVAL;
                        goto out_unlock;
                }
 
@@ -846,7 +846,7 @@ xfs_file_fallocate(
                 * in which case it is effectively a truncate operation
                 */
                if (offset + len >= i_size_read(inode)) {
-                       error = -EINVAL;
+                       error = EINVAL;
                        goto out_unlock;
                }
 
index 301ecbfcc0bee48cff5a8cc419a17d67c7f56e74..36d630319a2784c7fe39f83fc9d66b74ee3add7b 100644 (file)
@@ -72,8 +72,8 @@ xfs_initxattrs(
        int                     error = 0;
 
        for (xattr = xattr_array; xattr->name != NULL; xattr++) {
-               error = xfs_attr_set(ip, xattr->name, xattr->value,
-                                    xattr->value_len, ATTR_SECURE);
+               error = -xfs_attr_set(ip, xattr->name, xattr->value,
+                                     xattr->value_len, ATTR_SECURE);
                if (error < 0)
                        break;
        }
@@ -93,8 +93,8 @@ xfs_init_security(
        struct inode    *dir,
        const struct qstr *qstr)
 {
-       return security_inode_init_security(inode, dir, qstr,
-                                           &xfs_initxattrs, NULL);
+       return -security_inode_init_security(inode, dir, qstr,
+                                            &xfs_initxattrs, NULL);
 }
 
 static void
@@ -173,12 +173,12 @@ xfs_generic_create(
 
 #ifdef CONFIG_XFS_POSIX_ACL
        if (default_acl) {
-               error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+               error = -xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
                if (error)
                        goto out_cleanup_inode;
        }
        if (acl) {
-               error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+               error = -xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
                if (error)
                        goto out_cleanup_inode;
        }
index 348e4d2ed6e6e9621b82c535212e32093b51da0c..dc977b6e6a365a4222fb79b212f5c2b23b46b84c 100644 (file)
@@ -843,22 +843,17 @@ xfs_qm_init_quotainfo(
 
        qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
 
-       if ((error = list_lru_init(&qinf->qi_lru))) {
-               kmem_free(qinf);
-               mp->m_quotainfo = NULL;
-               return error;
-       }
+       error = -list_lru_init(&qinf->qi_lru);
+       if (error)
+               goto out_free_qinf;
 
        /*
         * See if quotainodes are setup, and if not, allocate them,
         * and change the superblock accordingly.
         */
-       if ((error = xfs_qm_init_quotainos(mp))) {
-               list_lru_destroy(&qinf->qi_lru);
-               kmem_free(qinf);
-               mp->m_quotainfo = NULL;
-               return error;
-       }
+       error = xfs_qm_init_quotainos(mp);
+       if (error)
+               goto out_free_lru;
 
        INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
        INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
@@ -918,7 +913,7 @@ xfs_qm_init_quotainfo(
                qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
                qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
                qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
+
                xfs_qm_dqdestroy(dqp);
        } else {
                qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
@@ -935,6 +930,13 @@ xfs_qm_init_quotainfo(
        qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
        register_shrinker(&qinf->qi_shrinker);
        return 0;
+
+out_free_lru:
+       list_lru_destroy(&qinf->qi_lru);
+out_free_qinf:
+       kmem_free(qinf);
+       mp->m_quotainfo = NULL;
+       return error;
 }
 
 
index 2053767763773b60d7b7f8aca31f3b31a9219785..3494eff8e4ebfbce7256fd8d95769e2a1799619c 100644 (file)
@@ -1433,11 +1433,11 @@ xfs_fs_fill_super(
        if (error)
                goto out_free_fsname;
 
-       error = xfs_init_mount_workqueues(mp);
+       error = -xfs_init_mount_workqueues(mp);
        if (error)
                goto out_close_devices;
 
-       error = xfs_icsb_init_counters(mp);
+       error = -xfs_icsb_init_counters(mp);
        if (error)
                goto out_destroy_workqueues;
 
index b4ea8f50fc65ed409335bf37b3088dfde626d4a2..5e752b9590548448b42080151169cc7e38a1245c 100644 (file)
@@ -12,7 +12,7 @@
        [RLIMIT_CPU]            = {  RLIM_INFINITY,  RLIM_INFINITY },   \
        [RLIMIT_FSIZE]          = {  RLIM_INFINITY,  RLIM_INFINITY },   \
        [RLIMIT_DATA]           = {  RLIM_INFINITY,  RLIM_INFINITY },   \
-       [RLIMIT_STACK]          = {       _STK_LIM,   _STK_LIM_MAX },   \
+       [RLIMIT_STACK]          = {       _STK_LIM,  RLIM_INFINITY },   \
        [RLIMIT_CORE]           = {              0,  RLIM_INFINITY },   \
        [RLIMIT_RSS]            = {  RLIM_INFINITY,  RLIM_INFINITY },   \
        [RLIMIT_NPROC]          = {              0,              0 },   \
index c2515851c1aa2038c5298c8ab3288d469cd3bb5b..d60904b9e50532410af2956c3546fa5072cae08e 100644 (file)
@@ -473,6 +473,7 @@ struct cftype {
 };
 
 extern struct cgroup_root cgrp_dfl_root;
+extern struct css_set init_css_set;
 
 static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
 {
@@ -700,6 +701,20 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
        return task_css_check(task, subsys_id, false);
 }
 
+/**
+ * task_css_is_root - test whether a task belongs to the root css
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * Test whether @task belongs to the root css on the specified subsystem.
+ * May be invoked in any context.
+ */
+static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
+{
+       return task_css_check(task, subsys_id, true) ==
+               init_css_set.subsys[subsys_id];
+}
+
 static inline struct cgroup *task_cgroup(struct task_struct *task,
                                         int subsys_id)
 {
index 8300fb87b84ac1329d8facb1d560767c29c549d8..72cb0ddb9678d21eb2f2edd20a36b594b72160d9 100644 (file)
@@ -429,6 +429,7 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 
 struct dmaengine_unmap_data {
+       u8 map_cnt;
        u8 to_cnt;
        u8 from_cnt;
        u8 bidi_cnt;
index 7c8b20b120eac680f27e3cd1e99630b1fe521799..a9a53b12397b0c4fd29a11cbf5f9cbbd18944fd8 100644 (file)
@@ -56,6 +56,7 @@ struct macvlan_dev {
        int                     numqueues;
        netdev_features_t       tap_features;
        int                     minor;
+       int                     nest_level;
 };
 
 static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
index 13bbbde00e68de454c8cf30f0581796d55eb0a40..b2acc4a1b13c9bd906869bc52d501f53f67d3c87 100644 (file)
@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-static inline int is_vlan_dev(struct net_device *dev)
+static inline bool is_vlan_dev(struct net_device *dev)
 {
         return dev->priv_flags & IFF_802_1Q_VLAN;
 }
@@ -159,6 +159,7 @@ struct vlan_dev_priv {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll                          *netpoll;
 #endif
+       unsigned int                            nest_level;
 };
 
 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -197,6 +198,12 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
                                 const struct net_device *by_dev);
 
 extern bool vlan_uses_dev(const struct net_device *dev);
+
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+       BUG_ON(!is_vlan_dev(dev));
+       return vlan_dev_priv(dev)->nest_level;
+}
 #else
 static inline struct net_device *
 __vlan_find_dev_deep(struct net_device *real_dev,
@@ -263,6 +270,11 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
 {
        return false;
 }
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+       BUG();
+       return 0;
+}
 #endif
 
 static inline bool vlan_hw_offload_capable(netdev_features_t features,
@@ -483,4 +495,5 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
                 */
                skb->protocol = htons(ETH_P_802_2);
 }
+
 #endif /* !(_LINUX_IF_VLAN_H_) */
index 97ac926c78a707fb6bf45293d00e8f4d86515f43..051c85032f481ae8fa1fb5ca6542df83588a9153 100644 (file)
@@ -272,6 +272,11 @@ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
        return -EINVAL;
 }
 
+static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+       return 0;
+}
+
 static inline int irq_can_set_affinity(unsigned int irq)
 {
        return 0;
index b0122dc6f96a0a21324f86b5a28c725ac3ea74cf..ca1be5c9136c4557de511143b4c9b36e1eec3734 100644 (file)
@@ -50,7 +50,24 @@ enum kernfs_node_flag {
 
 /* @flags for kernfs_create_root() */
 enum kernfs_root_flag {
-       KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
+       /*
+        * kernfs_nodes are created in the deactivated state and invisible.
+        * They require explicit kernfs_activate() to become visible.  This
+        * can be used to make related nodes become visible atomically
+        * after all nodes are created successfully.
+        */
+       KERNFS_ROOT_CREATE_DEACTIVATED          = 0x0001,
+
+       /*
+        * For regular flies, if the opener has CAP_DAC_OVERRIDE, open(2)
+        * succeeds regardless of the RW permissions.  sysfs had an extra
+        * layer of enforcement where open(2) fails with -EACCES regardless
+        * of CAP_DAC_OVERRIDE if the permission doesn't have the
+        * respective read or write access at all (none of S_IRUGO or
+        * S_IWUGO) or the respective operation isn't implemented.  The
+        * following flag enables that behavior.
+        */
+       KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK       = 0x0002,
 };
 
 /* type-specific structures for kernfs_node union members */
index b66e7610d4eec9f4d67e5f8bbd745bb6cbd3c99a..7040dc98ff8baa59118cd42dd728d3152afb7c86 100644 (file)
@@ -421,6 +421,17 @@ struct mlx4_wqe_inline_seg {
        __be32                  byte_count;
 };
 
+enum mlx4_update_qp_attr {
+       MLX4_UPDATE_QP_SMAC             = 1 << 0,
+};
+
+struct mlx4_update_qp_params {
+       u8      smac_index;
+};
+
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                  enum mlx4_update_qp_attr attr,
+                  struct mlx4_update_qp_params *params);
 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
                   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
index 94734a6259a4d9ee36e25b342c86d3c1bf9d5fcf..17d83393afcc4337d50f00cfa837030d9429c6f8 100644 (file)
@@ -248,24 +248,17 @@ do {                                                              \
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
                           struct static_key *done_key);
 
-#ifdef HAVE_JUMP_LABEL
-#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
-               { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
-#else /* !HAVE_JUMP_LABEL */
-#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
-#endif /* HAVE_JUMP_LABEL */
-
 #define net_get_random_once(buf, nbytes)                               \
        ({                                                              \
                bool ___ret = false;                                    \
                static bool ___done = false;                            \
-               static struct static_key ___done_key =                  \
-                       ___NET_RANDOM_STATIC_KEY_INIT;                  \
-               if (!static_key_true(&___done_key))                     \
+               static struct static_key ___once_key =                  \
+                       STATIC_KEY_INIT_TRUE;                           \
+               if (static_key_true(&___once_key))                      \
                        ___ret = __net_get_random_once(buf,             \
                                                       nbytes,          \
                                                       &___done,        \
-                                                      &___done_key);   \
+                                                      &___once_key);   \
                ___ret;                                                 \
        })
 
index 7ed3a3aa6604b7b8511ea709ea2c2312a1d38bb2..b42d07b0390b2e95c3e551bb6639b906472bf70b 100644 (file)
@@ -1144,6 +1144,7 @@ struct net_device_ops {
        netdev_tx_t             (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
                                                        struct net_device *dev,
                                                        void *priv);
+       int                     (*ndo_get_lock_subclass)(struct net_device *dev);
 };
 
 /**
@@ -2950,7 +2951,12 @@ static inline void netif_addr_lock(struct net_device *dev)
 
 static inline void netif_addr_lock_nested(struct net_device *dev)
 {
-       spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
+       int subclass = SINGLE_DEPTH_NESTING;
+
+       if (dev->netdev_ops->ndo_get_lock_subclass)
+               subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
+
+       spin_lock_nested(&dev->addr_list_lock, subclass);
 }
 
 static inline void netif_addr_lock_bh(struct net_device *dev)
@@ -3050,9 +3056,18 @@ extern int               weight_p;
 extern int             bpf_jit_enable;
 
 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                    struct list_head **iter);
 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
                                                     struct list_head **iter);
 
+/* iterate through upper list, must be called under RCU read lock */
+#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
+       for (iter = &(dev)->adj_list.upper, \
+            updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
+            updev; \
+            updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
+
 /* iterate through upper list, must be called under RCU read lock */
 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
        for (iter = &(dev)->all_adj_list.upper, \
@@ -3077,6 +3092,14 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
             priv; \
             priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
 
+void *netdev_lower_get_next(struct net_device *dev,
+                               struct list_head **iter);
+#define netdev_for_each_lower_dev(dev, ldev, iter) \
+       for (iter = &(dev)->adj_list.lower, \
+            ldev = netdev_lower_get_next(dev, &(iter)); \
+            ldev; \
+            ldev = netdev_lower_get_next(dev, &(iter)))
+
 void *netdev_adjacent_get_private(struct list_head *adj_list);
 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
@@ -3092,6 +3115,8 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
+int dev_get_nest_level(struct net_device *dev,
+                      bool (*type_check)(struct net_device *dev));
 int skb_checksum_help(struct sk_buff *skb);
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path);
@@ -3180,12 +3205,7 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                        const struct net_device *dev);
-static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
-{
-       return netif_skb_dev_features(skb, skb->dev);
-}
+netdev_features_t netif_skb_features(struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
index 3bad8d106e0ea01b2ae7524e216c83b4e654ced3..e6f0988c1c68a18dbc96c804d5d1c01c0aa6d9fb 100644 (file)
@@ -349,7 +349,7 @@ int of_device_is_stdout_path(struct device_node *dn);
 
 #else /* CONFIG_OF */
 
-static inline const char* of_node_full_name(struct device_node *np)
+static inline const char* of_node_full_name(const struct device_node *np)
 {
        return "<no-node>";
 }
index 6fe8464ed767f0dac6481a6ffc7b39ecaebdd648..881a7c3571f4617b99e0845995800d98eb4f9349 100644 (file)
@@ -31,7 +31,12 @@ extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
-       return -ENOSYS;
+       /*
+        * Fall back to the non-DT function to register a bus.
+        * This way, we don't have to keep compat bits around in drivers.
+        */
+
+       return mdiobus_register(mdio);
 }
 
 static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
index 3356abcfff184e707eccb08d6f99042a8fd51acc..3ef6ea12806a297107ff65b2ad554f5176c8e301 100644 (file)
@@ -402,6 +402,8 @@ struct perf_event {
 
        struct ring_buffer              *rb;
        struct list_head                rb_entry;
+       unsigned long                   rcu_batches;
+       int                             rcu_pending;
 
        /* poll related */
        wait_queue_head_t               waitq;
diff --git a/include/linux/platform_data/ipmmu-vmsa.h b/include/linux/platform_data/ipmmu-vmsa.h
new file mode 100644 (file)
index 0000000..5275b3a
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * IPMMU VMSA Platform Data
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __IPMMU_VMSA_H__
+#define __IPMMU_VMSA_H__
+
+struct ipmmu_vmsa_master {
+       const char *name;
+       unsigned int utlb;
+};
+
+struct ipmmu_vmsa_platform_data {
+       const struct ipmmu_vmsa_master *masters;
+       unsigned int num_masters;
+};
+
+#endif /* __IPMMU_VMSA_H__ */
index 8e3e66ac0a5215d221042e15e631fb2fe2fb51d1..953937ea5233c770d631bf3ae59be60b72630d88 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
+#include <linux/wait.h>
 #include <uapi/linux/rtnetlink.h>
 
 extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -22,6 +23,10 @@ extern void rtnl_lock(void);
 extern void rtnl_unlock(void);
 extern int rtnl_trylock(void);
 extern int rtnl_is_locked(void);
+
+extern wait_queue_head_t netdev_unregistering_wq;
+extern struct mutex net_mutex;
+
 #ifdef CONFIG_PROVE_LOCKING
 extern int lockdep_rtnl_is_held(void);
 #else
index 25f54c79f75772a9f133c585e17a2d8e4a59e8ac..221b2bde372363765b5328638bf9320d9c95f5fd 100644 (file)
@@ -220,7 +220,7 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
 #define TASK_PARKED            512
 #define TASK_STATE_MAX         1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -1153,9 +1153,12 @@ struct sched_dl_entity {
         *
         * @dl_boosted tells if we are boosted due to DI. If so we are
         * outside bandwidth enforcement mechanism (but only until we
-        * exit the critical section).
+        * exit the critical section);
+        *
+        * @dl_yielded tells if task gave up the cpu before consuming
+        * all its available runtime during the last job.
         */
-       int dl_throttled, dl_new, dl_boosted;
+       int dl_throttled, dl_new, dl_boosted, dl_yielded;
 
        /*
         * Bandwidth enforcement timer. Each -deadline task has its
index f3539a15c41103b743c0571913fbb93dc402f40a..f856e5a746fae66e2c91357cfe94fc89d0acae18 100644 (file)
@@ -3668,6 +3668,18 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy);
  */
 void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
 
+/**
+ * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
+ *
+ * @wiphy: the wiphy on which the scheduled scan stopped
+ *
+ * The driver can call this function to inform cfg80211 that the
+ * scheduled scan had to be stopped, for whatever reason.  The driver
+ * is then called back via the sched_scan_stop operation when done.
+ * This function should be called with rtnl locked.
+ */
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
+
 /**
  * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
  *
index 6c4f5eac98e7be133af4b868507f0d217ac05c1b..216cecce65e9e1200cd760de64c27ac852ffc85b 100644 (file)
@@ -127,6 +127,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg);
 void rt6_ifdown(struct net *net, struct net_device *dev);
 void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
 
 
 /*
index 80f500a29498e1fc9b8892e5c66be6bd02362eaa..b2704fd0ec80d714bc9e7dd9b87721da1853173c 100644 (file)
@@ -20,6 +20,11 @@ struct local_ports {
        int             range[2];
 };
 
+struct ping_group_range {
+       seqlock_t       lock;
+       kgid_t          range[2];
+};
+
 struct netns_ipv4 {
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *forw_hdr;
@@ -66,13 +71,13 @@ struct netns_ipv4 {
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
 
-       struct local_ports sysctl_local_ports;
+       struct local_ports ip_local_ports;
 
        int sysctl_tcp_ecn;
        int sysctl_ip_no_pmtu_disc;
        int sysctl_ip_fwd_use_pmtu;
 
-       kgid_t sysctl_ping_group_range[2];
+       struct ping_group_range ping_group_range;
 
        atomic_t dev_addr_genid;
 
index f863428796d532a42df39e45527da2f8c6eb9c88..c6d10af50123e1c5810aff634400e8c273949221 100644 (file)
 # define RLIM_INFINITY         (~0UL)
 #endif
 
-/*
- * RLIMIT_STACK default maximum - some architectures override it:
- */
-#ifndef _STK_LIM_MAX
-# define _STK_LIM_MAX          RLIM_INFINITY
-#endif
-
 
 #endif /* _UAPI_ASM_GENERIC_RESOURCE_H */
index 6db66783d268d9a286a86b836c9277bd89f5ac13..3336406080874bf2bd06cb1f0563aea6d8e56650 100644 (file)
@@ -697,9 +697,11 @@ __SYSCALL(__NR_finit_module, sys_finit_module)
 __SYSCALL(__NR_sched_setattr, sys_sched_setattr)
 #define __NR_sched_getattr 275
 __SYSCALL(__NR_sched_getattr, sys_sched_getattr)
+#define __NR_renameat2 276
+__SYSCALL(__NR_renameat2, sys_renameat2)
 
 #undef __NR_syscalls
-#define __NR_syscalls 276
+#define __NR_syscalls 277
 
 /*
  * All syscalls below here should go away really,
index 11917f747cb401be5b7dc8ab788fa5d0d4e8e47c..1b1efddb91cd924140fd5ad3009f338b31925e05 100644 (file)
@@ -331,9 +331,17 @@ enum {
 #define AUDIT_FAIL_PRINTK      1
 #define AUDIT_FAIL_PANIC       2
 
+/*
+ * These bits disambiguate different calling conventions that share an
+ * ELF machine type, bitness, and endianness
+ */
+#define __AUDIT_ARCH_CONVENTION_MASK 0x30000000
+#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000
+
 /* distinguish syscall tables */
 #define __AUDIT_ARCH_64BIT 0x80000000
 #define __AUDIT_ARCH_LE           0x40000000
+
 #define AUDIT_ARCH_ALPHA       (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARM         (EM_ARM|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARMEB       (EM_ARM)
@@ -346,7 +354,11 @@ enum {
 #define AUDIT_ARCH_MIPS                (EM_MIPS)
 #define AUDIT_ARCH_MIPSEL      (EM_MIPS|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_MIPS64      (EM_MIPS|__AUDIT_ARCH_64BIT)
+#define AUDIT_ARCH_MIPS64N32   (EM_MIPS|__AUDIT_ARCH_64BIT|\
+                                __AUDIT_ARCH_CONVENTION_MIPS64_N32)
 #define AUDIT_ARCH_MIPSEL64    (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE\
+                                __AUDIT_ARCH_CONVENTION_MIPS64_N32)
 #define AUDIT_ARCH_OPENRISC    (EM_OPENRISC)
 #define AUDIT_ARCH_PARISC      (EM_PARISC)
 #define AUDIT_ARCH_PARISC64    (EM_PARISC|__AUDIT_ARCH_64BIT)
index 1ba9d626aa833db91c462560f27054b30e91939d..194c1eab04d8ad9cb37ae855d8c011d48722c829 100644 (file)
@@ -3856,6 +3856,8 @@ enum nl80211_ap_sme_features {
  * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
  *     to work properly to suppport receiving regulatory hints from
  *     cellular base stations.
+ * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only
+ *     here to reserve the value for API/ABI compatibility)
  * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of
  *     equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station
  *     mode
@@ -3897,7 +3899,7 @@ enum nl80211_feature_flags {
        NL80211_FEATURE_HT_IBSS                         = 1 << 1,
        NL80211_FEATURE_INACTIVITY_TIMER                = 1 << 2,
        NL80211_FEATURE_CELL_BASE_REG_HINTS             = 1 << 3,
-       /* bit 4 is reserved - don't use */
+       NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL        = 1 << 4,
        NL80211_FEATURE_SAE                             = 1 << 5,
        NL80211_FEATURE_LOW_PRIORITY_SCAN               = 1 << 6,
        NL80211_FEATURE_SCAN_FLUSH                      = 1 << 7,
index 9fcdaa705b6cb7442b4babcb3a9ab40564afa27b..3f1ca934a2378495e5129dbe807bfc7111f53e8b 100644 (file)
@@ -348,7 +348,7 @@ struct cgrp_cset_link {
  * reference-counted, to improve performance when child cgroups
  * haven't been created.
  */
-static struct css_set init_css_set = {
+struct css_set init_css_set = {
        .refcount               = ATOMIC_INIT(1),
        .cgrp_links             = LIST_HEAD_INIT(init_css_set.cgrp_links),
        .tasks                  = LIST_HEAD_INIT(init_css_set.tasks),
@@ -1495,7 +1495,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
         */
        if (!use_task_css_set_links)
                cgroup_enable_task_cg_lists();
-retry:
+
        mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
@@ -1503,7 +1503,7 @@ retry:
        ret = parse_cgroupfs_options(data, &opts);
        if (ret)
                goto out_unlock;
-
+retry:
        /* look for a matching existing root */
        if (!opts.subsys_mask && !opts.none && !opts.name) {
                cgrp_dfl_root_visible = true;
@@ -1562,9 +1562,9 @@ retry:
                if (!atomic_inc_not_zero(&root->cgrp.refcnt)) {
                        mutex_unlock(&cgroup_mutex);
                        mutex_unlock(&cgroup_tree_mutex);
-                       kfree(opts.release_agent);
-                       kfree(opts.name);
                        msleep(10);
+                       mutex_lock(&cgroup_tree_mutex);
+                       mutex_lock(&cgroup_mutex);
                        goto retry;
                }
 
index 2bc4a2256444ebf500269c5dc276871e48e0efa8..345628c78b5b3779460038ec6f036f9e8b7c1a32 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/uaccess.h>
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
+#include <linux/mutex.h>
 
 /*
  * A cgroup is freezing if any FREEZING flags are set.  FREEZING_SELF is
@@ -42,9 +43,10 @@ enum freezer_state_flags {
 struct freezer {
        struct cgroup_subsys_state      css;
        unsigned int                    state;
-       spinlock_t                      lock;
 };
 
+static DEFINE_MUTEX(freezer_mutex);
+
 static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
 {
        return css ? container_of(css, struct freezer, css) : NULL;
@@ -93,7 +95,6 @@ freezer_css_alloc(struct cgroup_subsys_state *parent_css)
        if (!freezer)
                return ERR_PTR(-ENOMEM);
 
-       spin_lock_init(&freezer->lock);
        return &freezer->css;
 }
 
@@ -110,14 +111,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
        struct freezer *freezer = css_freezer(css);
        struct freezer *parent = parent_freezer(freezer);
 
-       /*
-        * The following double locking and freezing state inheritance
-        * guarantee that @cgroup can never escape ancestors' freezing
-        * states.  See css_for_each_descendant_pre() for details.
-        */
-       if (parent)
-               spin_lock_irq(&parent->lock);
-       spin_lock_nested(&freezer->lock, SINGLE_DEPTH_NESTING);
+       mutex_lock(&freezer_mutex);
 
        freezer->state |= CGROUP_FREEZER_ONLINE;
 
@@ -126,10 +120,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
                atomic_inc(&system_freezing_cnt);
        }
 
-       spin_unlock(&freezer->lock);
-       if (parent)
-               spin_unlock_irq(&parent->lock);
-
+       mutex_unlock(&freezer_mutex);
        return 0;
 }
 
@@ -144,14 +135,14 @@ static void freezer_css_offline(struct cgroup_subsys_state *css)
 {
        struct freezer *freezer = css_freezer(css);
 
-       spin_lock_irq(&freezer->lock);
+       mutex_lock(&freezer_mutex);
 
        if (freezer->state & CGROUP_FREEZING)
                atomic_dec(&system_freezing_cnt);
 
        freezer->state = 0;
 
-       spin_unlock_irq(&freezer->lock);
+       mutex_unlock(&freezer_mutex);
 }
 
 static void freezer_css_free(struct cgroup_subsys_state *css)
@@ -175,7 +166,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
        struct task_struct *task;
        bool clear_frozen = false;
 
-       spin_lock_irq(&freezer->lock);
+       mutex_lock(&freezer_mutex);
 
        /*
         * Make the new tasks conform to the current state of @new_css.
@@ -197,21 +188,13 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
                }
        }
 
-       spin_unlock_irq(&freezer->lock);
-
-       /*
-        * Propagate FROZEN clearing upwards.  We may race with
-        * update_if_frozen(), but as long as both work bottom-up, either
-        * update_if_frozen() sees child's FROZEN cleared or we clear the
-        * parent's FROZEN later.  No parent w/ !FROZEN children can be
-        * left FROZEN.
-        */
+       /* propagate FROZEN clearing upwards */
        while (clear_frozen && (freezer = parent_freezer(freezer))) {
-               spin_lock_irq(&freezer->lock);
                freezer->state &= ~CGROUP_FROZEN;
                clear_frozen = freezer->state & CGROUP_FREEZING;
-               spin_unlock_irq(&freezer->lock);
        }
+
+       mutex_unlock(&freezer_mutex);
 }
 
 /**
@@ -228,9 +211,6 @@ static void freezer_fork(struct task_struct *task)
 {
        struct freezer *freezer;
 
-       rcu_read_lock();
-       freezer = task_freezer(task);
-
        /*
         * The root cgroup is non-freezable, so we can skip locking the
         * freezer.  This is safe regardless of race with task migration.
@@ -238,24 +218,18 @@ static void freezer_fork(struct task_struct *task)
         * to do.  If we lost and root is the new cgroup, noop is still the
         * right thing to do.
         */
-       if (!parent_freezer(freezer))
-               goto out;
+       if (task_css_is_root(task, freezer_cgrp_id))
+               return;
 
-       /*
-        * Grab @freezer->lock and freeze @task after verifying @task still
-        * belongs to @freezer and it's freezing.  The former is for the
-        * case where we have raced against task migration and lost and
-        * @task is already in a different cgroup which may not be frozen.
-        * This isn't strictly necessary as freeze_task() is allowed to be
-        * called spuriously but let's do it anyway for, if nothing else,
-        * documentation.
-        */
-       spin_lock_irq(&freezer->lock);
-       if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING))
+       mutex_lock(&freezer_mutex);
+       rcu_read_lock();
+
+       freezer = task_freezer(task);
+       if (freezer->state & CGROUP_FREEZING)
                freeze_task(task);
-       spin_unlock_irq(&freezer->lock);
-out:
+
        rcu_read_unlock();
+       mutex_unlock(&freezer_mutex);
 }
 
 /**
@@ -281,22 +255,24 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
        struct css_task_iter it;
        struct task_struct *task;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
-
-       spin_lock_irq(&freezer->lock);
+       lockdep_assert_held(&freezer_mutex);
 
        if (!(freezer->state & CGROUP_FREEZING) ||
            (freezer->state & CGROUP_FROZEN))
-               goto out_unlock;
+               return;
 
        /* are all (live) children frozen? */
+       rcu_read_lock();
        css_for_each_child(pos, css) {
                struct freezer *child = css_freezer(pos);
 
                if ((child->state & CGROUP_FREEZER_ONLINE) &&
-                   !(child->state & CGROUP_FROZEN))
-                       goto out_unlock;
+                   !(child->state & CGROUP_FROZEN)) {
+                       rcu_read_unlock();
+                       return;
+               }
        }
+       rcu_read_unlock();
 
        /* are all tasks frozen? */
        css_task_iter_start(css, &it);
@@ -317,21 +293,29 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
        freezer->state |= CGROUP_FROZEN;
 out_iter_end:
        css_task_iter_end(&it);
-out_unlock:
-       spin_unlock_irq(&freezer->lock);
 }
 
 static int freezer_read(struct seq_file *m, void *v)
 {
        struct cgroup_subsys_state *css = seq_css(m), *pos;
 
+       mutex_lock(&freezer_mutex);
        rcu_read_lock();
 
        /* update states bottom-up */
-       css_for_each_descendant_post(pos, css)
+       css_for_each_descendant_post(pos, css) {
+               if (!css_tryget(pos))
+                       continue;
+               rcu_read_unlock();
+
                update_if_frozen(pos);
 
+               rcu_read_lock();
+               css_put(pos);
+       }
+
        rcu_read_unlock();
+       mutex_unlock(&freezer_mutex);
 
        seq_puts(m, freezer_state_strs(css_freezer(css)->state));
        seq_putc(m, '\n');
@@ -373,7 +357,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
                                unsigned int state)
 {
        /* also synchronizes against task migration, see freezer_attach() */
-       lockdep_assert_held(&freezer->lock);
+       lockdep_assert_held(&freezer_mutex);
 
        if (!(freezer->state & CGROUP_FREEZER_ONLINE))
                return;
@@ -414,31 +398,29 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
         * descendant will try to inherit its parent's FREEZING state as
         * CGROUP_FREEZING_PARENT.
         */
+       mutex_lock(&freezer_mutex);
        rcu_read_lock();
        css_for_each_descendant_pre(pos, &freezer->css) {
                struct freezer *pos_f = css_freezer(pos);
                struct freezer *parent = parent_freezer(pos_f);
 
-               spin_lock_irq(&pos_f->lock);
+               if (!css_tryget(pos))
+                       continue;
+               rcu_read_unlock();
 
-               if (pos_f == freezer) {
+               if (pos_f == freezer)
                        freezer_apply_state(pos_f, freeze,
                                            CGROUP_FREEZING_SELF);
-               } else {
-                       /*
-                        * Our update to @parent->state is already visible
-                        * which is all we need.  No need to lock @parent.
-                        * For more info on synchronization, see
-                        * freezer_post_create().
-                        */
+               else
                        freezer_apply_state(pos_f,
                                            parent->state & CGROUP_FREEZING,
                                            CGROUP_FREEZING_PARENT);
-               }
 
-               spin_unlock_irq(&pos_f->lock);
+               rcu_read_lock();
+               css_put(pos);
        }
        rcu_read_unlock();
+       mutex_unlock(&freezer_mutex);
 }
 
 static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
index f83a71a3e46d75547e540ed317f99531838f408b..440eefc67397e48f15b58bc8cf31712bec91286b 100644 (file)
@@ -1443,6 +1443,11 @@ group_sched_out(struct perf_event *group_event,
                cpuctx->exclusive = 0;
 }
 
+struct remove_event {
+       struct perf_event *event;
+       bool detach_group;
+};
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -1451,12 +1456,15 @@ group_sched_out(struct perf_event *group_event,
  */
 static int __perf_remove_from_context(void *info)
 {
-       struct perf_event *event = info;
+       struct remove_event *re = info;
+       struct perf_event *event = re->event;
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        raw_spin_lock(&ctx->lock);
        event_sched_out(event, cpuctx, ctx);
+       if (re->detach_group)
+               perf_group_detach(event);
        list_del_event(event, ctx);
        if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
                ctx->is_active = 0;
@@ -1481,10 +1489,14 @@ static int __perf_remove_from_context(void *info)
  * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_remove_from_context(struct perf_event *event)
+static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;
+       struct remove_event re = {
+               .event = event,
+               .detach_group = detach_group,
+       };
 
        lockdep_assert_held(&ctx->mutex);
 
@@ -1493,12 +1505,12 @@ static void perf_remove_from_context(struct perf_event *event)
                 * Per cpu events are removed via an smp call and
                 * the removal is always successful.
                 */
-               cpu_function_call(event->cpu, __perf_remove_from_context, event);
+               cpu_function_call(event->cpu, __perf_remove_from_context, &re);
                return;
        }
 
 retry:
-       if (!task_function_call(task, __perf_remove_from_context, event))
+       if (!task_function_call(task, __perf_remove_from_context, &re))
                return;
 
        raw_spin_lock_irq(&ctx->lock);
@@ -1515,6 +1527,8 @@ retry:
         * Since the task isn't running, its safe to remove the event, us
         * holding the ctx->lock ensures the task won't get scheduled in.
         */
+       if (detach_group)
+               perf_group_detach(event);
        list_del_event(event, ctx);
        raw_spin_unlock_irq(&ctx->lock);
 }
@@ -3178,7 +3192,8 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb);
 
 static void unaccount_event_cpu(struct perf_event *event, int cpu)
 {
@@ -3238,8 +3253,6 @@ static void free_event(struct perf_event *event)
        unaccount_event(event);
 
        if (event->rb) {
-               struct ring_buffer *rb;
-
                /*
                 * Can happen when we close an event with re-directed output.
                 *
@@ -3247,12 +3260,7 @@ static void free_event(struct perf_event *event)
                 * over us; possibly making our ring_buffer_put() the last.
                 */
                mutex_lock(&event->mmap_mutex);
-               rb = event->rb;
-               if (rb) {
-                       rcu_assign_pointer(event->rb, NULL);
-                       ring_buffer_detach(event, rb);
-                       ring_buffer_put(rb); /* could be last */
-               }
+               ring_buffer_attach(event, NULL);
                mutex_unlock(&event->mmap_mutex);
        }
 
@@ -3281,10 +3289,7 @@ int perf_event_release_kernel(struct perf_event *event)
         *     to trigger the AB-BA case.
         */
        mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
-       raw_spin_lock_irq(&ctx->lock);
-       perf_group_detach(event);
-       raw_spin_unlock_irq(&ctx->lock);
-       perf_remove_from_context(event);
+       perf_remove_from_context(event, true);
        mutex_unlock(&ctx->mutex);
 
        free_event(event);
@@ -3839,28 +3844,47 @@ unlock:
 static void ring_buffer_attach(struct perf_event *event,
                               struct ring_buffer *rb)
 {
+       struct ring_buffer *old_rb = NULL;
        unsigned long flags;
 
-       if (!list_empty(&event->rb_entry))
-               return;
+       if (event->rb) {
+               /*
+                * Should be impossible, we set this when removing
+                * event->rb_entry and wait/clear when adding event->rb_entry.
+                */
+               WARN_ON_ONCE(event->rcu_pending);
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       if (list_empty(&event->rb_entry))
-               list_add(&event->rb_entry, &rb->event_list);
-       spin_unlock_irqrestore(&rb->event_lock, flags);
-}
+               old_rb = event->rb;
+               event->rcu_batches = get_state_synchronize_rcu();
+               event->rcu_pending = 1;
 
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
-{
-       unsigned long flags;
+               spin_lock_irqsave(&old_rb->event_lock, flags);
+               list_del_rcu(&event->rb_entry);
+               spin_unlock_irqrestore(&old_rb->event_lock, flags);
+       }
 
-       if (list_empty(&event->rb_entry))
-               return;
+       if (event->rcu_pending && rb) {
+               cond_synchronize_rcu(event->rcu_batches);
+               event->rcu_pending = 0;
+       }
+
+       if (rb) {
+               spin_lock_irqsave(&rb->event_lock, flags);
+               list_add_rcu(&event->rb_entry, &rb->event_list);
+               spin_unlock_irqrestore(&rb->event_lock, flags);
+       }
+
+       rcu_assign_pointer(event->rb, rb);
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       list_del_init(&event->rb_entry);
-       wake_up_all(&event->waitq);
-       spin_unlock_irqrestore(&rb->event_lock, flags);
+       if (old_rb) {
+               ring_buffer_put(old_rb);
+               /*
+                * Since we detached before setting the new rb, so that we
+                * could attach the new rb, we could have missed a wakeup.
+                * Provide it now.
+                */
+               wake_up_all(&event->waitq);
+       }
 }
 
 static void ring_buffer_wakeup(struct perf_event *event)
@@ -3929,7 +3953,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 {
        struct perf_event *event = vma->vm_file->private_data;
 
-       struct ring_buffer *rb = event->rb;
+       struct ring_buffer *rb = ring_buffer_get(event);
        struct user_struct *mmap_user = rb->mmap_user;
        int mmap_locked = rb->mmap_locked;
        unsigned long size = perf_data_size(rb);
@@ -3937,18 +3961,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
        atomic_dec(&rb->mmap_count);
 
        if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
-               return;
+               goto out_put;
 
-       /* Detach current event from the buffer. */
-       rcu_assign_pointer(event->rb, NULL);
-       ring_buffer_detach(event, rb);
+       ring_buffer_attach(event, NULL);
        mutex_unlock(&event->mmap_mutex);
 
        /* If there's still other mmap()s of this buffer, we're done. */
-       if (atomic_read(&rb->mmap_count)) {
-               ring_buffer_put(rb); /* can't be last */
-               return;
-       }
+       if (atomic_read(&rb->mmap_count))
+               goto out_put;
 
        /*
         * No other mmap()s, detach from all other events that might redirect
@@ -3978,11 +3998,9 @@ again:
                 * still restart the iteration to make sure we're not now
                 * iterating the wrong list.
                 */
-               if (event->rb == rb) {
-                       rcu_assign_pointer(event->rb, NULL);
-                       ring_buffer_detach(event, rb);
-                       ring_buffer_put(rb); /* can't be last, we still have one */
-               }
+               if (event->rb == rb)
+                       ring_buffer_attach(event, NULL);
+
                mutex_unlock(&event->mmap_mutex);
                put_event(event);
 
@@ -4007,6 +4025,7 @@ again:
        vma->vm_mm->pinned_vm -= mmap_locked;
        free_uid(mmap_user);
 
+out_put:
        ring_buffer_put(rb); /* could be last */
 }
 
@@ -4124,7 +4143,6 @@ again:
        vma->vm_mm->pinned_vm += extra;
 
        ring_buffer_attach(event, rb);
-       rcu_assign_pointer(event->rb, rb);
 
        perf_event_init_userpage(event);
        perf_event_update_userpage(event);
@@ -5408,6 +5426,9 @@ struct swevent_htable {
 
        /* Recursion avoidance in each contexts */
        int                             recursion[PERF_NR_CONTEXTS];
+
+       /* Keeps track of cpu being initialized/exited */
+       bool                            online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5654,8 +5675,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
        hwc->state = !(flags & PERF_EF_START);
 
        head = find_swevent_head(swhash, event);
-       if (WARN_ON_ONCE(!head))
+       if (!head) {
+               /*
+                * We can race with cpu hotplug code. Do not
+                * WARN if the cpu just got unplugged.
+                */
+               WARN_ON_ONCE(swhash->online);
                return -EINVAL;
+       }
 
        hlist_add_head_rcu(&event->hlist_entry, head);
 
@@ -6914,7 +6941,7 @@ err_size:
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
-       struct ring_buffer *rb = NULL, *old_rb = NULL;
+       struct ring_buffer *rb = NULL;
        int ret = -EINVAL;
 
        if (!output_event)
@@ -6942,8 +6969,6 @@ set:
        if (atomic_read(&event->mmap_count))
                goto unlock;
 
-       old_rb = event->rb;
-
        if (output_event) {
                /* get the rb we want to redirect to */
                rb = ring_buffer_get(output_event);
@@ -6951,23 +6976,7 @@ set:
                        goto unlock;
        }
 
-       if (old_rb)
-               ring_buffer_detach(event, old_rb);
-
-       if (rb)
-               ring_buffer_attach(event, rb);
-
-       rcu_assign_pointer(event->rb, rb);
-
-       if (old_rb) {
-               ring_buffer_put(old_rb);
-               /*
-                * Since we detached before setting the new rb, so that we
-                * could attach the new rb, we could have missed a wakeup.
-                * Provide it now.
-                */
-               wake_up_all(&event->waitq);
-       }
+       ring_buffer_attach(event, rb);
 
        ret = 0;
 unlock:
@@ -7018,6 +7027,9 @@ SYSCALL_DEFINE5(perf_event_open,
        if (attr.freq) {
                if (attr.sample_freq > sysctl_perf_event_sample_rate)
                        return -EINVAL;
+       } else {
+               if (attr.sample_period & (1ULL << 63))
+                       return -EINVAL;
        }
 
        /*
@@ -7165,7 +7177,7 @@ SYSCALL_DEFINE5(perf_event_open,
                struct perf_event_context *gctx = group_leader->ctx;
 
                mutex_lock(&gctx->mutex);
-               perf_remove_from_context(group_leader);
+               perf_remove_from_context(group_leader, false);
 
                /*
                 * Removing from the context ends up with disabled
@@ -7175,7 +7187,7 @@ SYSCALL_DEFINE5(perf_event_open,
                perf_event__state_init(group_leader);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_remove_from_context(sibling);
+                       perf_remove_from_context(sibling, false);
                        perf_event__state_init(sibling);
                        put_ctx(gctx);
                }
@@ -7305,7 +7317,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
        mutex_lock(&src_ctx->mutex);
        list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
                                 event_entry) {
-               perf_remove_from_context(event);
+               perf_remove_from_context(event, false);
                unaccount_event_cpu(event, src_cpu);
                put_ctx(src_ctx);
                list_add(&event->migrate_entry, &events);
@@ -7367,13 +7379,7 @@ __perf_event_exit_task(struct perf_event *child_event,
                         struct perf_event_context *child_ctx,
                         struct task_struct *child)
 {
-       if (child_event->parent) {
-               raw_spin_lock_irq(&child_ctx->lock);
-               perf_group_detach(child_event);
-               raw_spin_unlock_irq(&child_ctx->lock);
-       }
-
-       perf_remove_from_context(child_event);
+       perf_remove_from_context(child_event, !!child_event->parent);
 
        /*
         * It can happen that the parent exits first, and has events
@@ -7724,6 +7730,8 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
         * swapped under us.
         */
        parent_ctx = perf_pin_task_context(parent, ctxn);
+       if (!parent_ctx)
+               return 0;
 
        /*
         * No need to check if parent_ctx != NULL here; since we saw
@@ -7835,6 +7843,7 @@ static void perf_event_init_cpu(int cpu)
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
        mutex_lock(&swhash->hlist_mutex);
+       swhash->online = true;
        if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
 
@@ -7857,14 +7866,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
+       struct remove_event re = { .detach_group = false };
        struct perf_event_context *ctx = __info;
-       struct perf_event *event;
 
        perf_pmu_rotate_stop(ctx->pmu);
 
        rcu_read_lock();
-       list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
-               __perf_remove_from_context(event);
+       list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
+               __perf_remove_from_context(&re);
        rcu_read_unlock();
 }
 
@@ -7892,6 +7901,7 @@ static void perf_event_exit_cpu(int cpu)
        perf_event_exit_cpu_context(cpu);
 
        mutex_lock(&swhash->hlist_mutex);
+       swhash->online = false;
        swevent_hlist_release(swhash);
        mutex_unlock(&swhash->hlist_mutex);
 }
index 6b715c0af1b117b5b61bd32629a00845f0313557..e0501fe7140d7c97daba3f3438b51272cf6d9932 100644 (file)
@@ -990,11 +990,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
        /* Remove an active timer from the queue: */
        ret = remove_hrtimer(timer, base);
 
-       /* Switch the timer base, if necessary: */
-       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-
        if (mode & HRTIMER_MODE_REL) {
-               tim = ktime_add_safe(tim, new_base->get_time());
+               tim = ktime_add_safe(tim, base->get_time());
                /*
                 * CONFIG_TIME_LOW_RES is a temporary way for architectures
                 * to signal that they simply return xtime in
@@ -1009,6 +1006,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 
        hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
+       /* Switch the timer base, if necessary: */
+       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
        timer_stats_hrtimer_set_start_info(timer);
 
        leftmost = enqueue_hrtimer(timer, new_base);
index d9d8ece46a15885410a1bb138f4c6c7155573803..204d3d281809aa90686b8d4f0199c7d933710908 100644 (file)
@@ -2592,8 +2592,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
        if (likely(prev->sched_class == class &&
                   rq->nr_running == rq->cfs.h_nr_running)) {
                p = fair_sched_class.pick_next_task(rq, prev);
-               if (likely(p && p != RETRY_TASK))
-                       return p;
+               if (unlikely(p == RETRY_TASK))
+                       goto again;
+
+               /* assumes fair_sched_class->next == idle_sched_class */
+               if (unlikely(!p))
+                       p = idle_sched_class.pick_next_task(rq, prev);
+
+               return p;
        }
 
 again:
@@ -3124,6 +3130,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
        dl_se->dl_throttled = 0;
        dl_se->dl_new = 1;
+       dl_se->dl_yielded = 0;
 }
 
 static void __setscheduler_params(struct task_struct *p,
@@ -3639,6 +3646,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  * sys_sched_setattr - same as above, but with extended sched_attr
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
                               unsigned int, flags)
@@ -3783,6 +3791,7 @@ err_size:
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
  * @size: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
                unsigned int, size, unsigned int, flags)
@@ -6017,6 +6026,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
                                        ,
                .last_balance           = jiffies,
                .balance_interval       = sd_weight,
+               .max_newidle_lb_cost    = 0,
+               .next_decay_max_lb_cost = jiffies,
        };
        SD_INIT_NAME(sd, NUMA);
        sd->private = &tl->data;
index 5b9bb42b2d47e760f3a7cd17af24ba37d2cf07ea..ab001b5d50487815da984947b592b26038cce794 100644 (file)
@@ -210,7 +210,5 @@ int cpudl_init(struct cpudl *cp)
  */
 void cpudl_cleanup(struct cpudl *cp)
 {
-       /*
-        * nothing to do for the moment
-        */
+       free_cpumask_var(cp->free_cpus);
 }
index 8b836b376d9129760066326eabf5040f72b2e4f3..3031bac8aa3ea990bc7425e835675c7a5a386ab5 100644 (file)
@@ -70,8 +70,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
        int idx = 0;
        int task_pri = convert_prio(p->prio);
 
-       if (task_pri >= MAX_RT_PRIO)
-               return 0;
+       BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
        for (idx = 0; idx < task_pri; idx++) {
                struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
index a95097cb4591b5bfa2466adb5600895e782fc661..72fdf06ef8652d5cb443b080f53ac117bd5517ba 100644 (file)
@@ -332,50 +332,50 @@ out:
  * softirq as those do not count in task exec_runtime any more.
  */
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq)
+                                        struct rq *rq, int ticks)
 {
-       cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+       cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
+       u64 cputime = (__force u64) cputime_one_jiffy;
        u64 *cpustat = kcpustat_this_cpu->cpustat;
 
        if (steal_account_process_tick())
                return;
 
+       cputime *= ticks;
+       scaled *= ticks;
+
        if (irqtime_account_hi_update()) {
-               cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
+               cpustat[CPUTIME_IRQ] += cputime;
        } else if (irqtime_account_si_update()) {
-               cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
+               cpustat[CPUTIME_SOFTIRQ] += cputime;
        } else if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time do not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
-               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                                       CPUTIME_SOFTIRQ);
+               __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
-               account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_user_time(p, cputime, scaled);
        } else if (p == rq->idle) {
-               account_idle_time(cputime_one_jiffy);
+               account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
-               account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_guest_time(p, cputime, scaled);
        } else {
-               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                                       CPUTIME_SYSTEM);
+               __account_system_time(p, cputime, scaled,       CPUTIME_SYSTEM);
        }
 }
 
 static void irqtime_account_idle_ticks(int ticks)
 {
-       int i;
        struct rq *rq = this_rq();
 
-       for (i = 0; i < ticks; i++)
-               irqtime_account_process_tick(current, 0, rq);
+       irqtime_account_process_tick(current, 0, rq, ticks);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 static inline void irqtime_account_idle_ticks(int ticks) {}
 static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq) {}
+                                               struct rq *rq, int nr_ticks) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 /*
@@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
                return;
 
        if (sched_clock_irqtime) {
-               irqtime_account_process_tick(p, user_tick, rq);
+               irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }
 
index b08095786cb8fff0c96773eb5f6a582da503bcb8..800e99b99075141421d82f0bdc07e42f09baea9d 100644 (file)
@@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
        sched_clock_tick();
        update_rq_clock(rq);
        dl_se->dl_throttled = 0;
+       dl_se->dl_yielded = 0;
        if (p->on_rq) {
                enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
                if (task_has_dl_policy(rq->curr))
@@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq)
         * We make the task go to sleep until its current deadline by
         * forcing its runtime to zero. This way, update_curr_dl() stops
         * it and the bandwidth timer will wake it up and will give it
-        * new scheduling parameters (thanks to dl_new=1).
+        * new scheduling parameters (thanks to dl_yielded=1).
         */
        if (p->dl.runtime > 0) {
-               rq->curr->dl.dl_new = 1;
+               rq->curr->dl.dl_yielded = 1;
                p->dl.runtime = 0;
        }
        update_curr_dl(rq);
index 7570dd969c2838e9aab12ba9cb2c24cb87e21855..0fdb96de81a5b8a92c302961769cdefdb5cad915 100644 (file)
@@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
        int this_cpu = this_rq->cpu;
 
        idle_enter_fair(this_rq);
+
        /*
         * We must set idle_stamp _before_ calling idle_balance(), such that we
         * measure the duration of idle_balance() as idle time.
@@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)
 
        raw_spin_lock(&this_rq->lock);
 
+       if (curr_cost > this_rq->max_idle_balance_cost)
+               this_rq->max_idle_balance_cost = curr_cost;
+
        /*
-        * While browsing the domains, we released the rq lock.
-        * A task could have be enqueued in the meantime
+        * While browsing the domains, we released the rq lock, a task could
+        * have been enqueued in the meantime. Since we're not going idle,
+        * pretend we pulled a task.
         */
-       if (this_rq->cfs.h_nr_running && !pulled_task) {
+       if (this_rq->cfs.h_nr_running && !pulled_task)
                pulled_task = 1;
-               goto out;
-       }
 
        if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
                /*
@@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq)
                this_rq->next_balance = next_balance;
        }
 
-       if (curr_cost > this_rq->max_idle_balance_cost)
-               this_rq->max_idle_balance_cost = curr_cost;
-
 out:
        /* Is there a task of a high priority class? */
        if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
index 0ee63af30bd14a4ad7f4b8f846d19b100fd596b3..8edc87185427cb17fa02ed93498fcf6f8301cb7e 100644 (file)
@@ -1916,6 +1916,12 @@ static void send_mayday(struct work_struct *work)
 
        /* mayday mayday mayday */
        if (list_empty(&pwq->mayday_node)) {
+               /*
+                * If @pwq is for an unbound wq, its base ref may be put at
+                * any time due to an attribute change.  Pin @pwq until the
+                * rescuer is done with it.
+                */
+               get_pwq(pwq);
                list_add_tail(&pwq->mayday_node, &wq->maydays);
                wake_up_process(wq->rescuer->task);
        }
@@ -2398,6 +2404,7 @@ static int rescuer_thread(void *__rescuer)
        struct worker *rescuer = __rescuer;
        struct workqueue_struct *wq = rescuer->rescue_wq;
        struct list_head *scheduled = &rescuer->scheduled;
+       bool should_stop;
 
        set_user_nice(current, RESCUER_NICE_LEVEL);
 
@@ -2409,11 +2416,15 @@ static int rescuer_thread(void *__rescuer)
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);
 
-       if (kthread_should_stop()) {
-               __set_current_state(TASK_RUNNING);
-               rescuer->task->flags &= ~PF_WQ_WORKER;
-               return 0;
-       }
+       /*
+        * By the time the rescuer is requested to stop, the workqueue
+        * shouldn't have any work pending, but @wq->maydays may still have
+        * pwq(s) queued.  This can happen by non-rescuer workers consuming
+        * all the work items before the rescuer got to them.  Go through
+        * @wq->maydays processing before acting on should_stop so that the
+        * list is always empty on exit.
+        */
+       should_stop = kthread_should_stop();
 
        /* see whether any pwq is asking for help */
        spin_lock_irq(&wq_mayday_lock);
@@ -2444,6 +2455,12 @@ repeat:
 
                process_scheduled_works(rescuer);
 
+               /*
+                * Put the reference grabbed by send_mayday().  @pool won't
+                * go away while we're holding its lock.
+                */
+               put_pwq(pwq);
+
                /*
                 * Leave this pool.  If keep_working() is %true, notify a
                 * regular worker; otherwise, we end up with 0 concurrency
@@ -2459,6 +2476,12 @@ repeat:
 
        spin_unlock_irq(&wq_mayday_lock);
 
+       if (should_stop) {
+               __set_current_state(TASK_RUNNING);
+               rescuer->task->flags &= ~PF_WQ_WORKER;
+               return 0;
+       }
+
        /* rescuers should never participate in concurrency management */
        WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
        schedule();
@@ -4100,7 +4123,8 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
        if (!pwq) {
                pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
                           wq->name);
-               goto out_unlock;
+               mutex_lock(&wq->mutex);
+               goto use_dfl_pwq;
        }
 
        /*
index ebe5880c29d6cbe2306054f19bda87ffb3daa59a..1b5a95f0fa013ca428e877ae39d6f0148a49cd61 100644 (file)
@@ -581,3 +581,18 @@ config PGTABLE_MAPPING
 
 config GENERIC_EARLY_IOREMAP
        bool
+
+config MAX_STACK_SIZE_MB
+       int "Maximum user stack size for 32-bit processes (MB)"
+       default 80
+       range 8 256 if METAG
+       range 8 2048
+       depends on STACK_GROWSUP && (!64BIT || COMPAT)
+       help
+         This is the maximum stack size in Megabytes in the VM layout of 32-bit
+         user processes when the stack grows upwards (currently only on parisc
+         and metag arch). The stack will be located at the highest memory
+         address minus the given value, unless the RLIMIT_STACK hard limit is
+         changed to a smaller value in which case that is used.
+
+         A sane initial value is 80 MB.
index 000a220e2a4142f8ed93b5e4c98125f9cee9c443..088358c8006bb9c109da188c2b5ccf4a91614114 100644 (file)
@@ -257,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping)
 {
        int ret = 0;
        /* Check for outstanding write errors */
-       if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+       if (test_bit(AS_ENOSPC, &mapping->flags) &&
+           test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
-       if (test_and_clear_bit(AS_EIO, &mapping->flags))
+       if (test_bit(AS_EIO, &mapping->flags) &&
+           test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;
        return ret;
 }
index 91d67eaee0500796c9e5569fedc7cc5775002dda..8d2fcdfeff7fdb319f58c838cd8b94a6cc59121e 100644 (file)
@@ -1775,10 +1775,9 @@ void __init kmemleak_init(void)
        int i;
        unsigned long flags;
 
-       kmemleak_early_log = 0;
-
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
        if (!kmemleak_skip_disable) {
+               kmemleak_early_log = 0;
                kmemleak_disable();
                return;
        }
@@ -1796,6 +1795,7 @@ void __init kmemleak_init(void)
 
        /* the kernel is still in UP mode, so disabling the IRQs is enough */
        local_irq_save(flags);
+       kmemleak_early_log = 0;
        if (kmemleak_error) {
                local_irq_restore(flags);
                return;
index 539eeb96b323bf649f83783e0dddcb4f907e1d6e..a402f8fdc68e94888ea177104524085c9f490fd5 100644 (file)
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
        for (; start < end; start += PAGE_SIZE) {
                index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
-               page = find_get_page(mapping, index);
+               page = find_get_entry(mapping, index);
                if (!radix_tree_exceptional_entry(page)) {
                        if (page)
                                page_cache_release(page);
index c47dffdcb246b0cc3120d50d6dfff1a6cd0369ea..5177c6d4a2ddbf6d28ece095287c1a4a36ef9a2a 100644 (file)
@@ -1077,9 +1077,18 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 
        rcu_read_lock();
        do {
-               memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
-               if (unlikely(!memcg))
+               /*
+                * Page cache insertions can happen withou an
+                * actual mm context, e.g. during disk probing
+                * on boot, loopback IO, acct() writes etc.
+                */
+               if (unlikely(!mm))
                        memcg = root_mem_cgroup;
+               else {
+                       memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+                       if (unlikely(!memcg))
+                               memcg = root_mem_cgroup;
+               }
        } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
        return memcg;
@@ -3958,17 +3967,9 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
                return 0;
        }
 
-       /*
-        * Page cache insertions can happen without an actual mm
-        * context, e.g. during disk probing on boot.
-        */
-       if (unlikely(!mm))
-               memcg = root_mem_cgroup;
-       else {
-               memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
-               if (!memcg)
-                       return -ENOMEM;
-       }
+       memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+       if (!memcg)
+               return -ENOMEM;
        __mem_cgroup_commit_charge(memcg, page, 1, type, false);
        return 0;
 }
index 35ef28acf137c0ab76393ede3dbc1c3d820f5c37..9ccef39a9de261c96f4e5775d7dca48b63d4d133 100644 (file)
@@ -1081,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                        return 0;
                } else if (PageHuge(hpage)) {
                        /*
-                        * Check "just unpoisoned", "filter hit", and
-                        * "race with other subpage."
+                        * Check "filter hit" and "race with other subpage."
                         */
                        lock_page(hpage);
-                       if (!PageHWPoison(hpage)
-                           || (hwpoison_filter(p) && TestClearPageHWPoison(p))
-                           || (p != hpage && TestSetPageHWPoison(hpage))) {
-                               atomic_long_sub(nr_pages, &num_poisoned_pages);
-                               return 0;
+                       if (PageHWPoison(hpage)) {
+                               if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
+                                   || (p != hpage && TestSetPageHWPoison(hpage))) {
+                                       atomic_long_sub(nr_pages, &num_poisoned_pages);
+                                       unlock_page(hpage);
+                                       return 0;
+                               }
                        }
                        set_page_hwpoison_huge_page(hpage);
                        res = dequeue_hwpoisoned_huge_page(hpage);
@@ -1152,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         */
        if (!PageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+               atomic_long_sub(nr_pages, &num_poisoned_pages);
+               put_page(hpage);
                res = 0;
                goto out;
        }
index 0843feb66f3d0236abd4386b5bfd0170c24ae0ef..05f1180e9f21822e99a5f11a2a7a03af663a422c 100644 (file)
@@ -194,10 +194,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                        break;
                if (pmd_trans_huge(*old_pmd)) {
                        int err = 0;
-                       if (extent == HPAGE_PMD_SIZE)
+                       if (extent == HPAGE_PMD_SIZE) {
+                               VM_BUG_ON(vma->vm_file || !vma->anon_vma);
+                               /* See comment in move_ptes() */
+                               if (need_rmap_locks)
+                                       anon_vma_lock_write(vma->anon_vma);
                                err = move_huge_pmd(vma, new_vma, old_addr,
                                                    new_addr, old_end,
                                                    old_pmd, new_pmd);
+                               if (need_rmap_locks)
+                                       anon_vma_unlock_write(vma->anon_vma);
+                       }
                        if (err > 0) {
                                need_flush = true;
                                continue;
index 63e24fb4387b6d305960f9e7ba8c0554e6818ca5..2ddf9a990dbd057228782a3af5ac6901a0af632b 100644 (file)
@@ -610,7 +610,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
        chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
                                                sizeof(chunk->map[0]));
        if (!chunk->map) {
-               kfree(chunk);
+               pcpu_mem_free(chunk, pcpu_chunk_struct_size);
                return NULL;
        }
 
index 175273f38cb1bd59f5aeb88cb8c815033475dfe8..44ebd5c2cd4aef0f86bd6475132cc40c501c6fef 100644 (file)
@@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev)
        if (err < 0)
                goto out_uninit_mvrp;
 
+       vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto out_uninit_mvrp;
index 733ec283ed1b9e85f9181f67116052f88bb49951..019efb79708f81976bc6484cc371a8fa6c0c080e 100644 (file)
@@ -493,48 +493,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
        }
 }
 
-static int vlan_calculate_locking_subclass(struct net_device *real_dev)
-{
-       int subclass = 0;
-
-       while (is_vlan_dev(real_dev)) {
-               subclass++;
-               real_dev = vlan_dev_priv(real_dev)->real_dev;
-       }
-
-       return subclass;
-}
-
-static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
-{
-       int err = 0, subclass;
-
-       subclass = vlan_calculate_locking_subclass(to);
-
-       spin_lock_nested(&to->addr_list_lock, subclass);
-       err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
-       if (!err)
-               __dev_set_rx_mode(to);
-       spin_unlock(&to->addr_list_lock);
-}
-
-static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
-{
-       int err = 0, subclass;
-
-       subclass = vlan_calculate_locking_subclass(to);
-
-       spin_lock_nested(&to->addr_list_lock, subclass);
-       err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
-       if (!err)
-               __dev_set_rx_mode(to);
-       spin_unlock(&to->addr_list_lock);
-}
-
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-       vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
-       vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -562,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
        netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
 }
 
+static int vlan_dev_get_lock_subclass(struct net_device *dev)
+{
+       return vlan_dev_priv(dev)->nest_level;
+}
+
 static const struct header_ops vlan_header_ops = {
        .create  = vlan_dev_hard_header,
        .rebuild = vlan_dev_rebuild_header,
@@ -597,7 +564,6 @@ static const struct net_device_ops vlan_netdev_ops;
 static int vlan_dev_init(struct net_device *dev)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-       int subclass = 0;
 
        netif_carrier_off(dev);
 
@@ -646,8 +612,7 @@ static int vlan_dev_init(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-       subclass = vlan_calculate_locking_subclass(dev);
-       vlan_dev_set_lockdep_class(dev, subclass);
+       vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
 
        vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@ -819,6 +784,7 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
+       .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
 };
 
 void vlan_setup(struct net_device *dev)
index b3bd4ec3fd9452f0d1f9a99dd4782260ab65c818..f04224c32005aa9a732622805915fe9aead9ee3e 100644 (file)
@@ -1545,6 +1545,8 @@ out_neigh:
        if ((orig_neigh_node) && (!is_single_hop_neigh))
                batadv_orig_node_free_ref(orig_neigh_node);
 out:
+       if (router_ifinfo)
+               batadv_neigh_ifinfo_free_ref(router_ifinfo);
        if (router)
                batadv_neigh_node_free_ref(router);
        if (router_router)
index b25fd64d727b0d6e8227671f860b133095df5100..aa5d4946d0d784d32fdcdce217c4f2bb482502f0 100644 (file)
@@ -940,8 +940,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                 * additional DAT answer may trigger kernel warnings about
                 * a packet coming from the wrong port.
                 */
-               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
-                                       BATADV_NO_FLAGS)) {
+               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
                        ret = true;
                        goto out;
                }
index bcc4bea632fa69ead6567e016f2f265840f7968b..f14e54a0569178e8b423b4921c4b610e4c63039c 100644 (file)
@@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
                             struct batadv_neigh_node *neigh_node)
 {
        struct batadv_priv *bat_priv;
-       struct batadv_hard_iface *primary_if;
+       struct batadv_hard_iface *primary_if = NULL;
        struct batadv_frag_packet frag_header;
        struct sk_buff *skb_fragment;
        unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
        unsigned header_size = sizeof(frag_header);
        unsigned max_fragment_size, max_packet_size;
+       bool ret = false;
 
        /* To avoid merge and refragmentation at next-hops we never send
         * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
                           skb->len + ETH_HLEN);
        batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 
-       return true;
+       ret = true;
+
 out_err:
-       return false;
+       if (primary_if)
+               batadv_hardif_free_ref(primary_if);
+
+       return ret;
 }
index c835e137423bb9ec70b98b5130d5a33c12f9b139..90cff585b37d5a3779cdb8f7b3b90a26eb2df88e 100644 (file)
 
 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
 {
-       if (atomic_dec_and_test(&gw_node->refcount))
+       if (atomic_dec_and_test(&gw_node->refcount)) {
+               batadv_orig_node_free_ref(gw_node->orig_node);
                kfree_rcu(gw_node, rcu);
+       }
 }
 
 static struct batadv_gw_node *
@@ -406,9 +408,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        if (gateway->bandwidth_down == 0)
                return;
 
+       if (!atomic_inc_not_zero(&orig_node->refcount))
+               return;
+
        gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
-       if (!gw_node)
+       if (!gw_node) {
+               batadv_orig_node_free_ref(orig_node);
                return;
+       }
 
        INIT_HLIST_NODE(&gw_node->list);
        gw_node->orig_node = orig_node;
index b851cc58085330acbab02848fedf3cb01751a060..fbda6b54baffccf798375cb8add49bb179738386 100644 (file)
@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
                return true;
 
        /* no more parents..stop recursion */
-       if (net_dev->iflink == net_dev->ifindex)
+       if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
                return false;
 
        /* recurse over the parent device */
index ffd9dfbd9b0e856e35e2ac6ea594739e8feb614d..6a484514cd3e98b9e0b27a924b4dcb92f2682055 100644 (file)
@@ -501,12 +501,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
 {
        struct batadv_orig_ifinfo *orig_ifinfo;
+       struct batadv_neigh_node *router;
 
        orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
 
        if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
                batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
 
+       /* this is the last reference to this object */
+       router = rcu_dereference_protected(orig_ifinfo->router, true);
+       if (router)
+               batadv_neigh_node_free_ref_now(router);
        kfree(orig_ifinfo);
 }
 
@@ -701,6 +706,47 @@ free_orig_node:
        return NULL;
 }
 
+/**
+ * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * @bat_priv: the bat priv with all the soft interface information
+ * @neigh: orig node which is to be checked
+ */
+static void
+batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
+                         struct batadv_neigh_node *neigh)
+{
+       struct batadv_neigh_ifinfo *neigh_ifinfo;
+       struct batadv_hard_iface *if_outgoing;
+       struct hlist_node *node_tmp;
+
+       spin_lock_bh(&neigh->ifinfo_lock);
+
+       /* for all ifinfo objects for this neighinator */
+       hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+                                 &neigh->ifinfo_list, list) {
+               if_outgoing = neigh_ifinfo->if_outgoing;
+
+               /* always keep the default interface */
+               if (if_outgoing == BATADV_IF_DEFAULT)
+                       continue;
+
+               /* don't purge if the interface is not (going) down */
+               if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
+                   (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
+                   (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
+                       continue;
+
+               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                          "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
+                          neigh->addr, if_outgoing->net_dev->name);
+
+               hlist_del_rcu(&neigh_ifinfo->list);
+               batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
+       }
+
+       spin_unlock_bh(&neigh->ifinfo_lock);
+}
+
 /**
  * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
  * @bat_priv: the bat priv with all the soft interface information
@@ -800,6 +846,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 
                        hlist_del_rcu(&neigh_node->list);
                        batadv_neigh_node_free_ref(neigh_node);
+               } else {
+                       /* only necessary if not the whole neighbor is to be
+                        * deleted, but some interface has been removed.
+                        */
+                       batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
                }
        }
 
@@ -857,7 +908,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
 {
        struct batadv_neigh_node *best_neigh_node;
        struct batadv_hard_iface *hard_iface;
-       bool changed;
+       bool changed_ifinfo, changed_neigh;
 
        if (batadv_has_timed_out(orig_node->last_seen,
                                 2 * BATADV_PURGE_TIMEOUT)) {
@@ -867,10 +918,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                           jiffies_to_msecs(orig_node->last_seen));
                return true;
        }
-       changed = batadv_purge_orig_ifinfo(bat_priv, orig_node);
-       changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node);
+       changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
+       changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
 
-       if (!changed)
+       if (!changed_ifinfo && !changed_neigh)
                return false;
 
        /* first for NULL ... */
@@ -1028,7 +1079,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
        bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
 
 out:
-       batadv_hardif_free_ref(hard_iface);
+       if (hard_iface)
+               batadv_hardif_free_ref(hard_iface);
        return 0;
 }
 
index 80e1b0f60a30214002684a42b1bab1a02e9d9962..2acf7fa1fec6c2309123dc189ea964f66f230d07 100644 (file)
@@ -859,12 +859,12 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
        return NF_STOLEN;
 }
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
        int ret;
 
-       if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
+       if (skb->protocol == htons(ETH_P_IP) &&
            skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
            !skb_is_gso(skb)) {
                if (br_parse_ip_options(skb))
index dac7f9b986877efa88f8e308e783c216563ad243..1948d592aa54c7a1831df546702904898cd68da4 100644 (file)
@@ -557,7 +557,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
        return r;
 }
 
-static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
                     int offset, size_t size, bool more)
 {
        int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
@@ -570,6 +570,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
        return ret;
 }
 
+static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+                    int offset, size_t size, bool more)
+{
+       int ret;
+       struct kvec iov;
+
+       /* sendpage cannot properly handle pages with page_count == 0,
+        * we need to fallback to sendmsg if that's the case */
+       if (page_count(page) >= 1)
+               return __ceph_tcp_sendpage(sock, page, offset, size, more);
+
+       iov.iov_base = kmap(page) + offset;
+       iov.iov_len = size;
+       ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
+       kunmap(page);
+
+       return ret;
+}
 
 /*
  * Shutdown/close the socket for the given connection.
index 8b8a5a24b223ef268c28cf5e5ac5379314bba237..c547e46084d360c14abd34f97b6d1d3592d1c641 100644 (file)
@@ -329,6 +329,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
        dout("crush decode tunable chooseleaf_descend_once = %d",
             c->chooseleaf_descend_once);
 
+       ceph_decode_need(p, end, sizeof(u8), done);
+       c->chooseleaf_vary_r = ceph_decode_8(p);
+       dout("crush decode tunable chooseleaf_vary_r = %d",
+            c->chooseleaf_vary_r);
+
 done:
        dout("crush_decode success\n");
        return c;
index d2c8a06b3a9883b618c55a8cddf0929a4bcd049f..9abc503b19b7dad367b83a8179468ee3e1ae72d5 100644 (file)
@@ -2418,7 +2418,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
  * 2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
        int i;
@@ -2493,38 +2493,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-                                           const struct net_device *dev,
-                                           netdev_features_t features)
+       netdev_features_t features)
 {
        int tmp;
 
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
                features &= ~NETIF_F_ALL_CSUM;
-       } else if (illegal_highdma(dev, skb)) {
+       } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }
 
        return features;
 }
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                        const struct net_device *dev)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
        __be16 protocol = skb->protocol;
-       netdev_features_t features = dev->features;
+       netdev_features_t features = skb->dev->features;
 
-       if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+       if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
                features &= ~NETIF_F_GSO_MASK;
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
        } else if (!vlan_tx_tag_present(skb)) {
-               return harmonize_features(skb, dev, features);
+               return harmonize_features(skb, features);
        }
 
-       features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+       features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
                                               NETIF_F_HW_VLAN_STAG_TX);
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2532,9 +2530,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
                                NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
                                NETIF_F_HW_VLAN_STAG_TX;
 
-       return harmonize_features(skb, dev, features);
+       return harmonize_features(skb, features);
 }
-EXPORT_SYMBOL(netif_skb_dev_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        struct netdev_queue *txq)
@@ -3953,6 +3951,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        }
        NAPI_GRO_CB(skb)->count = 1;
        NAPI_GRO_CB(skb)->age = jiffies;
+       NAPI_GRO_CB(skb)->last = skb;
        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
        skb->next = napi->gro_list;
        napi->gro_list = skb;
@@ -4542,6 +4541,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
 }
 EXPORT_SYMBOL(netdev_adjacent_get_private);
 
+/**
+ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                struct list_head **iter)
+{
+       struct netdev_adjacent *upper;
+
+       WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+
+       upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+       if (&upper->list == &dev->adj_list.upper)
+               return NULL;
+
+       *iter = &upper->list;
+
+       return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
 /**
  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
@@ -4623,6 +4648,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
 
+/**
+ * netdev_lower_get_next - Get the next device from the lower neighbour
+ *                         list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour lower
+ * list will remain unchainged.
+ */
+void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+       struct netdev_adjacent *lower;
+
+       lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       *iter = &lower->list;
+
+       return lower->dev;
+}
+EXPORT_SYMBOL(netdev_lower_get_next);
+
 /**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
  *                                    lower neighbour list, RCU
@@ -5073,6 +5124,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
+
+int dev_get_nest_level(struct net_device *dev,
+                      bool (*type_check)(struct net_device *dev))
+{
+       struct net_device *lower = NULL;
+       struct list_head *iter;
+       int max_nest = -1;
+       int nest;
+
+       ASSERT_RTNL();
+
+       netdev_for_each_lower_dev(dev, lower, iter) {
+               nest = dev_get_nest_level(lower, type_check);
+               if (max_nest < nest)
+                       max_nest = nest;
+       }
+
+       if (type_check(dev))
+               max_nest++;
+
+       return max_nest;
+}
+EXPORT_SYMBOL(dev_get_nest_level);
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -5238,7 +5313,6 @@ void __dev_set_rx_mode(struct net_device *dev)
        if (ops->ndo_set_rx_mode)
                ops->ndo_set_rx_mode(dev);
 }
-EXPORT_SYMBOL(__dev_set_rx_mode);
 
 void dev_set_rx_mode(struct net_device *dev)
 {
@@ -5543,7 +5617,7 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregisteration */
 static LIST_HEAD(net_todo_list);
-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
 {
index 8f8a96ef9f3f64ba519fe4c872d46c7b7c680ec9..32d872eec7f5c535221898cdb45ab8f235d0b4bb 100644 (file)
@@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
        neigh->updated = jiffies;
        if (!(neigh->nud_state & NUD_FAILED))
                return;
-       neigh->nud_state = NUD_PROBE;
-       atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+       neigh->nud_state = NUD_INCOMPLETE;
+       atomic_set(&neigh->probes, neigh_max_probes(neigh));
        neigh_add_timer(neigh,
                        jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
 }
index 81d3a9a084536541867afe9350602c0c73253006..7c8ffd97496175c60d3947019bfd138bb9746938 100644 (file)
@@ -24,7 +24,7 @@
 
 static LIST_HEAD(pernet_list);
 static struct list_head *first_device = &pernet_list;
-static DEFINE_MUTEX(net_mutex);
+DEFINE_MUTEX(net_mutex);
 
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
index 9837bebf93cea9e9a2f909947326b83b3a3356f9..2d8d8fcfa060c51e2f6cbe240aff3d01b3f964cc 100644 (file)
@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
 
+/* Return with the rtnl_lock held when there are no network
+ * devices unregistering in any network namespace.
+ */
+static void rtnl_lock_unregistering_all(void)
+{
+       struct net *net;
+       bool unregistering;
+       DEFINE_WAIT(wait);
+
+       for (;;) {
+               prepare_to_wait(&netdev_unregistering_wq, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               unregistering = false;
+               rtnl_lock();
+               for_each_net(net) {
+                       if (net->dev_unreg_count > 0) {
+                               unregistering = true;
+                               break;
+                       }
+               }
+               if (!unregistering)
+                       break;
+               __rtnl_unlock();
+               schedule();
+       }
+       finish_wait(&netdev_unregistering_wq, &wait);
+}
+
 /**
  * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
  * @ops: struct rtnl_link_ops * to unregister
  */
 void rtnl_link_unregister(struct rtnl_link_ops *ops)
 {
-       rtnl_lock();
+       /* Close the race with cleanup_net() */
+       mutex_lock(&net_mutex);
+       rtnl_lock_unregistering_all();
        __rtnl_link_unregister(ops);
        rtnl_unlock();
+       mutex_unlock(&net_mutex);
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 
index 1b62343f58378b3d8fc0e3ea048dbb45ce1e3a76..8383b2bddeb923bd629da7d9eac2cdd1ff1d81bd 100644 (file)
@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        if (unlikely(p->len + len >= 65536))
                return -E2BIG;
 
-       lp = NAPI_GRO_CB(p)->last ?: p;
+       lp = NAPI_GRO_CB(p)->last;
        pinfo = skb_shinfo(lp);
 
        if (headlen <= offset) {
@@ -3192,7 +3192,7 @@ merge:
 
        __skb_pull(skb, offset);
 
-       if (!NAPI_GRO_CB(p)->last)
+       if (NAPI_GRO_CB(p)->last == p)
                skb_shinfo(p)->frag_list = skb;
        else
                NAPI_GRO_CB(p)->last->next = skb;
index 2f737bf90b3fe4235c75ccca6640c735ecaa076b..eed34338736c275aa02bfa40448801d46dda736b 100644 (file)
@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
 {
        struct __net_random_once_work *work =
                container_of(w, struct __net_random_once_work, work);
-       if (!static_key_enabled(work->key))
-               static_key_slow_inc(work->key);
+       BUG_ON(!static_key_enabled(work->key));
+       static_key_slow_dec(work->key);
        kfree(work);
 }
 
@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
 }
 
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
-                          struct static_key *done_key)
+                          struct static_key *once_key)
 {
        static DEFINE_SPINLOCK(lock);
        unsigned long flags;
@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
        *done = true;
        spin_unlock_irqrestore(&lock, flags);
 
-       __net_random_once_disable_jump(done_key);
+       __net_random_once_disable_jump(once_key);
 
        return true;
 }
index 0eb5d5e76dfbe1f99537e8a561f1671389c09e16..5db37cef50a9ccd80c642118f54dd4ecf04219b8 100644 (file)
@@ -406,8 +406,9 @@ static int dsa_of_probe(struct platform_device *pdev)
                goto out_free;
        }
 
-       chip_index = 0;
+       chip_index = -1;
        for_each_available_child_of_node(np, child) {
+               chip_index++;
                cd = &pd->chip[chip_index];
 
                cd->mii_bus = &mdio_bus->dev;
index 8c54870db792cab059bff464d29776510ec3e5ec..6d6dd345bc4d89dea7dac7b179e4ef0f391b3954 100644 (file)
@@ -1650,6 +1650,39 @@ static int __init init_ipv4_mibs(void)
        return register_pernet_subsys(&ipv4_mib_ops);
 }
 
+static __net_init int inet_init_net(struct net *net)
+{
+       /*
+        * Set defaults for local port range
+        */
+       seqlock_init(&net->ipv4.ip_local_ports.lock);
+       net->ipv4.ip_local_ports.range[0] =  32768;
+       net->ipv4.ip_local_ports.range[1] =  61000;
+
+       seqlock_init(&net->ipv4.ping_group_range.lock);
+       /*
+        * Sane defaults - nobody may create ping sockets.
+        * Boot scripts should set this to distro-specific group.
+        */
+       net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
+       net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+       return 0;
+}
+
+static __net_exit void inet_exit_net(struct net *net)
+{
+}
+
+static __net_initdata struct pernet_operations af_inet_ops = {
+       .init = inet_init_net,
+       .exit = inet_exit_net,
+};
+
+static int __init init_inet_pernet_ops(void)
+{
+       return register_pernet_subsys(&af_inet_ops);
+}
+
 static int ipv4_proc_init(void);
 
 /*
@@ -1794,6 +1827,9 @@ static int __init inet_init(void)
        if (ip_mr_init())
                pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
 #endif
+
+       if (init_inet_pernet_ops())
+               pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
        /*
         *      Initialise per-cpu ipv4 mibs
         */
index 8a043f03c88ecbb418b5466953abefd54c50b1d0..b10cd43a4722730205272d7822d699bc49ea71d3 100644 (file)
@@ -821,13 +821,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
        if (fi == NULL)
                goto failure;
+       fib_info_cnt++;
        if (cfg->fc_mx) {
                fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
                if (!fi->fib_metrics)
                        goto failure;
        } else
                fi->fib_metrics = (u32 *) dst_default_metrics;
-       fib_info_cnt++;
 
        fi->fib_net = hold_net(net);
        fi->fib_protocol = cfg->fc_protocol;
index 0d1e2cb877ec43692c5a7b4fe57e16cf921a8c97..a56b8e6e866a8c4327f86111adac947e9ffc2445 100644 (file)
@@ -37,11 +37,11 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
-               *low = net->ipv4.sysctl_local_ports.range[0];
-               *high = net->ipv4.sysctl_local_ports.range[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+               *low = net->ipv4.ip_local_ports.range[0];
+               *high = net->ipv4.ip_local_ports.range[1];
+       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
 
index be8abe73bb9f464a2e68679255acde3b708ce84b..6f111e48e11c15a19ffd60d345b1399fd3c66953 100644 (file)
 static bool ip_may_fragment(const struct sk_buff *skb)
 {
        return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
-              !skb->local_df;
+               skb->local_df;
 }
 
 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 {
-       if (skb->len <= mtu || skb->local_df)
+       if (skb->len <= mtu)
                return false;
 
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -56,53 +56,6 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
        return true;
 }
 
-static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
-{
-       unsigned int mtu;
-
-       if (skb->local_df || !skb_is_gso(skb))
-               return false;
-
-       mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
-
-       /* if seglen > mtu, do software segmentation for IP fragmentation on
-        * output.  DF bit cannot be set since ip_forward would have sent
-        * icmp error.
-        */
-       return skb_gso_network_seglen(skb) > mtu;
-}
-
-/* called if GSO skb needs to be fragmented on forward */
-static int ip_forward_finish_gso(struct sk_buff *skb)
-{
-       struct dst_entry *dst = skb_dst(skb);
-       netdev_features_t features;
-       struct sk_buff *segs;
-       int ret = 0;
-
-       features = netif_skb_dev_features(skb, dst->dev);
-       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-       if (IS_ERR(segs)) {
-               kfree_skb(skb);
-               return -ENOMEM;
-       }
-
-       consume_skb(skb);
-
-       do {
-               struct sk_buff *nskb = segs->next;
-               int err;
-
-               segs->next = NULL;
-               err = dst_output(segs);
-
-               if (err && ret == 0)
-                       ret = err;
-               segs = nskb;
-       } while (segs);
-
-       return ret;
-}
 
 static int ip_forward_finish(struct sk_buff *skb)
 {
@@ -114,9 +67,6 @@ static int ip_forward_finish(struct sk_buff *skb)
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
 
-       if (ip_gso_exceeds_dst_mtu(skb))
-               return ip_forward_finish_gso(skb);
-
        return dst_output(skb);
 }
 
index c10a3ce5cbff0fc0bd0f23ac72188fd9e39fa83f..ed32313e307c43202a4710c6f5b74e14c19a4c20 100644 (file)
@@ -232,8 +232,9 @@ static void ip_expire(unsigned long arg)
                 * "Fragment Reassembly Timeout" message, per RFC792.
                 */
                if (qp->user == IP_DEFRAG_AF_PACKET ||
-                   (qp->user == IP_DEFRAG_CONNTRACK_IN &&
-                    skb_rtable(head)->rt_type != RTN_LOCAL))
+                   ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
+                    (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
+                    (skb_rtable(head)->rt_type != RTN_LOCAL)))
                        goto out_rcu_unlock;
 
 
index 1cbeba5edff90fa1ac891d4dd23cfb65464878a4..a52f50187b5495a1157c2ef8e0785da730414022 100644 (file)
@@ -211,6 +211,48 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        return -EINVAL;
 }
 
+static int ip_finish_output_gso(struct sk_buff *skb)
+{
+       netdev_features_t features;
+       struct sk_buff *segs;
+       int ret = 0;
+
+       /* common case: locally created skb or seglen is <= mtu */
+       if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
+             skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
+               return ip_finish_output2(skb);
+
+       /* Slowpath -  GSO segment length is exceeding the dst MTU.
+        *
+        * This can happen in two cases:
+        * 1) TCP GRO packet, DF bit not set
+        * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
+        * from host network stack.
+        */
+       features = netif_skb_features(skb);
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+       if (IS_ERR(segs)) {
+               kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       consume_skb(skb);
+
+       do {
+               struct sk_buff *nskb = segs->next;
+               int err;
+
+               segs->next = NULL;
+               err = ip_fragment(segs, ip_finish_output2);
+
+               if (err && ret == 0)
+                       ret = err;
+               segs = nskb;
+       } while (segs);
+
+       return ret;
+}
+
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
@@ -220,10 +262,13 @@ static int ip_finish_output(struct sk_buff *skb)
                return dst_output(skb);
        }
 #endif
-       if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
+       if (skb_is_gso(skb))
+               return ip_finish_output_gso(skb);
+
+       if (skb->len > ip_skb_dst_mtu(skb))
                return ip_fragment(skb, ip_finish_output2);
-       else
-               return ip_finish_output2(skb);
+
+       return ip_finish_output2(skb);
 }
 
 int ip_mc_output(struct sock *sk, struct sk_buff *skb)
index b3f859731c60eccfdd77517989cff5ba84f6581a..2acc2337d38bfe7f35517e13952cfa8a306c24fd 100644 (file)
@@ -540,9 +540,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        unsigned int max_headroom;      /* The extra header space needed */
        __be32 dst;
        int err;
-       bool connected = true;
+       bool connected;
 
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+       connected = (tunnel->parms.iph.daddr != 0);
 
        dst = tnl_params->daddr;
        if (dst == 0) {
@@ -882,6 +883,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
         */
        if (!IS_ERR(itn->fb_tunnel_dev)) {
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+               itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
                ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
        }
        rtnl_unlock();
index afcee51b90ede30a846bbd7a5c16b6996d955889..13ef00f1e17b88943ddee9ae9ba52b7d0efe7832 100644 (file)
@@ -239,6 +239,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 static int vti4_err(struct sk_buff *skb, u32 info)
 {
        __be32 spi;
+       __u32 mark;
        struct xfrm_state *x;
        struct ip_tunnel *tunnel;
        struct ip_esp_hdr *esph;
@@ -254,6 +255,8 @@ static int vti4_err(struct sk_buff *skb, u32 info)
        if (!tunnel)
                return -1;
 
+       mark = be32_to_cpu(tunnel->parms.o_key);
+
        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
@@ -281,7 +284,7 @@ static int vti4_err(struct sk_buff *skb, u32 info)
                return 0;
        }
 
-       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+       x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET);
        if (!x)
                return 0;
index 12e13bd82b5bba4fdd183d5ba2cda098a1c0c683..f40f321b41fc2e30b21019333efdc5757404fe0a 100644 (file)
@@ -22,7 +22,6 @@
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
 
-/* Returns new sk_buff, or NULL */
 static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
        int err;
@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
        err = ip_defrag(skb, user);
        local_bh_enable();
 
-       if (!err)
+       if (!err) {
                ip_send_check(ip_hdr(skb));
+               skb->local_df = 1;
+       }
 
        return err;
 }
index 8210964a9f19bedf17d6f3266c1fd0775f3de144..044a0ddf6a791ace04fbb1802e64563bf3fc5518 100644 (file)
@@ -236,15 +236,15 @@ exit:
 static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
                                          kgid_t *high)
 {
-       kgid_t *data = net->ipv4.sysctl_ping_group_range;
+       kgid_t *data = net->ipv4.ping_group_range.range;
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }
 
 
index db1e0da871f40a2284d67bd48c0f21d772b923f3..5e676be3daeb19e5f48df5e767a66b83ac73b432 100644 (file)
@@ -1519,7 +1519,7 @@ static int __mkroute_input(struct sk_buff *skb,
        struct in_device *out_dev;
        unsigned int flags = 0;
        bool do_cache;
-       u32 itag;
+       u32 itag = 0;
 
        /* get a working reference to the output device */
        out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
index 44eba052b43d3ab49ba7630bcd82e73e5b094472..5cde8f263d40c0eb0204d310fee99146dabb7a87 100644 (file)
@@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
 {
-       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
-       net->ipv4.sysctl_local_ports.range[0] = range[0];
-       net->ipv4.sysctl_local_ports.range[1] = range[1];
-       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+       write_seqlock(&net->ipv4.ip_local_ports.lock);
+       net->ipv4.ip_local_ports.range[0] = range[0];
+       net->ipv4.ip_local_ports.range[1] = range[1];
+       write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -57,7 +57,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                                 size_t *lenp, loff_t *ppos)
 {
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
+               container_of(table->data, struct net, ipv4.ip_local_ports.range);
        int ret;
        int range[2];
        struct ctl_table tmp = {
@@ -87,14 +87,14 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
 {
        kgid_t *data = table->data;
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+               container_of(table->data, struct net, ipv4.ping_group_range.range);
        unsigned int seq;
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 
 /* Update system visible IP port range */
@@ -102,11 +102,11 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
 {
        kgid_t *data = table->data;
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
-       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+               container_of(table->data, struct net, ipv4.ping_group_range.range);
+       write_seqlock(&net->ipv4.ip_local_ports.lock);
        data[0] = low;
        data[1] = high;
-       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+       write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -805,7 +805,7 @@ static struct ctl_table ipv4_net_table[] = {
        },
        {
                .procname       = "ping_group_range",
-               .data           = &init_net.ipv4.sysctl_ping_group_range,
+               .data           = &init_net.ipv4.ping_group_range.range,
                .maxlen         = sizeof(gid_t)*2,
                .mode           = 0644,
                .proc_handler   = ipv4_ping_group_range,
@@ -819,8 +819,8 @@ static struct ctl_table ipv4_net_table[] = {
        },
        {
                .procname       = "ip_local_port_range",
-               .maxlen         = sizeof(init_net.ipv4.sysctl_local_ports.range),
-               .data           = &init_net.ipv4.sysctl_local_ports.range,
+               .maxlen         = sizeof(init_net.ipv4.ip_local_ports.range),
+               .data           = &init_net.ipv4.ip_local_ports.range,
                .mode           = 0644,
                .proc_handler   = ipv4_local_port_range,
        },
@@ -858,20 +858,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
                        table[i].data += (void *)net - (void *)&init_net;
        }
 
-       /*
-        * Sane defaults - nobody may create ping sockets.
-        * Boot scripts should set this to distro-specific group.
-        */
-       net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
-       net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
-
-       /*
-        * Set defaults for local port range
-        */
-       seqlock_init(&net->ipv4.sysctl_local_ports.lock);
-       net->ipv4.sysctl_local_ports.range[0] =  32768;
-       net->ipv4.sysctl_local_ports.range[1] =  61000;
-
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
        if (net->ipv4.ipv4_hdr == NULL)
                goto err_reg;
index 40e701f2e1e0324af6f0af781ac6715866ad88d3..186a8ecf92fa84bda9d0f6b050131da603c7694a 100644 (file)
@@ -62,10 +62,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
        if (err)
                return err;
 
-       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-       IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
-
-       skb->protocol = htons(ETH_P_IP);
+       IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
 
        return x->outer_mode->output2(x, skb);
 }
@@ -73,27 +70,34 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
 
 int xfrm4_output_finish(struct sk_buff *skb)
 {
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+       skb->protocol = htons(ETH_P_IP);
+
+#ifdef CONFIG_NETFILTER
+       IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
+#endif
+
+       return xfrm_output(skb);
+}
+
+static int __xfrm4_output(struct sk_buff *skb)
+{
+       struct xfrm_state *x = skb_dst(skb)->xfrm;
+
 #ifdef CONFIG_NETFILTER
-       if (!skb_dst(skb)->xfrm) {
+       if (!x) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
-
-       IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
 #endif
 
-       skb->protocol = htons(ETH_P_IP);
-       return xfrm_output(skb);
+       return x->outer_mode->afinfo->output_finish(skb);
 }
 
 int xfrm4_output(struct sock *sk, struct sk_buff *skb)
 {
-       struct dst_entry *dst = skb_dst(skb);
-       struct xfrm_state *x = dst->xfrm;
-
        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
-                           NULL, dst->dev,
-                           x->outer_mode->afinfo->output_finish,
+                           NULL, skb_dst(skb)->dev, __xfrm4_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
index 7f7b243e8139defccf14165971a92a7261c29b71..a2ce0101eaac846b1e36e69a5a89c804ca247c44 100644 (file)
@@ -50,8 +50,12 @@ int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
        int ret;
        struct xfrm4_protocol *handler;
+       struct xfrm4_protocol __rcu **head = proto_handlers(protocol);
 
-       for_each_protocol_rcu(*proto_handlers(protocol), handler)
+       if (!head)
+               return 0;
+
+       for_each_protocol_rcu(*head, handler)
                if ((ret = handler->cb_handler(skb, err)) <= 0)
                        return ret;
 
@@ -64,15 +68,20 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
 {
        int ret;
        struct xfrm4_protocol *handler;
+       struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr);
 
        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
 
-       for_each_protocol_rcu(*proto_handlers(nexthdr), handler)
+       if (!head)
+               goto out;
+
+       for_each_protocol_rcu(*head, handler)
                if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
                        return ret;
 
+out:
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
        kfree_skb(skb);
@@ -208,6 +217,9 @@ int xfrm4_protocol_register(struct xfrm4_protocol *handler,
        int ret = -EEXIST;
        int priority = handler->priority;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm4_protocol_mutex);
 
        if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -250,6 +262,9 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
        struct xfrm4_protocol *t;
        int ret = -ENOENT;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm4_protocol_mutex);
 
        for (pprev = proto_handlers(protocol);
index 59f95affceb0773d052184bdf5fec9f276433d63..b2f091566f88453bce5eb3c33b47e1c9c040447c 100644 (file)
@@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
        unsigned int off;
        u16 flush = 1;
        int proto;
-       __wsum csum;
 
        off = skb_gro_offset(skb);
        hlen = off + sizeof(*iph);
@@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
        NAPI_GRO_CB(skb)->flush |= flush;
 
-       csum = skb->csum;
-       skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+       skb_gro_postpull_rcsum(skb, iph, nlen);
 
        pp = ops->callbacks.gro_receive(head, skb);
 
-       skb->csum = csum;
-
 out_unlock:
        rcu_read_unlock();
 
index 40e7581374f7006c6f8c436ed686919ac93c2b19..fbf11562b54c1a7c8809f33cc969c66319517377 100644 (file)
@@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 
 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
-       if (skb->len <= mtu || skb->local_df)
+       if (skb->len <= mtu)
                return false;
 
+       /* ipv6 conntrack defrag sets max_frag_size + local_df */
        if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
                return true;
 
+       if (skb->local_df)
+               return false;
+
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
                return false;
 
@@ -1225,7 +1229,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                unsigned int maxnonfragsize, headersize;
 
                headersize = sizeof(struct ipv6hdr) +
-                            (opt ? opt->tot_len : 0) +
+                            (opt ? opt->opt_flen + opt->opt_nflen : 0) +
                             (dst_allfrag(&rt->dst) ?
                              sizeof(struct frag_hdr) : 0) +
                             rt->rt6i_nfheader_len;
index b05b609f69d1cd3e58bd525cb0b5e8b11d429b80..f6a66bb4114db3af0f23ee1f950acf8b9720304d 100644 (file)
@@ -1557,7 +1557,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
 {
        u8 proto;
 
-       if (!data)
+       if (!data || !data[IFLA_IPTUN_PROTO])
                return 0;
 
        proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
index b7c0f827140b402685cc29049cb56646471c2cf2..6cc9f9371cc57cd6b0233815a73134dddfa6207c 100644 (file)
@@ -511,6 +511,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                    u8 type, u8 code, int offset, __be32 info)
 {
        __be32 spi;
+       __u32 mark;
        struct xfrm_state *x;
        struct ip6_tnl *t;
        struct ip_esp_hdr *esph;
@@ -524,6 +525,8 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (!t)
                return -1;
 
+       mark = be32_to_cpu(t->parms.o_key);
+
        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data + offset);
@@ -545,7 +548,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
            type != NDISC_REDIRECT)
                return 0;
 
-       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+       x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET6);
        if (!x)
                return 0;
@@ -1097,7 +1100,6 @@ static int __init vti6_tunnel_init(void)
 
        err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
        if (err < 0) {
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
@@ -1106,7 +1108,6 @@ static int __init vti6_tunnel_init(void)
        err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
        if (err < 0) {
                xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
@@ -1116,7 +1117,6 @@ static int __init vti6_tunnel_init(void)
        if (err < 0) {
                xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
                xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
index 09a22f4f36c9e069c6dfb3074909691ef2c82399..ca8d4ea48a5d9fa641bf129a6fc5e3b428799fa4 100644 (file)
@@ -851,7 +851,7 @@ out:
 static void ndisc_recv_na(struct sk_buff *skb)
 {
        struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
-       const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+       struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
        const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
        u8 *lladdr = NULL;
        u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
@@ -944,10 +944,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
                        /*
                         * Change: router to host
                         */
-                       struct rt6_info *rt;
-                       rt = rt6_get_dflt_router(saddr, dev);
-                       if (rt)
-                               ip6_del_rt(rt);
+                       rt6_clean_tohost(dev_net(dev),  saddr);
                }
 
 out:
index 95f3f1da0d7f2ff20c3afa3eeda315dd9e2e6b5f..d38e6a8d8b9fb82ec7d583a5ab2abc652838d470 100644 (file)
@@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
                .daddr = iph->daddr,
                .saddr = iph->saddr,
        };
+       int err;
 
        dst = ip6_route_output(net, skb->sk, &fl6);
-       if (dst->error) {
+       err = dst->error;
+       if (err) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
                LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
                dst_release(dst);
-               return dst->error;
+               return err;
        }
 
        /* Drop old route. */
index 004fffb6c2218a12a9619eb08f359a8f2b391297..6ebdb7b6744cc933801dcf4c6b3dea6ddce8a89c 100644 (file)
@@ -2234,6 +2234,27 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
        fib6_clean_all(net, fib6_remove_prefsrc, &adni);
 }
 
+#define RTF_RA_ROUTER          (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
+#define RTF_CACHE_GATEWAY      (RTF_GATEWAY | RTF_CACHE)
+
+/* Remove routers and update dst entries when gateway turn into host. */
+static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
+{
+       struct in6_addr *gateway = (struct in6_addr *)arg;
+
+       if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
+            ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
+            ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
+               return -1;
+       }
+       return 0;
+}
+
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
+{
+       fib6_clean_all(net, fib6_clean_tohost, gateway);
+}
+
 struct arg_dev_net {
        struct net_device *dev;
        struct net *net;
@@ -2709,6 +2730,9 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
        if (tb[RTA_OIF])
                oif = nla_get_u32(tb[RTA_OIF]);
 
+       if (tb[RTA_MARK])
+               fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
+
        if (iif) {
                struct net_device *dev;
                int flags = 0;
index 0d78132ff18aa018fa4e9918dbfb0dd57f95147a..8517d3cd1aed460bbfb1bfb0f515924f008b790d 100644 (file)
@@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
        if (NAPI_GRO_CB(skb)->flush)
                goto skip_csum;
 
-       wsum = skb->csum;
+       wsum = NAPI_GRO_CB(skb)->csum;
 
        switch (skb->ip_summed) {
        case CHECKSUM_NONE:
index 19ef329bdbf8e7418fa1d352bb6c90218935831e..b930d080c66f231f338c7ea860c8c1d798d91fea 100644 (file)
@@ -114,12 +114,6 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
        if (err)
                return err;
 
-       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-#ifdef CONFIG_NETFILTER
-       IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
-
-       skb->protocol = htons(ETH_P_IPV6);
        skb->local_df = 1;
 
        return x->outer_mode->output2(x, skb);
@@ -128,11 +122,13 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
 
 int xfrm6_output_finish(struct sk_buff *skb)
 {
+       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+       skb->protocol = htons(ETH_P_IPV6);
+
 #ifdef CONFIG_NETFILTER
        IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
 #endif
 
-       skb->protocol = htons(ETH_P_IPV6);
        return xfrm_output(skb);
 }
 
@@ -142,6 +138,13 @@ static int __xfrm6_output(struct sk_buff *skb)
        struct xfrm_state *x = dst->xfrm;
        int mtu;
 
+#ifdef CONFIG_NETFILTER
+       if (!x) {
+               IP6CB(skb)->flags |= IP6SKB_REROUTED;
+               return dst_output(skb);
+       }
+#endif
+
        if (skb->protocol == htons(ETH_P_IPV6))
                mtu = ip6_skb_dst_mtu(skb);
        else
@@ -165,6 +168,7 @@ static int __xfrm6_output(struct sk_buff *skb)
 
 int xfrm6_output(struct sock *sk, struct sk_buff *skb)
 {
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
-                      skb_dst(skb)->dev, __xfrm6_output);
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
+                           NULL, skb_dst(skb)->dev, __xfrm6_output,
+                           !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
index 6ab989c486f7ee66c5cd6235ce4f758fe9f277a6..54d13f8dbbae10670756eee0b16b898423d06060 100644 (file)
@@ -50,6 +50,10 @@ int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
        int ret;
        struct xfrm6_protocol *handler;
+       struct xfrm6_protocol __rcu **head = proto_handlers(protocol);
+
+       if (!head)
+               return 0;
 
        for_each_protocol_rcu(*proto_handlers(protocol), handler)
                if ((ret = handler->cb_handler(skb, err)) <= 0)
@@ -184,10 +188,12 @@ int xfrm6_protocol_register(struct xfrm6_protocol *handler,
        struct xfrm6_protocol __rcu **pprev;
        struct xfrm6_protocol *t;
        bool add_netproto = false;
-
        int ret = -EEXIST;
        int priority = handler->priority;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm6_protocol_mutex);
 
        if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -230,6 +236,9 @@ int xfrm6_protocol_deregister(struct xfrm6_protocol *handler,
        struct xfrm6_protocol *t;
        int ret = -ENOENT;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm6_protocol_mutex);
 
        for (pprev = proto_handlers(protocol);
index 01e77b0ae0755d037093e7597a42db2a66378a51..8c9d7302c84682f4eec438405cf312da02cf9aab 100644 (file)
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
                spin_lock_irqsave(&list->lock, flags);
 
                while (list_skb != (struct sk_buff *)list) {
-                       if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
+                       if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
                                this = list_skb;
                                break;
                        }
index 222c28b75315f1ab43226e08566a5f911c6bacc7..f169b6ee94ee8d6ee9c6daa5dc047c5f8b1d2740 100644 (file)
@@ -317,6 +317,7 @@ struct ieee80211_roc_work {
 
        bool started, abort, hw_begun, notified;
        bool to_be_freed;
+       bool on_channel;
 
        unsigned long hw_start_time;
 
index dee50aefd6e868e247ba869e9e9883d4640330e3..27600a9808baeaa31be82ecb0f4218225b28665e 100644 (file)
@@ -3598,18 +3598,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
 
        sdata_lock(sdata);
 
-       if (ifmgd->auth_data) {
+       if (ifmgd->auth_data || ifmgd->assoc_data) {
+               const u8 *bssid = ifmgd->auth_data ?
+                               ifmgd->auth_data->bss->bssid :
+                               ifmgd->assoc_data->bss->bssid;
+
                /*
-                * If we are trying to authenticate while suspending, cfg80211
-                * won't know and won't actually abort those attempts, thus we
-                * need to do that ourselves.
+                * If we are trying to authenticate / associate while suspending,
+                * cfg80211 won't know and won't actually abort those attempts,
+                * thus we need to do that ourselves.
                 */
-               ieee80211_send_deauth_disassoc(sdata,
-                                              ifmgd->auth_data->bss->bssid,
+               ieee80211_send_deauth_disassoc(sdata, bssid,
                                               IEEE80211_STYPE_DEAUTH,
                                               WLAN_REASON_DEAUTH_LEAVING,
                                               false, frame_buf);
-               ieee80211_destroy_auth_data(sdata, false);
+               if (ifmgd->assoc_data)
+                       ieee80211_destroy_assoc_data(sdata, false);
+               if (ifmgd->auth_data)
+                       ieee80211_destroy_auth_data(sdata, false);
                cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
                                      IEEE80211_DEAUTH_FRAME_LEN);
        }
index 6fb38558a5e6c79d81fc6ba4a4b03ff4313a701a..7a17decd27f91af8646da20b9ab75fc3e303e3c4 100644 (file)
@@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
                container_of(work, struct ieee80211_roc_work, work.work);
        struct ieee80211_sub_if_data *sdata = roc->sdata;
        struct ieee80211_local *local = sdata->local;
-       bool started;
+       bool started, on_channel;
 
        mutex_lock(&local->mtx);
 
@@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work)
        if (!roc->started) {
                struct ieee80211_roc_work *dep;
 
-               /* start this ROC */
-               ieee80211_offchannel_stop_vifs(local);
+               WARN_ON(local->use_chanctx);
+
+               /* If actually operating on the desired channel (with at least
+                * 20 MHz channel width) don't stop all the operations but still
+                * treat it as though the ROC operation started properly, so
+                * other ROC operations won't interfere with this one.
+                */
+               roc->on_channel = roc->chan == local->_oper_chandef.chan &&
+                                 local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
+                                 local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
 
-               /* switch channel etc */
+               /* start this ROC */
                ieee80211_recalc_idle(local);
 
-               local->tmp_channel = roc->chan;
-               ieee80211_hw_config(local, 0);
+               if (!roc->on_channel) {
+                       ieee80211_offchannel_stop_vifs(local);
+
+                       local->tmp_channel = roc->chan;
+                       ieee80211_hw_config(local, 0);
+               }
 
                /* tell userspace or send frame */
                ieee80211_handle_roc_started(roc);
@@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
  finish:
                list_del(&roc->list);
                started = roc->started;
+               on_channel = roc->on_channel;
                ieee80211_roc_notify_destroy(roc, !roc->abort);
 
-               if (started) {
+               if (started && !on_channel) {
                        ieee80211_flush_queues(local, NULL);
 
                        local->tmp_channel = NULL;
index 216c45b949e513382447050eb560098a5edaa4b3..2b608b2b70ece8a6ed0cb8f8d8eed5b1d44e1536 100644 (file)
@@ -1231,7 +1231,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
                    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
                        sta->last_rx = jiffies;
-                       if (ieee80211_is_data(hdr->frame_control)) {
+                       if (ieee80211_is_data(hdr->frame_control) &&
+                           !is_multicast_ether_addr(hdr->addr1)) {
                                sta->last_rx_rate_idx = status->rate_idx;
                                sta->last_rx_rate_flag = status->flag;
                                sta->last_rx_rate_vht_flag = status->vht_flag;
index 137a192e64bc3c2aa61cc9c5912a89bd3008cbe3..847d92f6bef60856be53ad13f0534695e4c6dac7 100644 (file)
@@ -1148,7 +1148,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        atomic_dec(&ps->num_sta_ps);
 
        /* This station just woke up and isn't aware of our SMPS state */
-       if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
+       if (!ieee80211_vif_is_mesh(&sdata->vif) &&
+           !ieee80211_smps_is_restrictive(sta->known_smps_mode,
                                           sdata->smps_mode) &&
            sta->known_smps_mode != sdata->bss->req_smps &&
            sta_info_tx_streams(sta) != 1) {
index 00ba90b02ab2ab79c01d58dc5ba25785993f8dd6..60cb7a665976e10e7a909a9545b7643cf34e67a4 100644 (file)
@@ -314,10 +314,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
            !is_multicast_ether_addr(hdr->addr1))
                txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
 
-       if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
-           (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
                txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
-       else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
                txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
 
        put_unaligned_le16(txflags, pos);
index a0b0aea76525c341711c129519a1c3f89a704bb9..cec5b60487a4032e2b805df374616ea9ad038669 100644 (file)
 
 #define VIF_ENTRY      __field(enum nl80211_iftype, vif_type) __field(void *, sdata)   \
                        __field(bool, p2p)                                              \
-                       __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+                       __string(vif_name, sdata->name)
 #define VIF_ASSIGN     __entry->vif_type = sdata->vif.type; __entry->sdata = sdata;    \
                        __entry->p2p = sdata->vif.p2p;                                  \
-                       __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
+                       __assign_str(vif_name, sdata->name)
 #define VIF_PR_FMT     " vif:%s(%d%s)"
 #define VIF_PR_ARG     __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
 
index 275c94f995f7c8401749cbbafb249bb52a418be7..3c365837e910edce60ac2348b0002361c4fca6ea 100644 (file)
@@ -1780,7 +1780,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        mutex_unlock(&local->mtx);
 
        if (sched_scan_stopped)
-               cfg80211_sched_scan_stopped(local->hw.wiphy);
+               cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
 
        /*
         * If this is for hw restart things are still running.
index e9e36a256165842ac112e35e612ba79c8f86d305..9265adfdabfcf99acbdf1598067884188c500a49 100644 (file)
@@ -129,9 +129,12 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        if (!vht_cap_ie || !sband->vht_cap.vht_supported)
                return;
 
-       /* A VHT STA must support 40 MHz */
-       if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
-               return;
+       /*
+        * A VHT STA must support 40 MHz, but if we verify that here
+        * then we break a few things - some APs (e.g. Netgear R6300v2
+        * and others based on the BCM4360 chipset) will unset this
+        * capability bit when operating in 20 MHz.
+        */
 
        vht_cap->vht_supported = true;
 
index ccc46fa5edbce5e52710a22ae502e49a0f59e0a5..58579634427d2fcbf7f35556424a959697b8655e 100644 (file)
@@ -1336,6 +1336,9 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 #ifdef CONFIG_NF_NAT_NEEDED
        int ret;
 
+       if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+               return 0;
+
        ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
                                        cda[CTA_NAT_DST]);
        if (ret < 0)
index 804105391b9a903354ae9517d602c8c4638a8879..345acfb1720b14f00aae0e5937ab07bfb90e9482 100644 (file)
@@ -66,20 +66,6 @@ struct nft_jumpstack {
        int                     rulenum;
 };
 
-static inline void
-nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
-               struct nft_jumpstack *jumpstack, unsigned int stackptr)
-{
-       struct nft_stats __percpu *stats;
-       const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
-
-       rcu_read_lock_bh();
-       stats = rcu_dereference(nft_base_chain(chain)->stats);
-       __this_cpu_inc(stats->pkts);
-       __this_cpu_add(stats->bytes, pkt->skb->len);
-       rcu_read_unlock_bh();
-}
-
 enum nft_trace {
        NFT_TRACE_RULE,
        NFT_TRACE_RETURN,
@@ -117,13 +103,14 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
 unsigned int
 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 {
-       const struct nft_chain *chain = ops->priv;
+       const struct nft_chain *chain = ops->priv, *basechain = chain;
        const struct nft_rule *rule;
        const struct nft_expr *expr, *last;
        struct nft_data data[NFT_REG_MAX + 1];
        unsigned int stackptr = 0;
        struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-       int rulenum = 0;
+       struct nft_stats __percpu *stats;
+       int rulenum;
        /*
         * Cache cursor to avoid problems in case that the cursor is updated
         * while traversing the ruleset.
@@ -131,6 +118,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
        unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
 
 do_chain:
+       rulenum = 0;
        rule = list_entry(&chain->rules, struct nft_rule, list);
 next_rule:
        data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
@@ -156,8 +144,10 @@ next_rule:
                switch (data[NFT_REG_VERDICT].verdict) {
                case NFT_BREAK:
                        data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
-                       /* fall through */
+                       continue;
                case NFT_CONTINUE:
+                       if (unlikely(pkt->skb->nf_trace))
+                               nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
                        continue;
                }
                break;
@@ -183,37 +173,44 @@ next_rule:
                jumpstack[stackptr].rule  = rule;
                jumpstack[stackptr].rulenum = rulenum;
                stackptr++;
-               /* fall through */
+               chain = data[NFT_REG_VERDICT].chain;
+               goto do_chain;
        case NFT_GOTO:
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
                chain = data[NFT_REG_VERDICT].chain;
                goto do_chain;
        case NFT_RETURN:
                if (unlikely(pkt->skb->nf_trace))
                        nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
-
-               /* fall through */
+               break;
        case NFT_CONTINUE:
+               if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
+                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
                break;
        default:
                WARN_ON(1);
        }
 
        if (stackptr > 0) {
-               if (unlikely(pkt->skb->nf_trace))
-                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
-
                stackptr--;
                chain = jumpstack[stackptr].chain;
                rule  = jumpstack[stackptr].rule;
                rulenum = jumpstack[stackptr].rulenum;
                goto next_rule;
        }
-       nft_chain_stats(chain, pkt, jumpstack, stackptr);
 
        if (unlikely(pkt->skb->nf_trace))
-               nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY);
+               nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
+
+       rcu_read_lock_bh();
+       stats = rcu_dereference(nft_base_chain(basechain)->stats);
+       __this_cpu_inc(stats->pkts);
+       __this_cpu_add(stats->bytes, pkt->skb->len);
+       rcu_read_unlock_bh();
 
-       return nft_base_chain(chain)->policy;
+       return nft_base_chain(basechain)->policy;
 }
 EXPORT_SYMBOL_GPL(nft_do_chain);
 
index e009087620e30ecdae7fe1bda155c0d905d9f6c0..23ef77c60fffc60a1678f79dfbb769fe9be6a274 100644 (file)
@@ -256,15 +256,15 @@ replay:
 #endif
                {
                        nfnl_unlock(subsys_id);
-                       kfree_skb(nskb);
-                       return netlink_ack(skb, nlh, -EOPNOTSUPP);
+                       netlink_ack(skb, nlh, -EOPNOTSUPP);
+                       return kfree_skb(nskb);
                }
        }
 
        if (!ss->commit || !ss->abort) {
                nfnl_unlock(subsys_id);
-               kfree_skb(nskb);
-               return netlink_ack(skb, nlh, -EOPNOTSUPP);
+               netlink_ack(skb, nlh, -EOPNOTSUPP);
+               return kfree_skb(skb);
        }
 
        while (skb->len >= nlmsg_total_size(0)) {
index 7633a752c65e99189c3e7603e2ebdd6e7438b356..0ad080790a32a341a1ddc57d632302563964a247 100644 (file)
@@ -99,7 +99,7 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
        _debug("tktlen: %x", tktlen);
        if (tktlen > AFSTOKEN_RK_TIX_MAX)
                return -EKEYREJECTED;
-       if (8 * 4 + tktlen != toklen)
+       if (toklen < 8 * 4 + tktlen)
                return -EKEYREJECTED;
 
        plen = sizeof(*token) + sizeof(*token->kad) + tktlen;
index eed8404443d8f0145942c3e459b79934ada9c48d..f435a88d899afff7c132085ccece5427461e5490 100644 (file)
@@ -188,6 +188,12 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
        [TCA_TCINDEX_CLASSID]           = { .type = NLA_U32 },
 };
 
+static void tcindex_filter_result_init(struct tcindex_filter_result *r)
+{
+       memset(r, 0, sizeof(*r));
+       tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+}
+
 static int
 tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                  u32 handle, struct tcindex_data *p,
@@ -207,15 +213,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                return err;
 
        memcpy(&cp, p, sizeof(cp));
-       memset(&new_filter_result, 0, sizeof(new_filter_result));
-       tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+       tcindex_filter_result_init(&new_filter_result);
 
+       tcindex_filter_result_init(&cr);
        if (old_r)
-               memcpy(&cr, r, sizeof(cr));
-       else {
-               memset(&cr, 0, sizeof(cr));
-               tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
-       }
+               cr.res = r->res;
 
        if (tb[TCA_TCINDEX_HASH])
                cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        err = -ENOMEM;
        if (!cp.perfect && !cp.h) {
                if (valid_perfect_hash(&cp)) {
+                       int i;
+
                        cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
                        if (!cp.perfect)
                                goto errout;
+                       for (i = 0; i < cp.hash; i++)
+                               tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
+                                             TCA_TCINDEX_POLICE);
                        balloc = 1;
                } else {
                        cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
@@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                tcf_bind_filter(tp, &cr.res, base);
        }
 
-       tcf_exts_change(tp, &cr.exts, &e);
+       if (old_r)
+               tcf_exts_change(tp, &r->exts, &e);
+       else
+               tcf_exts_change(tp, &cr.exts, &e);
 
        tcf_tree_lock(tp);
        if (old_r && old_r != r)
-               memset(old_r, 0, sizeof(*old_r));
+               tcindex_filter_result_init(old_r);
 
        memcpy(p, &cp, sizeof(cp));
-       memcpy(r, &cr, sizeof(cr));
+       r->res = cr.res;
 
        if (r == &new_filter_result) {
                struct tcindex_filter **fp;
index 7d09a712cb1f1353f13310f5c68b38e750d199a6..88f108edfb586ef3b2d17d15cf66b00da84f93f0 100644 (file)
@@ -284,14 +284,22 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
 
-void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+       ASSERT_RTNL();
+
        trace_cfg80211_sched_scan_stopped(wiphy);
 
-       rtnl_lock();
        __cfg80211_stop_sched_scan(rdev, true);
+}
+EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl);
+
+void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+{
+       rtnl_lock();
+       cfg80211_sched_scan_stopped_rtnl(wiphy);
        rtnl_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
index acdcb4a81817b7c78e8e721ff632284b9b806fa9..3546a77033de30d0d2593c6ac4fbac8f58659025 100644 (file)
@@ -234,7 +234,6 @@ void cfg80211_conn_work(struct work_struct *work)
                                        NULL, 0, NULL, 0,
                                        WLAN_STATUS_UNSPECIFIED_FAILURE,
                                        false, NULL);
-                       cfg80211_sme_free(wdev);
                }
                wdev_unlock(wdev);
        }
@@ -648,6 +647,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                        cfg80211_unhold_bss(bss_from_pub(bss));
                        cfg80211_put_bss(wdev->wiphy, bss);
                }
+               cfg80211_sme_free(wdev);
                return;
        }
 
index fd8fa9aa7c4edd698430a9cb0647a8d26095a9a2..5b3add31f9f1202610e67e4d4868598e5846e594 100755 (executable)
@@ -25,7 +25,7 @@ cat << EOF
 #define __IGNORE_rmdir         /* unlinkat */
 #define __IGNORE_lchown                /* fchownat */
 #define __IGNORE_access                /* faccessat */
-#define __IGNORE_rename                /* renameat */
+#define __IGNORE_rename                /* renameat2 */
 #define __IGNORE_readlink      /* readlinkat */
 #define __IGNORE_symlink       /* symlinkat */
 #define __IGNORE_utimes                /* futimesat */
@@ -37,6 +37,9 @@ cat << EOF
 #define __IGNORE_lstat64       /* fstatat64 */
 #endif
 
+/* Missing flags argument */
+#define __IGNORE_renameat      /* renameat2 */
+
 /* CLOEXEC flag */
 #define __IGNORE_pipe          /* pipe2 */
 #define __IGNORE_dup2          /* dup3 */
index 8365909f5f8cfc6f404fe5ce21b936884298d13f..9134dbf70d3ee6898664f895905c8452e89a01c3 100644 (file)
@@ -306,57 +306,138 @@ static int devcgroup_seq_show(struct seq_file *m, void *v)
 }
 
 /**
- * may_access - verifies if a new exception is part of what is allowed
- *             by a dev cgroup based on the default policy +
- *             exceptions. This is used to make sure a child cgroup
- *             won't have more privileges than its parent or to
- *             verify if a certain access is allowed.
- * @dev_cgroup: dev cgroup to be tested against
- * @refex: new exception
- * @behavior: behavior of the exception
+ * match_exception     - iterates the exception list trying to find a complete match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a complete match if an exception is found that will
+ * contain the entire range of provided parameters.
+ *
+ * Return: true in case it matches an exception completely
  */
-static bool may_access(struct dev_cgroup *dev_cgroup,
-                      struct dev_exception_item *refex,
-                      enum devcg_behavior behavior)
+static bool match_exception(struct list_head *exceptions, short type,
+                           u32 major, u32 minor, short access)
 {
        struct dev_exception_item *ex;
-       bool match = false;
 
-       rcu_lockdep_assert(rcu_read_lock_held() ||
-                          lockdep_is_held(&devcgroup_mutex),
-                          "device_cgroup::may_access() called without proper synchronization");
+       list_for_each_entry_rcu(ex, exceptions, list) {
+               if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+                       continue;
+               if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+                       continue;
+               if (ex->major != ~0 && ex->major != major)
+                       continue;
+               if (ex->minor != ~0 && ex->minor != minor)
+                       continue;
+               /* provided access cannot have more than the exception rule */
+               if (access & (~ex->access))
+                       continue;
+               return true;
+       }
+       return false;
+}
+
+/**
+ * match_exception_partial - iterates the exception list trying to find a partial match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a partial match if an exception's range is found to
+ * contain *any* of the devices specified by provided parameters. This is
+ * used to make sure no extra access is being granted that is forbidden by
+ * any of the exception list.
+ *
+ * Return: true in case the provided range mat matches an exception completely
+ */
+static bool match_exception_partial(struct list_head *exceptions, short type,
+                                   u32 major, u32 minor, short access)
+{
+       struct dev_exception_item *ex;
 
-       list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
-               if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+       list_for_each_entry_rcu(ex, exceptions, list) {
+               if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
                        continue;
-               if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+               if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
                        continue;
-               if (ex->major != ~0 && ex->major != refex->major)
+               /*
+                * We must be sure that both the exception and the provided
+                * range aren't masking all devices
+                */
+               if (ex->major != ~0 && major != ~0 && ex->major != major)
                        continue;
-               if (ex->minor != ~0 && ex->minor != refex->minor)
+               if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
                        continue;
-               if (refex->access & (~ex->access))
+               /*
+                * In order to make sure the provided range isn't matching
+                * an exception, all its access bits shouldn't match the
+                * exception's access bits
+                */
+               if (!(access & ex->access))
                        continue;
-               match = true;
-               break;
+               return true;
        }
+       return false;
+}
+
+/**
+ * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
+ * @dev_cgroup: dev cgroup to be tested against
+ * @refex: new exception
+ * @behavior: behavior of the exception's dev_cgroup
+ *
+ * This is used to make sure a child cgroup won't have more privileges
+ * than its parent
+ */
+static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
+                         struct dev_exception_item *refex,
+                         enum devcg_behavior behavior)
+{
+       bool match = false;
+
+       rcu_lockdep_assert(rcu_read_lock_held() ||
+                          lockdep_is_held(&devcgroup_mutex),
+                          "device_cgroup:verify_new_ex called without proper synchronization");
 
        if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
                if (behavior == DEVCG_DEFAULT_ALLOW) {
-                       /* the exception will deny access to certain devices */
+                       /*
+                        * new exception in the child doesn't matter, only
+                        * adding extra restrictions
+                        */ 
                        return true;
                } else {
-                       /* the exception will allow access to certain devices */
+                       /*
+                        * new exception in the child will add more devices
+                        * that can be acessed, so it can't match any of
+                        * parent's exceptions, even slightly
+                        */ 
+                       match = match_exception_partial(&dev_cgroup->exceptions,
+                                                       refex->type,
+                                                       refex->major,
+                                                       refex->minor,
+                                                       refex->access);
+
                        if (match)
-                               /*
-                                * a new exception allowing access shouldn't
-                                * match an parent's exception
-                                */
                                return false;
                        return true;
                }
        } else {
-               /* only behavior == DEVCG_DEFAULT_DENY allowed here */
+               /*
+                * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
+                * the new exception will add access to more devices and must
+                * be contained completely in an parent's exception to be
+                * allowed
+                */
+               match = match_exception(&dev_cgroup->exceptions, refex->type,
+                                       refex->major, refex->minor,
+                                       refex->access);
+
                if (match)
                        /* parent has an exception that matches the proposed */
                        return true;
@@ -378,7 +459,38 @@ static int parent_has_perm(struct dev_cgroup *childcg,
 
        if (!parent)
                return 1;
-       return may_access(parent, ex, childcg->behavior);
+       return verify_new_ex(parent, ex, childcg->behavior);
+}
+
+/**
+ * parent_allows_removal - verify if it's ok to remove an exception
+ * @childcg: child cgroup from where the exception will be removed
+ * @ex: exception being removed
+ *
+ * When removing an exception in cgroups with default ALLOW policy, it must
+ * be checked if removing it will give the child cgroup more access than the
+ * parent.
+ *
+ * Return: true if it's ok to remove exception, false otherwise
+ */
+static bool parent_allows_removal(struct dev_cgroup *childcg,
+                                 struct dev_exception_item *ex)
+{
+       struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));
+
+       if (!parent)
+               return true;
+
+       /* It's always allowed to remove access to devices */
+       if (childcg->behavior == DEVCG_DEFAULT_DENY)
+               return true;
+
+       /*
+        * Make sure you're not removing part of or a whole exception existing
+        * in the parent cgroup
+        */
+       return !match_exception_partial(&parent->exceptions, ex->type,
+                                       ex->major, ex->minor, ex->access);
 }
 
 /**
@@ -616,17 +728,21 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 
        switch (filetype) {
        case DEVCG_ALLOW:
-               if (!parent_has_perm(devcgroup, &ex))
-                       return -EPERM;
                /*
                 * If the default policy is to allow by default, try to remove
                 * an matching exception instead. And be silent about it: we
                 * don't want to break compatibility
                 */
                if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+                       /* Check if the parent allows removing it first */
+                       if (!parent_allows_removal(devcgroup, &ex))
+                               return -EPERM;
                        dev_exception_rm(devcgroup, &ex);
-                       return 0;
+                       break;
                }
+
+               if (!parent_has_perm(devcgroup, &ex))
+                       return -EPERM;
                rc = dev_exception_add(devcgroup, &ex);
                break;
        case DEVCG_DENY:
@@ -704,18 +820,18 @@ static int __devcgroup_check_permission(short type, u32 major, u32 minor,
                                        short access)
 {
        struct dev_cgroup *dev_cgroup;
-       struct dev_exception_item ex;
-       int rc;
-
-       memset(&ex, 0, sizeof(ex));
-       ex.type = type;
-       ex.major = major;
-       ex.minor = minor;
-       ex.access = access;
+       bool rc;
 
        rcu_read_lock();
        dev_cgroup = task_devcgroup(current);
-       rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
+       if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
+               /* Can't match any of the exceptions, even partially */
+               rc = !match_exception_partial(&dev_cgroup->exceptions,
+                                             type, major, minor, access);
+       else
+               /* Need to match completely one exception to be allowed */
+               rc = match_exception(&dev_cgroup->exceptions, type, major,
+                                    minor, access);
        rcu_read_unlock();
 
        if (!rc)
index 6496822c1808b53c172d56df4382fb88e9e20e95..1ff78ec9f0ac508734d536afbe187b9039127e14 100644 (file)
@@ -818,12 +818,14 @@ int snd_sbmixer_new(struct snd_sb *chip)
                        return err;
                break;
        case SB_HW_DT019X:
-               if ((err = snd_sbmixer_init(chip,
-                                           snd_dt019x_controls,
-                                           ARRAY_SIZE(snd_dt019x_controls),
-                                           snd_dt019x_init_values,
-                                           ARRAY_SIZE(snd_dt019x_init_values),
-                                           "DT019X")) < 0)
+               err = snd_sbmixer_init(chip,
+                                      snd_dt019x_controls,
+                                      ARRAY_SIZE(snd_dt019x_controls),
+                                      snd_dt019x_init_values,
+                                      ARRAY_SIZE(snd_dt019x_init_values),
+                                      "DT019X");
+               if (err < 0)
+                       return err;
                break;
        default:
                strcpy(card->mixername, "???");
index b540ad71eb0d733ab217550a66ac40eb35e22da6..2c54629d62d10db0166d6feb32588f1f0d3f0543 100644 (file)
@@ -1367,6 +1367,12 @@ static int azx_first_init(struct azx *chip)
        /* initialize streams */
        azx_init_stream(chip);
 
+       /* workaround for Broadwell HDMI: the first stream is broken,
+        * so mask it by keeping it as if opened
+        */
+       if (pci->vendor == 0x8086 && pci->device == 0x160c)
+               chip->azx_dev[0].opened = 1;
+
        /* initialize chip */
        azx_init_pci(chip);
        azx_init_chip(chip, (probe_only[dev] & 2) == 0);
index 1edbb9c47c2d3b4893d2286d91a7cc8fda555eca..b4218a19df22209e227538b0056a79cbe1c21279 100644 (file)
@@ -3332,6 +3332,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0051, .name = "GPU 51 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0060, .name = "GPU 60 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0067, .name = "MCP67 HDMI",      .patch = patch_nvhdmi_2ch },
+{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
 { .id = 0x11069f81, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3387,6 +3388,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044");
 MODULE_ALIAS("snd-hda-codec-id:10de0051");
 MODULE_ALIAS("snd-hda-codec-id:10de0060");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
+MODULE_ALIAS("snd-hda-codec-id:10de0071");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
 MODULE_ALIAS("snd-hda-codec-id:11069f81");
index 5f7c765391f111f70970239b0a486a03c0aab99f..49e884fb3e5db064426758b8c8612a42a1633830 100644 (file)
@@ -4616,6 +4616,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0657, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x065c, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -4624,6 +4625,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x067e, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0680, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0684, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
index fa158cfe9b32d396d09bdb266727c57203c7e62d..d1929de641e24eb4ec733820f8b7dc717d542d33 100644 (file)
@@ -376,7 +376,7 @@ static int aic31xx_dapm_power_event(struct snd_soc_dapm_widget *w,
                reg = AIC31XX_ADCFLAG;
                break;
        default:
-               dev_err(w->codec->dev, "Unknown widget '%s' calling %s/n",
+               dev_err(w->codec->dev, "Unknown widget '%s' calling %s\n",
                        w->name, __func__);
                return -EINVAL;
        }
index 5522d2566c6742d5ee19f91b4358b30f51276b62..ecd26dd2e442fb2e4019eca0238fdb9edba4a73e 100644 (file)
@@ -154,6 +154,7 @@ static struct reg_default wm8962_reg[] = {
        { 40, 0x0000 },   /* R40    - SPKOUTL volume */
        { 41, 0x0000 },   /* R41    - SPKOUTR volume */
 
+       { 49, 0x0010 },   /* R49    - Class D Control 1 */
        { 51, 0x0003 },   /* R51    - Class D Control 2 */
 
        { 56, 0x0506 },   /* R56    - Clocking 4 */
@@ -795,7 +796,6 @@ static bool wm8962_volatile_register(struct device *dev, unsigned int reg)
        case WM8962_ALC2:
        case WM8962_THERMAL_SHUTDOWN_STATUS:
        case WM8962_ADDITIONAL_CONTROL_4:
-       case WM8962_CLASS_D_CONTROL_1:
        case WM8962_DC_SERVO_6:
        case WM8962_INTERRUPT_STATUS_1:
        case WM8962_INTERRUPT_STATUS_2:
@@ -2929,13 +2929,22 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
 static int wm8962_mute(struct snd_soc_dai *dai, int mute)
 {
        struct snd_soc_codec *codec = dai->codec;
-       int val;
+       int val, ret;
 
        if (mute)
-               val = WM8962_DAC_MUTE;
+               val = WM8962_DAC_MUTE | WM8962_DAC_MUTE_ALT;
        else
                val = 0;
 
+       /*
+        * The DAC mute bit is mirrored in two registers, update both to keep
+        * the register cache consistent.
+        */
+       ret = snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_1,
+                                 WM8962_DAC_MUTE_ALT, val);
+       if (ret < 0)
+               return ret;
+
        return snd_soc_update_bits(codec, WM8962_ADC_DAC_CONTROL_1,
                                   WM8962_DAC_MUTE, val);
 }
index a1a5d5294c19dea3d76ce03be2dfe490d925bd78..910aafd09d21e210d2b6e0b36c7e3243533d5e6b 100644 (file)
 #define WM8962_SPKOUTL_ENA_MASK                 0x0040  /* SPKOUTL_ENA */
 #define WM8962_SPKOUTL_ENA_SHIFT                     6  /* SPKOUTL_ENA */
 #define WM8962_SPKOUTL_ENA_WIDTH                     1  /* SPKOUTL_ENA */
+#define WM8962_DAC_MUTE_ALT                     0x0010  /* DAC_MUTE */
+#define WM8962_DAC_MUTE_ALT_MASK                0x0010  /* DAC_MUTE */
+#define WM8962_DAC_MUTE_ALT_SHIFT                    4  /* DAC_MUTE */
+#define WM8962_DAC_MUTE_ALT_WIDTH                    1  /* DAC_MUTE */
 #define WM8962_SPKOUTL_PGA_MUTE                 0x0002  /* SPKOUTL_PGA_MUTE */
 #define WM8962_SPKOUTL_PGA_MUTE_MASK            0x0002  /* SPKOUTL_PGA_MUTE */
 #define WM8962_SPKOUTL_PGA_MUTE_SHIFT                1  /* SPKOUTL_PGA_MUTE */
index c8e5db1414d7e75f4077728765a210877f4b3cd9..496ce2eb2f1f31f4c4f0b6776ffd4d9a60243992 100644 (file)
@@ -258,10 +258,16 @@ static int fsl_esai_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
                return -EINVAL;
        }
 
-       if (ratio == 1) {
+       /* Only EXTAL source can be output directly without using PSR and PM */
+       if (ratio == 1 && clksrc == esai_priv->extalclk) {
                /* Bypass all the dividers if not being needed */
                ecr |= tx ? ESAI_ECR_ETO : ESAI_ECR_ERO;
                goto out;
+       } else if (ratio < 2) {
+               /* The ratio should be no less than 2 if using other sources */
+               dev_err(dai->dev, "failed to derive required HCK%c rate\n",
+                               tx ? 'T' : 'R');
+               return -EINVAL;
        }
 
        ret = fsl_esai_divisor_cal(dai, tx, ratio, false, 0);
@@ -307,7 +313,8 @@ static int fsl_esai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq)
                return -EINVAL;
        }
 
-       if (esai_priv->sck_div[tx] && (ratio > 16 || ratio == 0)) {
+       /* The ratio should be satisfied by FP alone if bypassing PM and PSR */
+       if (!esai_priv->sck_div[tx] && (ratio > 16 || ratio == 0)) {
                dev_err(dai->dev, "the ratio is out of range (1 ~ 16)\n");
                return -EINVAL;
        }
@@ -454,12 +461,6 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
        }
 
        if (!dai->active) {
-               /* Reset Port C */
-               regmap_update_bits(esai_priv->regmap, REG_ESAI_PRRC,
-                                  ESAI_PRRC_PDC_MASK, ESAI_PRRC_PDC(ESAI_GPIO));
-               regmap_update_bits(esai_priv->regmap, REG_ESAI_PCRC,
-                                  ESAI_PCRC_PC_MASK, ESAI_PCRC_PC(ESAI_GPIO));
-
                /* Set synchronous mode */
                regmap_update_bits(esai_priv->regmap, REG_ESAI_SAICR,
                                   ESAI_SAICR_SYNC, esai_priv->synchronous ?
@@ -519,6 +520,11 @@ static int fsl_esai_hw_params(struct snd_pcm_substream *substream,
 
        regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), mask, val);
 
+       /* Remove ESAI personal reset by configuring ESAI_PCRC and ESAI_PRRC */
+       regmap_update_bits(esai_priv->regmap, REG_ESAI_PRRC,
+                          ESAI_PRRC_PDC_MASK, ESAI_PRRC_PDC(ESAI_GPIO));
+       regmap_update_bits(esai_priv->regmap, REG_ESAI_PCRC,
+                          ESAI_PCRC_PC_MASK, ESAI_PCRC_PC(ESAI_GPIO));
        return 0;
 }
 
index ac869931d7f16c9c4049aefaffb4a7d416417e49..267717aa96c14e971329cfe2f728d1f141de72c3 100644 (file)
@@ -145,7 +145,7 @@ static const struct file_operations audmux_debugfs_fops = {
        .llseek = default_llseek,
 };
 
-static void __init audmux_debugfs_init(void)
+static void audmux_debugfs_init(void)
 {
        int i;
        char buf[20];
index 5d06eecb61986da272fcda4858db56c91d285153..18aee77f8d4a55276194542c41464feb363784d3 100644 (file)
@@ -138,6 +138,7 @@ static int sst_acpi_probe(struct platform_device *pdev)
 
        sst_pdata = &sst_acpi->sst_pdata;
        sst_pdata->id = desc->sst_id;
+       sst_pdata->dma_dev = dev;
        sst_acpi->desc = desc;
        sst_acpi->mach = mach;
 
index a50bf7fc0e3abf8dc729e7ce77f75ef40848480e..adf0aca5aca60423d2643a81679ce291f44d1208 100644 (file)
@@ -324,7 +324,7 @@ static int sst_byt_init(struct sst_dsp *sst, struct sst_pdata *pdata)
        memcpy_toio(sst->addr.lpe + SST_BYT_MAILBOX_OFFSET,
               &pdata->fw_base, sizeof(u32));
 
-       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       ret = dma_coerce_mask_and_coherent(sst->dma_dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;
 
index d0eaeee21be4c634ae88e984cf964c5067b5cae6..0d31dbbf480652e773243dca639f042a4c531bc0 100644 (file)
@@ -542,16 +542,20 @@ struct sst_byt_stream *sst_byt_stream_new(struct sst_byt *byt, int id,
        void *data)
 {
        struct sst_byt_stream *stream;
+       struct sst_dsp *sst = byt->dsp;
+       unsigned long flags;
 
        stream = kzalloc(sizeof(*stream), GFP_KERNEL);
        if (stream == NULL)
                return NULL;
 
+       spin_lock_irqsave(&sst->spinlock, flags);
        list_add(&stream->node, &byt->stream_list);
        stream->notify_position = notify_position;
        stream->pdata = data;
        stream->byt = byt;
        stream->str_id = id;
+       spin_unlock_irqrestore(&sst->spinlock, flags);
 
        return stream;
 }
@@ -630,6 +634,8 @@ int sst_byt_stream_free(struct sst_byt *byt, struct sst_byt_stream *stream)
 {
        u64 header;
        int ret = 0;
+       struct sst_dsp *sst = byt->dsp;
+       unsigned long flags;
 
        if (!stream->commited)
                goto out;
@@ -644,8 +650,10 @@ int sst_byt_stream_free(struct sst_byt *byt, struct sst_byt_stream *stream)
 
        stream->commited = false;
 out:
+       spin_lock_irqsave(&sst->spinlock, flags);
        list_del(&stream->node);
        kfree(stream);
+       spin_unlock_irqrestore(&sst->spinlock, flags);
 
        return ret;
 }
index 30ca14a6a83595d5acf4e644d7f8d50827a99a98..401213455497258111dcf939c7d963fbbbeb3e37 100644 (file)
@@ -228,6 +228,7 @@ struct sst_dsp {
        spinlock_t spinlock;    /* IPC locking */
        struct mutex mutex;     /* DSP FW lock */
        struct device *dev;
+       struct device *dma_dev;
        void *thread_context;
        int irq;
        u32 id;
index 0c129fd85ecf8a37b3758dfc924a9e5091c9e01e..0b715b20a2d7d46b9f06189de1ee57d9aacc28ab 100644 (file)
@@ -337,6 +337,7 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
        spin_lock_init(&sst->spinlock);
        mutex_init(&sst->mutex);
        sst->dev = dev;
+       sst->dma_dev = pdata->dma_dev;
        sst->thread_context = sst_dev->thread_context;
        sst->sst_dev = sst_dev;
        sst->id = pdata->id;
index 74052b59485ca1ce942444d0a3871ed8a6051672..e44423be66c459ba721249d0efe21bccdaeea4b5 100644 (file)
@@ -169,6 +169,7 @@ struct sst_pdata {
        u32 dma_base;
        u32 dma_size;
        int dma_engine;
+       struct device *dma_dev;
 
        /* DSP */
        u32 id;
index f7687107cf7f51f19a95992b79819270e3dd4734..928f228c38e754db3f41551208493045ce9ee561 100644 (file)
@@ -57,14 +57,8 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
        sst_fw->private = private;
        sst_fw->size = fw->size;
 
-       err = dma_coerce_mask_and_coherent(dsp->dev, DMA_BIT_MASK(32));
-       if (err < 0) {
-               kfree(sst_fw);
-               return NULL;
-       }
-
        /* allocate DMA buffer to store FW data */
-       sst_fw->dma_buf = dma_alloc_coherent(dsp->dev, sst_fw->size,
+       sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
                                &sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
        if (!sst_fw->dma_buf) {
                dev_err(dsp->dev, "error: DMA alloc failed\n");
@@ -106,7 +100,7 @@ void sst_fw_free(struct sst_fw *sst_fw)
        list_del(&sst_fw->list);
        mutex_unlock(&dsp->mutex);
 
-       dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
+       dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
                        sst_fw->dmable_fw_paddr);
        kfree(sst_fw);
 }
@@ -202,6 +196,9 @@ static int block_alloc_contiguous(struct sst_module *module,
                size -= block->size;
        }
 
+       list_for_each_entry(block, &tmp, list)
+               list_add(&block->module_list, &module->block_list);
+
        list_splice(&tmp, &dsp->used_block_list);
        return 0;
 }
@@ -247,8 +244,7 @@ static int block_alloc(struct sst_module *module,
                /* do we span > 1 blocks */
                if (data->size > block->size) {
                        ret = block_alloc_contiguous(module, data,
-                               block->offset + block->size,
-                               data->size - block->size);
+                               block->offset, data->size);
                        if (ret == 0)
                                return ret;
                }
@@ -344,7 +340,7 @@ static int block_alloc_fixed(struct sst_module *module,
 
                        err = block_alloc_contiguous(module, data,
                                block->offset + block->size,
-                               data->size - block->size + data->offset - block->offset);
+                               data->size - block->size);
                        if (err < 0)
                                return -ENOMEM;
 
@@ -371,15 +367,10 @@ static int block_alloc_fixed(struct sst_module *module,
                if (data->offset >= block->offset && data->offset < block_end) {
 
                        err = block_alloc_contiguous(module, data,
-                               block->offset + block->size,
-                               data->size - block->size);
+                               block->offset, data->size);
                        if (err < 0)
                                return -ENOMEM;
 
-                       /* add block */
-                       block->data_type = data->data_type;
-                       list_move(&block->list, &dsp->used_block_list);
-                       list_add(&block->module_list, &module->block_list);
                        return 0;
                }
 
index f5ebf36af8898d8a94d4a46886bfcfe0964d0567..535f517629fd608fb7c4bd3eb79a05a67d98a518 100644 (file)
@@ -433,7 +433,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
        int ret = -ENODEV, i, j, region_count;
        u32 offset, size;
 
-       dev = sst->dev;
+       dev = sst->dma_dev;
 
        switch (sst->id) {
        case SST_DEV_ID_LYNX_POINT:
@@ -466,7 +466,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
                return ret;
        }
 
-       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
        if (ret)
                return ret;
 
index 50e4246d4b57a2df8c9ad0c54deabb4e346ca572..e7996b39a48480a8577ac26d6cad28e3392259aa 100644 (file)
@@ -1159,11 +1159,14 @@ struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id,
        void *data)
 {
        struct sst_hsw_stream *stream;
+       struct sst_dsp *sst = hsw->dsp;
+       unsigned long flags;
 
        stream = kzalloc(sizeof(*stream), GFP_KERNEL);
        if (stream == NULL)
                return NULL;
 
+       spin_lock_irqsave(&sst->spinlock, flags);
        list_add(&stream->node, &hsw->stream_list);
        stream->notify_position = notify_position;
        stream->pdata = data;
@@ -1172,6 +1175,7 @@ struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id,
 
        /* work to process notification messages */
        INIT_WORK(&stream->notify_work, hsw_notification_work);
+       spin_unlock_irqrestore(&sst->spinlock, flags);
 
        return stream;
 }
@@ -1180,6 +1184,8 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
 {
        u32 header;
        int ret = 0;
+       struct sst_dsp *sst = hsw->dsp;
+       unsigned long flags;
 
        /* dont free DSP streams that are not commited */
        if (!stream->commited)
@@ -1201,8 +1207,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
        trace_hsw_stream_free_req(stream, &stream->free_req);
 
 out:
+       cancel_work_sync(&stream->notify_work);
+       spin_lock_irqsave(&sst->spinlock, flags);
        list_del(&stream->node);
        kfree(stream);
+       spin_unlock_irqrestore(&sst->spinlock, flags);
 
        return ret;
 }
@@ -1538,10 +1547,28 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
 }
 
 /* Stream pointer positions */
-int sst_hsw_get_dsp_position(struct sst_hsw *hsw,
+u32 sst_hsw_get_dsp_position(struct sst_hsw *hsw,
        struct sst_hsw_stream *stream)
 {
-       return stream->rpos.position;
+       u32 rpos;
+
+       sst_dsp_read(hsw->dsp, &rpos,
+               stream->reply.read_position_register_address, sizeof(rpos));
+
+       return rpos;
+}
+
+/* Stream presentation (monotonic) positions */
+u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw,
+       struct sst_hsw_stream *stream)
+{
+       u64 ppos;
+
+       sst_dsp_read(hsw->dsp, &ppos,
+               stream->reply.presentation_position_register_address,
+               sizeof(ppos));
+
+       return ppos;
 }
 
 int sst_hsw_stream_set_write_position(struct sst_hsw *hsw,
index d517929ccc389e2aaca3321106b6088250929482..2ac194a6d04b226eb86cb42c2361c643f7400635 100644 (file)
@@ -464,7 +464,9 @@ int sst_hsw_stream_get_write_pos(struct sst_hsw *hsw,
        struct sst_hsw_stream *stream, u32 *position);
 int sst_hsw_stream_set_write_position(struct sst_hsw *hsw,
        struct sst_hsw_stream *stream, u32 stage_id, u32 position);
-int sst_hsw_get_dsp_position(struct sst_hsw *hsw,
+u32 sst_hsw_get_dsp_position(struct sst_hsw *hsw,
+       struct sst_hsw_stream *stream);
+u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw,
        struct sst_hsw_stream *stream);
 
 /* HW port config */
index 0a32dd13a23d28ab96282b75592c20d5b133ee9f..9d5f64a583a388bd501973c32ae3d5d086d6acc6 100644 (file)
@@ -99,6 +99,7 @@ struct hsw_pcm_data {
        struct snd_compr_stream *cstream;
        unsigned int wpos;
        struct mutex mutex;
+       bool allocated;
 };
 
 /* private data for the driver */
@@ -107,12 +108,14 @@ struct hsw_priv_data {
        struct sst_hsw *hsw;
 
        /* page tables */
-       unsigned char *pcm_pg[HSW_PCM_COUNT][2];
+       struct snd_dma_buffer dmab[HSW_PCM_COUNT][2];
 
        /* DAI data */
        struct hsw_pcm_data pcm[HSW_PCM_COUNT];
 };
 
+static u32 hsw_notify_pointer(struct sst_hsw_stream *stream, void *data);
+
 static inline u32 hsw_mixer_to_ipc(unsigned int value)
 {
        if (value >= ARRAY_SIZE(volume_map))
@@ -273,28 +276,26 @@ static const struct snd_kcontrol_new hsw_volume_controls[] = {
 };
 
 /* Create DMA buffer page table for DSP */
-static int create_adsp_page_table(struct hsw_priv_data *pdata,
-       struct snd_soc_pcm_runtime *rtd,
-       unsigned char *dma_area, size_t size, int pcm, int stream)
+static int create_adsp_page_table(struct snd_pcm_substream *substream,
+       struct hsw_priv_data *pdata, struct snd_soc_pcm_runtime *rtd,
+       unsigned char *dma_area, size_t size, int pcm)
 {
-       int i, pages;
+       struct snd_dma_buffer *dmab = snd_pcm_get_dma_buf(substream);
+       int i, pages, stream = substream->stream;
 
-       if (size % PAGE_SIZE)
-               pages = (size / PAGE_SIZE) + 1;
-       else
-               pages = size / PAGE_SIZE;
+       pages = snd_sgbuf_aligned_pages(size);
 
        dev_dbg(rtd->dev, "generating page table for %p size 0x%zu pages %d\n",
                dma_area, size, pages);
 
        for (i = 0; i < pages; i++) {
                u32 idx = (((i << 2) + i)) >> 1;
-               u32 pfn = (virt_to_phys(dma_area + i * PAGE_SIZE)) >> PAGE_SHIFT;
+               u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT;
                u32 *pg_table;
 
                dev_dbg(rtd->dev, "pfn i %i idx %d pfn %x\n", i, idx, pfn);
 
-               pg_table = (u32*)(pdata->pcm_pg[pcm][stream] + idx);
+               pg_table = (u32 *)(pdata->dmab[pcm][stream].area + idx);
 
                if (i & 1)
                        *pg_table |= (pfn << 4);
@@ -317,12 +318,36 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
        struct sst_hsw *hsw = pdata->hsw;
        struct sst_module *module_data;
        struct sst_dsp *dsp;
+       struct snd_dma_buffer *dmab;
        enum sst_hsw_stream_type stream_type;
        enum sst_hsw_stream_path_id path_id;
        u32 rate, bits, map, pages, module_id;
        u8 channels;
        int ret;
 
+       /* check if we are being called a subsequent time */
+       if (pcm_data->allocated) {
+               ret = sst_hsw_stream_reset(hsw, pcm_data->stream);
+               if (ret < 0)
+                       dev_dbg(rtd->dev, "error: reset stream failed %d\n",
+                               ret);
+
+               ret = sst_hsw_stream_free(hsw, pcm_data->stream);
+               if (ret < 0) {
+                       dev_dbg(rtd->dev, "error: free stream failed %d\n",
+                               ret);
+                       return ret;
+               }
+               pcm_data->allocated = false;
+
+               pcm_data->stream = sst_hsw_stream_new(hsw, rtd->cpu_dai->id,
+                       hsw_notify_pointer, pcm_data);
+               if (pcm_data->stream == NULL) {
+                       dev_err(rtd->dev, "error: failed to create stream\n");
+                       return -EINVAL;
+               }
+       }
+
        /* stream direction */
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
                path_id = SST_HSW_STREAM_PATH_SSP0_OUT;
@@ -416,8 +441,10 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
                return ret;
        }
 
-       ret = create_adsp_page_table(pdata, rtd, runtime->dma_area,
-               runtime->dma_bytes, rtd->cpu_dai->id, substream->stream);
+       dmab = snd_pcm_get_dma_buf(substream);
+
+       ret = create_adsp_page_table(substream, pdata, rtd, runtime->dma_area,
+               runtime->dma_bytes, rtd->cpu_dai->id);
        if (ret < 0)
                return ret;
 
@@ -430,9 +457,9 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
                pages = runtime->dma_bytes / PAGE_SIZE;
 
        ret = sst_hsw_stream_buffer(hsw, pcm_data->stream,
-               virt_to_phys(pdata->pcm_pg[rtd->cpu_dai->id][substream->stream]),
+               pdata->dmab[rtd->cpu_dai->id][substream->stream].addr,
                pages, runtime->dma_bytes, 0,
-               (u32)(virt_to_phys(runtime->dma_area) >> PAGE_SHIFT));
+               snd_sgbuf_get_addr(dmab, 0) >> PAGE_SHIFT);
        if (ret < 0) {
                dev_err(rtd->dev, "error: failed to set DMA buffer %d\n", ret);
                return ret;
@@ -474,6 +501,7 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
                dev_err(rtd->dev, "error: failed to commit stream %d\n", ret);
                return ret;
        }
+       pcm_data->allocated = true;
 
        ret = sst_hsw_stream_pause(hsw, pcm_data->stream, 1);
        if (ret < 0)
@@ -541,12 +569,14 @@ static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_pcm_substream *substream)
        struct hsw_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(rtd);
        struct sst_hsw *hsw = pdata->hsw;
        snd_pcm_uframes_t offset;
+       uint64_t ppos;
+       u32 position = sst_hsw_get_dsp_position(hsw, pcm_data->stream);
 
-       offset = bytes_to_frames(runtime,
-               sst_hsw_get_dsp_position(hsw, pcm_data->stream));
+       offset = bytes_to_frames(runtime, position);
+       ppos = sst_hsw_get_dsp_presentation_position(hsw, pcm_data->stream);
 
-       dev_dbg(rtd->dev, "PCM: DMA pointer %zu bytes\n",
-               frames_to_bytes(runtime, (u32)offset));
+       dev_dbg(rtd->dev, "PCM: DMA pointer %du bytes, pos %llu\n",
+               position, ppos);
        return offset;
 }
 
@@ -606,6 +636,7 @@ static int hsw_pcm_close(struct snd_pcm_substream *substream)
                dev_dbg(rtd->dev, "error: free stream failed %d\n", ret);
                goto out;
        }
+       pcm_data->allocated = 0;
        pcm_data->stream = NULL;
 
 out:
@@ -621,7 +652,7 @@ static struct snd_pcm_ops hsw_pcm_ops = {
        .hw_free        = hsw_pcm_hw_free,
        .trigger        = hsw_pcm_trigger,
        .pointer        = hsw_pcm_pointer,
-       .mmap           = snd_pcm_lib_default_mmap,
+       .page           = snd_pcm_sgbuf_ops_page,
 };
 
 static void hsw_pcm_free(struct snd_pcm *pcm)
@@ -632,17 +663,16 @@ static void hsw_pcm_free(struct snd_pcm *pcm)
 static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_pcm *pcm = rtd->pcm;
+       struct snd_soc_platform *platform = rtd->platform;
+       struct sst_pdata *pdata = dev_get_platdata(platform->dev);
+       struct device *dev = pdata->dma_dev;
        int ret = 0;
 
-       ret = dma_coerce_mask_and_coherent(rtd->card->dev, DMA_BIT_MASK(32));
-       if (ret)
-               return ret;
-
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream ||
                        pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
                ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
-                       SNDRV_DMA_TYPE_DEV,
-                       rtd->card->dev,
+                       SNDRV_DMA_TYPE_DEV_SG,
+                       dev,
                        hsw_pcm_hardware.buffer_bytes_max,
                        hsw_pcm_hardware.buffer_bytes_max);
                if (ret) {
@@ -742,11 +772,14 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
 {
        struct sst_pdata *pdata = dev_get_platdata(platform->dev);
        struct hsw_priv_data *priv_data;
-       int i;
+       struct device *dma_dev;
+       int i, ret = 0;
 
        if (!pdata)
                return -ENODEV;
 
+       dma_dev = pdata->dma_dev;
+
        priv_data = devm_kzalloc(platform->dev, sizeof(*priv_data), GFP_KERNEL);
        priv_data->hsw = pdata->dsp;
        snd_soc_platform_set_drvdata(platform, priv_data);
@@ -758,15 +791,17 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
 
                /* playback */
                if (hsw_dais[i].playback.channels_min) {
-                       priv_data->pcm_pg[i][0] = kzalloc(PAGE_SIZE, GFP_DMA);
-                       if (priv_data->pcm_pg[i][0] == NULL)
+                       ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev,
+                               PAGE_SIZE, &priv_data->dmab[i][0]);
+                       if (ret < 0)
                                goto err;
                }
 
                /* capture */
                if (hsw_dais[i].capture.channels_min) {
-                       priv_data->pcm_pg[i][1] = kzalloc(PAGE_SIZE, GFP_DMA);
-                       if (priv_data->pcm_pg[i][1] == NULL)
+                       ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev,
+                               PAGE_SIZE, &priv_data->dmab[i][1]);
+                       if (ret < 0)
                                goto err;
                }
        }
@@ -776,11 +811,11 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
 err:
        for (;i >= 0; i--) {
                if (hsw_dais[i].playback.channels_min)
-                       kfree(priv_data->pcm_pg[i][0]);
+                       snd_dma_free_pages(&priv_data->dmab[i][0]);
                if (hsw_dais[i].capture.channels_min)
-                       kfree(priv_data->pcm_pg[i][1]);
+                       snd_dma_free_pages(&priv_data->dmab[i][1]);
        }
-       return -ENOMEM;
+       return ret;
 }
 
 static int hsw_pcm_remove(struct snd_soc_platform *platform)
@@ -791,9 +826,9 @@ static int hsw_pcm_remove(struct snd_soc_platform *platform)
 
        for (i = 0; i < ARRAY_SIZE(hsw_dais); i++) {
                if (hsw_dais[i].playback.channels_min)
-                       kfree(priv_data->pcm_pg[i][0]);
+                       snd_dma_free_pages(&priv_data->dmab[i][0]);
                if (hsw_dais[i].capture.channels_min)
-                       kfree(priv_data->pcm_pg[i][1]);
+                       snd_dma_free_pages(&priv_data->dmab[i][1]);
        }
 
        return 0;
index 215b668166be6c50d01963cac2ef62598cf6f7b2..89424470a1f3860eab989eabeab579184e070359 100644 (file)
@@ -197,13 +197,12 @@ static void rsnd_dma_complete(void *data)
         * rsnd_dai_pointer_update() will be called twice,
         * ant it will breaks io->byte_pos
         */
-
-       rsnd_dai_pointer_update(io, io->byte_per_period);
-
        if (dma->submit_loop)
                rsnd_dma_continue(dma);
 
        rsnd_unlock(priv, flags);
+
+       rsnd_dai_pointer_update(io, io->byte_per_period);
 }
 
 static void __rsnd_dma_start(struct rsnd_dma *dma)
index 7769b0a2bc5a5287f932d20524216f7c390fd4c7..6d6ceee447d559e711ba8eccc2e734953585d9ad 100644 (file)
@@ -1612,8 +1612,11 @@ static void dapm_pre_sequence_async(void *data, async_cookie_t cookie)
                                "ASoC: Failed to turn on bias: %d\n", ret);
        }
 
-       /* Prepare for a STADDBY->ON or ON->STANDBY transition */
-       if (d->bias_level != d->target_bias_level) {
+       /* Prepare for a transition to ON or away from ON */
+       if ((d->target_bias_level == SND_SOC_BIAS_ON &&
+            d->bias_level != SND_SOC_BIAS_ON) ||
+           (d->target_bias_level != SND_SOC_BIAS_ON &&
+            d->bias_level == SND_SOC_BIAS_ON)) {
                ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_PREPARE);
                if (ret != 0)
                        dev_err(d->dev,
@@ -3475,8 +3478,11 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card)
                cpu_dai = rtd->cpu_dai;
                codec_dai = rtd->codec_dai;
 
-               /* dynamic FE links have no fixed DAI mapping */
-               if (rtd->dai_link->dynamic)
+               /*
+                * dynamic FE links have no fixed DAI mapping.
+                * CODEC<->CODEC links have no direct connection.
+                */
+               if (rtd->dai_link->dynamic || rtd->dai_link->params)
                        continue;
 
                /* there is no point in connecting BE DAI links with dummies */
index 2cedf09f6d9613c7b34bb22888806fde598052c6..a391de05803765403fc94e989717ad7dae91db5c 100644 (file)
@@ -1675,7 +1675,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
                        be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
                        break;
                case SNDRV_PCM_TRIGGER_SUSPEND:
-                       if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP)
+                       if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
                                continue;
 
                        if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
index bcae806b0c398d5450975ab08001e7a808e376c4..9a617adc6675dc06552de428c93b3c611599900b 100644 (file)
@@ -44,6 +44,9 @@ cpupower: FORCE
 cgroup firewire hv guest usb virtio vm net: FORCE
        $(call descend,$@)
 
+liblockdep: FORCE
+       $(call descend,lib/lockdep)
+
 libapikfs: FORCE
        $(call descend,lib/api)
 
@@ -91,6 +94,9 @@ cpupower_clean:
 cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean:
        $(call descend,$(@:_clean=),clean)
 
+liblockdep_clean:
+       $(call descend,lib/lockdep,clean)
+
 libapikfs_clean:
        $(call descend,lib/api,clean)
 
index cb09d3ff8f5856dab489130133ec9c2e2ba73db8..bba2f5253b6e281ff85abdf65a76406d88d6503b 100644 (file)
@@ -1,8 +1,7 @@
 # file format version
 FILE_VERSION = 1
 
-MAKEFLAGS += --no-print-directory
-LIBLOCKDEP_VERSION=$(shell make -sC ../../.. kernelversion)
+LIBLOCKDEP_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
 
 # Makefiles suck: This macro sets a default value of $(2) for the
 # variable named by $(1), unless the variable has been set by
@@ -231,7 +230,7 @@ install_lib: all_cmd
 install: install_lib
 
 clean:
-       $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
+       $(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d
        $(RM) tags TAGS
 
 endif # skip-makefile