]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge tag 'nfc-next-4.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/sameo...
authorDavid S. Miller <davem@davemloft.net>
Mon, 24 Aug 2015 03:42:57 +0000 (20:42 -0700)
committerDavid S. Miller <davem@davemloft.net>
Mon, 24 Aug 2015 03:42:57 +0000 (20:42 -0700)
Samuel Ortiz says:

====================
NFC 4.3 pull request

This is the NFC pull request for 4.3.
With this one we have:

- A new driver for Samsung's S3FWRN5 NFC chipset. In order to
  properly support this driver, a few NCI core routines needed
  to be exported. Future drivers like Intel's Fields Peak will
  benefit from this.

- SPI support as a physical transport for STM st21nfcb.

- An additional netlink API for sending replies back to userspace
  from vendor commands.

- 2 small fixes for TI's trf7970a

- A few st-nci fixes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
602 files changed:
.mailmap
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/net/dsa/dsa.txt
Documentation/networking/6lowpan.txt [new file with mode: 0644]
Documentation/networking/ip-sysctl.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/k2e.dtsi
arch/arm/boot/dts/k2hk.dtsi
arch/arm/boot/dts/k2l.dtsi
arch/arm/boot/dts/keystone.dtsi
arch/arm/boot/dts/omap2430.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/ste-dbx5x0.dtsi
arch/arm/kernel/entry-common.S
arch/arm/kernel/head.S
arch/arm/kernel/vdso.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/vdso/Makefile
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/x86/entry/entry_64_compat.S
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_cqm.c
arch/x86/kernel/step.c
arch/x86/kvm/x86.c
arch/x86/math-emu/fpu_entry.c
arch/x86/math-emu/fpu_system.h
arch/x86/math-emu/get_address.c
arch/x86/xen/Kconfig
block/blk-settings.c
crypto/authencesn.c
drivers/acpi/video_detect.c
drivers/ata/ahci_brcmstb.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/ata/sata_sx4.c
drivers/base/property.c
drivers/bcma/Kconfig
drivers/bcma/bcma_private.h
drivers/bcma/driver_gpio.c
drivers/bcma/main.c
drivers/block/zram/zram_drv.c
drivers/bluetooth/Kconfig
drivers/bluetooth/Makefile
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btqca.c [new file with mode: 0644]
drivers/bluetooth/btqca.h [new file with mode: 0644]
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_bcm.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_qca.c [new file with mode: 0644]
drivers/bluetooth/hci_uart.h
drivers/clk/pxa/clk-pxa3xx.c
drivers/clocksource/sh_cmt.c
drivers/cpufreq/exynos-cpufreq.c
drivers/crypto/caam/caamhash.c
drivers/crypto/nx/nx-sha256.c
drivers/crypto/nx/nx-sha512.c
drivers/dma/dmaengine.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/memory/omap-gpmc.c
drivers/net/Kconfig
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_hsi.c
drivers/net/caif/caif_serial.c
drivers/net/caif/caif_spi.c
drivers/net/can/flexcan.c
drivers/net/can/usb/gs_usb.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/dummy.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/vnic_cq.c
drivers/net/ethernet/cisco/enic/vnic_dev.c
drivers/net/ethernet/cisco/enic/vnic_dev.h
drivers/net/ethernet/cisco/enic/vnic_devcmd.h
drivers/net/ethernet/cisco/enic/vnic_intr.c
drivers/net/ethernet/cisco/enic/vnic_resource.h
drivers/net/ethernet/cisco/enic/vnic_rq.c
drivers/net/ethernet/cisco/enic/vnic_wq.c
drivers/net/ethernet/cisco/enic/vnic_wq.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/ezchip/nps_enet.h
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/e1000_phy.h
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/micrel/ks8842.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/geneve.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/cc2520.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/loopback.c
drivers/net/nlmon.c
drivers/net/phy/phy.c
drivers/net/phy/smsc.c
drivers/net/ppp/ppp_generic.c
drivers/net/team/team.c
drivers/net/usb/qmi_wwan.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wan/hdlc_fr.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/htt.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/rx_desc.h
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/wmi-ops.h
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath10k/wow.c
drivers/net/wireless/ath/ath5k/Kconfig
drivers/net/wireless/ath/ath5k/ani.c
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/debug.c
drivers/net/wireless/ath/ath6kl/htc.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/channel.c
drivers/net/wireless/ath/ath9k/debug_sta.c
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/ath/ath9k/htc_hst.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/link.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/wmi.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/debug.c
drivers/net/wireless/ath/wil6210/Makefile
drivers/net/wireless/ath/wil6210/boot_loader.h [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/ethtool.c
drivers/net/wireless/ath/wil6210/fw.c
drivers/net/wireless/ath/wil6210/fw_inc.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/ioctl.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/pm.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/rx_reorder.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/txrx.h
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wil_platform.c
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/debugfs.c
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/rs.c
drivers/net/wireless/iwlwifi/dvm/rx.c
drivers/net/wireless/iwlwifi/dvm/rxon.c
drivers/net/wireless/iwlwifi/dvm/scan.c
drivers/net/wireless/iwlwifi/dvm/sta.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
drivers/net/wireless/iwlwifi/iwl-fh.h
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-notif-wait.c
drivers/net/wireless/iwlwifi/iwl-notif-wait.h
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/Makefile
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/constants.h
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/sta.h
drivers/net/wireless/iwlwifi/mvm/tdls.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/time-event.h
drivers/net/wireless/iwlwifi/mvm/tof.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/tof.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/tt.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mediatek/mt7601u/dma.c
drivers/net/wireless/mediatek/mt7601u/init.c
drivers/net/wireless/mediatek/mt7601u/mac.c
drivers/net/wireless/mediatek/mt7601u/mt7601u.h
drivers/net/wireless/mediatek/mt7601u/tx.c
drivers/net/wireless/mediatek/mt7601u/usb.c
drivers/net/wireless/mediatek/mt7601u/usb.h
drivers/net/wireless/mwifiex/Kconfig
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/pcie.h
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sdio.h
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/tdls.c
drivers/net/wireless/mwifiex/uap_event.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/usb.h
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/main.c
drivers/net/wireless/orinoco/orinoco_cs.c
drivers/net/wireless/orinoco/orinoco_nortel.c
drivers/net/wireless/orinoco/orinoco_pci.c
drivers/net/wireless/orinoco/orinoco_plx.c
drivers/net/wireless/orinoco/orinoco_usb.c
drivers/net/wireless/rt2x00/Kconfig
drivers/net/wireless/rt2x00/rt2500usb.h
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00link.c
drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
drivers/net/wireless/rtlwifi/rtl8192de/dm.c
drivers/net/wireless/rtlwifi/rtl8192de/fw.h
drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
drivers/net/wireless/rtlwifi/rtl8723be/sw.c
drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/ti/wl12xx/scan.c
drivers/net/wireless/ti/wl18xx/acx.c
drivers/net/wireless/ti/wl18xx/acx.h
drivers/net/wireless/ti/wl18xx/debugfs.c
drivers/net/wireless/ti/wl18xx/event.c
drivers/net/wireless/ti/wl18xx/event.h
drivers/net/wireless/ti/wl18xx/main.c
drivers/net/wireless/ti/wl18xx/scan.c
drivers/net/wireless/ti/wl18xx/scan.h
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/wireless/ti/wlcore/cmd.h
drivers/net/wireless/ti/wlcore/conf.h
drivers/net/wireless/ti/wlcore/init.c
drivers/net/wireless/ti/wlcore/init.h
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/rx.c
drivers/net/wireless/ti/wlcore/scan.h
drivers/net/wireless/ti/wlcore/wlcore.h
drivers/net/wireless/ti/wlcore/wlcore_i.h
drivers/net/xen-netfront.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libiscsi.c
drivers/scsi/scsi_error.c
drivers/scsi/sd.c
drivers/staging/wilc1000/linux_mon.c
drivers/target/iscsi/iscsi_target.c
drivers/target/target_core_configfs.c
drivers/target/target_core_hba.c
drivers/target/target_core_spc.c
drivers/thermal/cpu_cooling.c
drivers/thermal/power_allocator.c
fs/fuse/dev.c
include/drm/drm_crtc.h
include/linux/ata.h
include/linux/average.h
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/etherdevice.h
include/linux/ieee80211.h
include/linux/mlx5/driver.h
include/linux/netdevice.h
include/linux/netfilter/nfnetlink_acct.h
include/net/6lowpan.h
include/net/bluetooth/hci_core.h
include/net/cfg80211.h
include/net/cfg802154.h
include/net/checksum.h
include/net/dsa.h
include/net/dst.h
include/net/dst_metadata.h
include/net/flow.h
include/net/ip6_fib.h
include/net/ip_tunnels.h
include/net/lwtunnel.h
include/net/mac80211.h
include/net/ndisc.h
include/net/net_namespace.h
include/net/netfilter/ipv4/nf_dup_ipv4.h [new file with mode: 0644]
include/net/netfilter/ipv6/nf_dup_ipv6.h [new file with mode: 0644]
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_core.h
include/net/netfilter/nf_conntrack_expect.h
include/net/netfilter/nf_conntrack_zones.h
include/net/netfilter/nft_dup.h [new file with mode: 0644]
include/net/nl802154.h
include/net/route.h
include/net/vrf.h
include/net/vxlan.h
include/net/xfrm.h
include/scsi/scsi_eh.h
include/sound/soc-topology.h
include/uapi/linux/if_packet.h
include/uapi/linux/ila.h [new file with mode: 0644]
include/uapi/linux/lwtunnel.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/netfilter/nfnetlink_conntrack.h
include/uapi/linux/netfilter/xt_CT.h
include/uapi/linux/rtnetlink.h
include/uapi/sound/asoc.h
ipc/sem.c
kernel/cpuset.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/locking/qspinlock_paravirt.h
lib/Kconfig
lib/average.c [deleted file]
lib/test_rhashtable.c
mm/cma.h
mm/kasan/kasan.c
mm/kasan/report.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/page_alloc.c
net/6lowpan/Makefile
net/6lowpan/core.c [new file with mode: 0644]
net/6lowpan/iphc.c
net/8021q/vlan_dev.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/gateway_client.c
net/batman-adv/multicast.c
net/batman-adv/network-coding.c
net/batman-adv/originator.c
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/amp.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_request.c
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bridge/br_device.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/caif/caif_dev.c
net/core/dev.c
net/core/dst.c
net/core/filter.c
net/core/lwtunnel.c
net/core/skbuff.c
net/core/utils.c
net/dsa/dsa.c
net/dsa/slave.c
net/hsr/hsr_device.c
net/ieee802154/6lowpan/6lowpan_i.h
net/ieee802154/6lowpan/core.c
net/ieee802154/6lowpan/rx.c
net/ieee802154/6lowpan/tx.c
net/ieee802154/nl802154.c
net/ieee802154/rdev-ops.h
net/ieee802154/trace.h
net/ipv4/af_inet.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/fou.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel_core.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/ipt_ECN.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_conntrack_proto_icmp.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_dup_ipv4.c [new file with mode: 0644]
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
net/ipv4/netfilter/nf_nat_proto_icmp.c
net/ipv4/netfilter/nft_dup_ipv4.c [new file with mode: 0644]
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/xfrm4_policy.c
net/ipv6/Kconfig
net/ipv6/Makefile
net/ipv6/addrconf.c
net/ipv6/ila.c [new file with mode: 0644]
net/ipv6/ip6_fib.c
net/ipv6/mcast_snoop.c
net/ipv6/ndisc.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/netfilter/nf_dup_ipv6.c [new file with mode: 0644]
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
net/ipv6/netfilter/nf_nat_proto_icmpv6.c
net/ipv6/netfilter/nft_dup_ipv6.c [new file with mode: 0644]
net/ipv6/route.c
net/ipv6/udp.c
net/ipv6/xfrm6_mode_tunnel.c
net/ipv6/xfrm6_policy.c
net/mac80211/Kconfig
net/mac80211/Makefile
net/mac80211/aes_cmac.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/debugfs_key.c
net/mac80211/debugfs_netdev.c
net/mac80211/driver-ops.c [new file with mode: 0644]
net/mac80211/driver-ops.h
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/key.h
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_plink.c
net/mac80211/mesh_ps.c
net/mac80211/mesh_sync.c
net/mac80211/mlme.c
net/mac80211/ocb.c
net/mac80211/rate.c
net/mac80211/rate.h
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/tdls.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/vht.c
net/mac80211/wpa.c
net/mac802154/cfg.c
net/mac802154/iface.c
net/mac802154/main.c
net/mpls/mpls_iptunnel.c
net/netfilter/Kconfig
net/netfilter/ipvs/ip_vs_nfct.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_pptp.c
net/netfilter/nf_conntrack_seqadj.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_proto_dccp.c
net/netfilter/nf_nat_proto_tcp.c
net/netfilter/nf_nat_proto_udp.c
net/netfilter/nf_nat_proto_udplite.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nft_counter.c
net/netfilter/nft_limit.c
net/netfilter/nft_payload.c
net/netfilter/xt_CT.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TCPOPTSTRIP.c
net/netfilter/xt_TEE.c
net/netfilter/xt_connlimit.c
net/netfilter/xt_nfacct.c
net/openvswitch/actions.c
net/openvswitch/flow_netlink.c
net/openvswitch/flow_table.c
net/openvswitch/vport-geneve.c
net/openvswitch/vport-netdev.c
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/packet/internal.h
net/rfkill/Kconfig
net/rfkill/rfkill-gpio.c
net/sched/act_connmark.c
net/sched/act_nat.c
net/sched/sch_fifo.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_htb.c
net/sched/sch_plug.c
net/sched/sch_sfb.c
net/tipc/link.c
net/tipc/netlink_compat.c
net/tipc/node.c
net/wireless/core.c
net/wireless/core.h
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/rdev-ops.h
net/wireless/reg.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
sound/pci/hda/patch_realtek.c
sound/soc/Kconfig
sound/soc/Makefile
sound/usb/card.c
tools/perf/config/Makefile
tools/perf/util/stat-shadow.c
tools/testing/selftests/net/psock_fanout.c
tools/testing/selftests/net/psock_lib.h

index b4091b7a78fe11ccd0e5f44f0703ace69dc09707..4b31af54ccd5864359c0810f9733f3026181a631 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andreas Herrmann <aherrman@de.ibm.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
index d6b794cef0b8b9907ab5a055a6502180b4350148..91e6e5c478d006245c5a88e7ae7e304d6fa7f097 100644 (file)
@@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
                            "qcom,kpss-acc-v1"
                            "qcom,kpss-acc-v2"
                            "rockchip,rk3066-smp"
+                           "ste,dbx500-smp"
 
        - cpu-release-addr
                Usage: required for systems that have an "enable-method"
index 9cf9a0ec333c053932a81b971358bb8d75dbdf6a..04e6bef3ac3ff431560f53456a34d4b932c7eca0 100644 (file)
@@ -44,9 +44,10 @@ Note that a port labelled "dsa" will imply checking for the uplink phandle
 described below.
 
 Optional property:
-- link                 : Should be a phandle to another switch's DSA port.
+- link                 : Should be a list of phandles to another switch's DSA port.
                          This property is only used when switches are being
-                         chained/cascaded together.
+                         chained/cascaded together. This port is used as outgoing port
+                         towards the phandle port, which can be more than one hop away.
 
 - phy-handle           : Phandle to a PHY on an external MDIO bus, not the
                          switch internal one. See
@@ -100,10 +101,11 @@ Example:
                                label = "cpu";
                        };
 
-                       switch0uplink: port@6 {
+                       switch0port6: port@6 {
                                reg = <6>;
                                label = "dsa";
-                               link = <&switch1uplink>;
+                               link = <&switch1port0
+                                       &switch2port0>;
                        };
                };
 
@@ -113,10 +115,29 @@ Example:
                        reg = <17 1>;   /* MDIO address 17, switch 1 in tree */
                        mii-bus = <&mii_bus1>;
 
-                       switch1uplink: port@0 {
+                       switch1port0: port@0 {
                                reg = <0>;
                                label = "dsa";
-                               link = <&switch0uplink>;
+                               link = <&switch0port6>;
+                       };
+                       switch1port1: port@1 {
+                               reg = <1>;
+                               label = "dsa";
+                               link = <&switch2port1>;
+                       };
+               };
+
+               switch@2 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <18 2>;   /* MDIO address 18, switch 2 in tree */
+                       mii-bus = <&mii_bus1>;
+
+                       switch2port0: port@0 {
+                               reg = <0>;
+                               label = "dsa";
+                               link = <&switch1port1
+                                       &switch0port6>;
                        };
                };
        };
diff --git a/Documentation/networking/6lowpan.txt b/Documentation/networking/6lowpan.txt
new file mode 100644 (file)
index 0000000..a7dc7e9
--- /dev/null
@@ -0,0 +1,50 @@
+
+Netdev private dataroom for 6lowpan interfaces:
+
+All 6lowpan able net devices, means all interfaces with ARPHRD_6LOWPAN,
+must have "struct lowpan_priv" placed at beginning of netdev_priv.
+
+The priv_size of each interface should be calculate by:
+
+ dev->priv_size = LOWPAN_PRIV_SIZE(LL_6LOWPAN_PRIV_DATA);
+
+Where LL_6LOWPAN_PRIV_DATA is the sizeof of the link-layer 6lowpan private
+data struct. To access the LL_6LOWPAN_PRIV_DATA structure you can cast:
+
+ lowpan_priv(dev)->priv;
+
+to your LL_6LOWPAN_PRIV_DATA structure.
+
+Before registering the lowpan netdev interface you must run:
+
+ lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);
+
+where LOWPAN_LLTYPE_FOOBAR is a define for your 6LoWPAN linklayer type of
+enum lowpan_lltypes.
+
+For example, to access the private data you can usually do:
+
+static inline struct lowpan_priv_foobar *
+lowpan_foobar_priv(struct net_device *dev)
+{
+       return (struct lowpan_priv_foobar *)lowpan_priv(dev)->priv;
+}
+
+switch (dev->type) {
+case ARPHRD_6LOWPAN:
+       lowpan_priv = lowpan_priv(dev);
+       /* do great stuff which is ARPHRD_6LOWPAN related */
+       switch (lowpan_priv->lltype) {
+       case LOWPAN_LLTYPE_FOOBAR:
+               /* do 802.15.4 6LoWPAN handling here */
+               lowpan_foobar_priv(dev)->bar = foo;
+               break;
+       ...
+       }
+       break;
+...
+}
+
+In case of generic 6lowpan branch ("net/6lowpan") you can remove the check
+on ARPHRD_6LOWPAN, because you can be sure that these functions are called
+by ARPHRD_6LOWPAN interfaces.
index 56db1efd7189ac6b25fdfc4a18b579f28e01a4b7..46e88ed7f41d202326b3f198eb9bd0c02b62c5f9 100644 (file)
@@ -1181,6 +1181,11 @@ tag - INTEGER
        Allows you to write a number, which can be used as required.
        Default value is 0.
 
+xfrm4_gc_thresh - INTEGER
+       The threshold at which we will start garbage collecting for IPv4
+       destination cache entries.  At twice this value the system will
+       refuse new allocations.
+
 Alexey Kuznetsov.
 kuznet@ms2.inr.ac.ru
 
@@ -1617,6 +1622,11 @@ ratelimit - INTEGER
        otherwise the minimal space between responses in milliseconds.
        Default: 1000
 
+xfrm6_gc_thresh - INTEGER
+       The threshold at which we will start garbage collecting for IPv6
+       destination cache entries.  At twice this value the system will
+       refuse new allocations.
+
 
 IPv6 Update by:
 Pekka Savola <pekkas@netcore.fi>
index 7a3b1b901d22e7858c74dd0113146e486adb84bb..4e6dcb692d3069eb2777d8304f511679906f3188 100644 (file)
@@ -158,6 +158,7 @@ L:  linux-wpan@vger.kernel.org
 S:     Maintained
 F:     net/6lowpan/
 F:     include/net/6lowpan.h
+F:     Documentation/networking/6lowpan.txt
 
 6PACK NETWORK DRIVER FOR AX.25
 M:     Andreas Koensgen <ajk@comnets.uni-bremen.de>
@@ -3587,6 +3588,15 @@ S:       Maintained
 F:     drivers/gpu/drm/rockchip/
 F:     Documentation/devicetree/bindings/video/rockchip*
 
+DRM DRIVERS FOR STI
+M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M:     Vincent Abriou <vincent.abriou@st.com>
+L:     dri-devel@lists.freedesktop.org
+T:     git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+S:     Maintained
+F:     drivers/gpu/drm/sti
+F:     Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+
 DSBR100 USB FM RADIO DRIVER
 M:     Alexey Klimov <klimov.linux@gmail.com>
 L:     linux-media@vger.kernel.org
index 35b4c196c171306f8ce66791eb6f91f57fbbb215..6e88c371b32f760fb8c13601d23869e91ebd0289 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
index 0001e959bf49ad153bf50be8e2d6a515d75988a6..6dbbc02d18b4d635dc10b21cc2f73b04b8490e49 100644 (file)
                                ranges = <0 0x2000 0x2000>;
 
                                scm_conf: scm_conf@0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon", "simple-bus";
                                        reg = <0x0 0x1400>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index e6d13592080d7c701056c2f6a73326aa11e715b5..b57033e8c633187a5f52c367a788f46196967fdc 100644 (file)
                        interrupt-names = "msi";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
-                       interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
                                 <&clks IMX6QDL_CLK_LVDS1_GATE>,
                                 <&clks IMX6QDL_CLK_PCIE_REF_125M>;
index 1b6494fbdb91b9301c607652efbd9a9fb34a9f36..675fb8e492c6aa0478a6d5df01b30fbe1e281b7d 100644 (file)
                                        <GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
                        };
                };
+
+               mdio: mdio@24200f00 {
+                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x24200f00 0x100>;
+                       status = "disabled";
+                       clocks = <&clkcpgmac>;
+                       clock-names = "fck";
+                       bus_freq        = <2500000>;
+               };
                /include/ "k2e-netcp.dtsi"
        };
 };
-
-&mdio {
-       reg = <0x24200f00 0x100>;
-};
index ae6472407b2277012096d733bb80951592555d03..d0810a5f296857394397c7f1c60bffa0011bb6e1 100644 (file)
                        #gpio-cells = <2>;
                        gpio,syscon-dev = <&devctrl 0x25c>;
                };
+
+               mdio: mdio@02090300 {
+                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x02090300 0x100>;
+                       status = "disabled";
+                       clocks = <&clkcpgmac>;
+                       clock-names = "fck";
+                       bus_freq        = <2500000>;
+               };
                /include/ "k2hk-netcp.dtsi"
        };
 };
index 0e007483615e4f097bb747a2d882b2e2d3a030aa..49fd414f680c93ab50cf0dae72d2e9261181da21 100644 (file)
@@ -29,7 +29,6 @@
        };
 
        soc {
-
                /include/ "k2l-clocks.dtsi"
 
                uart2: serial@02348400 {
                        #gpio-cells = <2>;
                        gpio,syscon-dev = <&devctrl 0x24c>;
                };
+
+               mdio: mdio@26200f00 {
+                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x26200f00 0x100>;
+                       status = "disabled";
+                       clocks = <&clkcpgmac>;
+                       clock-names = "fck";
+                       bus_freq        = <2500000>;
+               };
                /include/ "k2l-netcp.dtsi"
        };
 };
        /* Pin muxed. Enabled and configured by Bootloader */
        status = "disabled";
 };
-
-&mdio {
-       reg = <0x26200f00 0x100>;
-};
index e7a6f6deabb6c0d89d4ca1e2c2ae63639249d010..72816d65f7ec3fcf5d7c47ce792ae57db369754b 100644 (file)
                                  1 0 0x21000A00 0x00000100>;
                };
 
-               mdio: mdio@02090300 {
-                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       reg             = <0x02090300 0x100>;
-                       status = "disabled";
-                       clocks = <&clkpa>;
-                       clock-names = "fck";
-                       bus_freq        = <2500000>;
-               };
-
                kirq0: keystone_irq@26202a0 {
                        compatible = "ti,keystone-irq";
                        interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
index 11a7963be0035a002fa77c2bec6809b34444e584..2390f387c27163bb76e918bb73e26966bee7fb48 100644 (file)
@@ -51,7 +51,8 @@
                                };
 
                                scm_conf: scm_conf@270 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x270 0x240>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index 7d31c6ff246f47b14afd5eeb332d01a955faef35..abc4473e6f8a17e51d5e66416be089ab24d7b472 100644 (file)
                                };
 
                                omap4_padconf_global: omap4_padconf_global@5a0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x5a0 0x170>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index c8fd648a7108515def0e9492936fd3760f156579..b1a1263e600168291091a963f9f56aefc87fd59e 100644 (file)
                                };
 
                                omap5_padconf_global: omap5_padconf_global@5a0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x5a0 0xec>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index a75f3289e653ab2973e2d7dd1cb12c8a12724451..b8f81fb418ce60039ad4e8e04f2892ca34d26bc8 100644 (file)
 #include "skeleton.dtsi"
 
 / {
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               enable-method = "ste,dbx500-smp";
+
+               cpu-map {
+                       cluster0 {
+                               core0 {
+                                       cpu = <&CPU0>;
+                               };
+                               core1 {
+                                       cpu = <&CPU1>;
+                               };
+                       };
+               };
+               CPU0: cpu@300 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a9";
+                       reg = <0x300>;
+               };
+               CPU1: cpu@301 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a9";
+                       reg = <0x301>;
+               };
+       };
+
        soc {
                #address-cells = <1>;
                #size-cells = <1>;
                interrupt-parent = <&intc>;
                ranges;
 
-               cpus {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
-                       cpu-map {
-                               cluster0 {
-                                       core0 {
-                                               cpu = <&CPU0>;
-                                       };
-                                       core1 {
-                                               cpu = <&CPU1>;
-                                       };
-                               };
-                       };
-                       CPU0: cpu@0 {
-                               device_type = "cpu";
-                               compatible = "arm,cortex-a9";
-                               reg = <0>;
-                       };
-                       CPU1: cpu@1 {
-                               device_type = "cpu";
-                               compatible = "arm,cortex-a9";
-                               reg = <1>;
-                       };
-               };
-
                ptm@801ae000 {
                        compatible = "arm,coresight-etm3x", "arm,primecell";
                        reg = <0x801ae000 0x1000>;
index 92828a1dec80c1c33d051d9b76063727598495d5..b48dd4f37f8067e781ee3e135ed7aff27940371f 100644 (file)
@@ -61,6 +61,7 @@ work_pending:
        movlt   scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
        ldmia   sp, {r0 - r6}                   @ have to reload r0 - r6
        b       local_restart                   @ ... and off we go
+ENDPROC(ret_fast_syscall)
 
 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
index bd755d97e459d77ff05cc8a1264f336c58c1b598..29e2991465cb27b579f729deec65e2293a0a04b5 100644 (file)
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
        sub     lr, r4, r5                      @ mmu has been enabled
        add     r3, r7, lr
        ldrd    r4, [r3, #0]                    @ get secondary_data.pgdir
+ARM_BE8(eor    r4, r4, r5)                     @ Swap r5 and r4 in BE:
+ARM_BE8(eor    r5, r4, r5)                     @ it can be done in 3 steps
+ARM_BE8(eor    r4, r4, r5)                     @ without using a temp reg.
        ldr     r8, [r3, #8]                    @ get secondary_data.swapper_pg_dir
        badr    lr, __enable_mmu                @ return address
        mov     r13, r12                        @ __secondary_switched address
index efe17dd9b9218b7ef16299700a0f2a6d74ca61c1..54a5aeab988d3526657b8e3089942ca8cfe4fe5e 100644 (file)
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-       struct timespec xtime_coarse;
        struct timespec64 *wtm = &tk->wall_to_monotonic;
 
        if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
 
        vdso_write_begin(vdso_data);
 
-       xtime_coarse = __current_kernel_time();
        vdso_data->tk_is_cntvct                 = tk_is_cntvct(tk);
-       vdso_data->xtime_coarse_sec             = xtime_coarse.tv_sec;
-       vdso_data->xtime_coarse_nsec            = xtime_coarse.tv_nsec;
+       vdso_data->xtime_coarse_sec             = tk->xtime_sec;
+       vdso_data->xtime_coarse_nsec            = (u32)(tk->tkr_mono.xtime_nsec >>
+                                                       tk->tkr_mono.shift);
        vdso_data->wtm_clock_sec                = wtm->tv_sec;
        vdso_data->wtm_clock_nsec               = wtm->tv_nsec;
 
index 6001f1c9d136f45fabd7d61e97638855d0beb46a..4a87e86dec45d1546153ca0ebb7310bbd5f82d93 100644 (file)
@@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
                pd->base = of_iomap(np, 0);
                if (!pd->base) {
                        pr_warn("%s: failed to map memory\n", __func__);
-                       kfree(pd->pd.name);
+                       kfree_const(pd->pd.name);
                        kfree(pd);
-                       of_node_put(np);
                        continue;
                }
 
index 9d259d94e429c4cc493542ad4cf238a513b13743..1160434eece0509c3797733b49e8fcb1262e42e7 100644 (file)
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 VDSO_LDFLAGS += -nostdlib -shared
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
 
 obj-$(CONFIG_VDSO) += vdso.o
 extra-$(CONFIG_VDSO) += vdso.lds
index ad4d44635c7601162ca0dd8f1b626df28eeeafb2..a6f6b762c47a4c5a2d395e13a1d564964595abe1 100644 (file)
@@ -80,7 +80,7 @@ syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
-       daddiu  a1, v0, __NR_64_Linux
+       move    a1, v0
        jal     syscall_trace_enter
 
        bltz    v0, 2f                  # seccomp failed? Skip syscall
index 446cc654da56c5f5fcaad749242dd98d593776e1..4b2010654c463158b7dee80194de736195c04595 100644 (file)
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
-       daddiu  a1, v0, __NR_N32_Linux
+       move    a1, v0
        jal     syscall_trace_enter
 
        bltz    v0, 2f                  # seccomp failed? Skip syscall
index 5a1844765a7aba6dab47b878daf6eb723c044c03..a7e257d9cb90b9f34ecb03180fec8c54f2afd82f 100644 (file)
@@ -140,6 +140,7 @@ sysexit_from_sys_call:
         */
        andl    $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        movl    RIP(%rsp), %ecx         /* User %eip */
+       movq    RAX(%rsp), %rax
        RESTORE_RSI_RDI
        xorl    %edx, %edx              /* Do not leak kernel information */
        xorq    %r8, %r8
@@ -219,7 +220,6 @@ sysexit_from_sys_call:
 1:     setbe   %al                     /* 1 if error, 0 if not */
        movzbl  %al, %edi               /* zero-extend that into %edi */
        call    __audit_syscall_exit
-       movq    RAX(%rsp), %rax         /* reload syscall return value */
        movl    $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
@@ -368,6 +368,7 @@ sysretl_from_sys_call:
        RESTORE_RSI_RDI_RDX
        movl    RIP(%rsp), %ecx
        movl    EFLAGS(%rsp), %r11d
+       movq    RAX(%rsp), %rax
        xorq    %r10, %r10
        xorq    %r9, %r9
        xorq    %r8, %r8
index b9826a981fb20fa45a7c1255e277e9ad1cd5d150..6326ae24e4d5b4f3d228111c10f5c85df0e40d3f 100644 (file)
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
        if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
                cpuc->shared_regs = allocate_shared_regs(cpu);
                if (!cpuc->shared_regs)
-                       return NOTIFY_BAD;
+                       goto err;
        }
 
        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
 
                cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
                if (!cpuc->constraint_list)
-                       return NOTIFY_BAD;
+                       goto err_shared_regs;
 
                cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
-               if (!cpuc->excl_cntrs) {
-                       kfree(cpuc->constraint_list);
-                       kfree(cpuc->shared_regs);
-                       return NOTIFY_BAD;
-               }
+               if (!cpuc->excl_cntrs)
+                       goto err_constraint_list;
+
                cpuc->excl_thread_id = 0;
        }
 
        return NOTIFY_OK;
+
+err_constraint_list:
+       kfree(cpuc->constraint_list);
+       cpuc->constraint_list = NULL;
+
+err_shared_regs:
+       kfree(cpuc->shared_regs);
+       cpuc->shared_regs = NULL;
+
+err:
+       return NOTIFY_BAD;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
index 63eb68b73589bcbbc21f9c526193adca0de2e52d..377e8f8ed39186ad4ef57b33264592ed8459a037 100644 (file)
@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
        cpumask_set_cpu(cpu, &cqm_cpumask);
 }
 
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
 {
        struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
        unsigned int cpu  = (unsigned long)hcpu;
 
        switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-               intel_cqm_cpu_prepare(cpu);
-               break;
        case CPU_DOWN_PREPARE:
                intel_cqm_cpu_exit(cpu);
                break;
        case CPU_STARTING:
+               intel_cqm_cpu_starting(cpu);
                cqm_pick_event_reader(cpu);
                break;
        }
@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
                goto out;
 
        for_each_online_cpu(i) {
-               intel_cqm_cpu_prepare(i);
+               intel_cqm_cpu_starting(i);
                cqm_pick_event_reader(i);
        }
 
index 6273324186ac5ca7adba69be5ded69f23d8882f7..0ccb53a9fcd9361b83c7acd26e1f64601816a3d1 100644 (file)
@@ -28,11 +28,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
                struct desc_struct *desc;
                unsigned long base;
 
-               seg &= ~7UL;
+               seg >>= 3;
 
                mutex_lock(&child->mm->context.lock);
                if (unlikely(!child->mm->context.ldt ||
-                            (seg >> 3) >= child->mm->context.ldt->size))
+                            seg >= child->mm->context.ldt->size))
                        addr = -1L; /* bogus selector, access would fault */
                else {
                        desc = &child->mm->context.ldt->entries[seg];
index 5ef2560075bfb80e6fdabcdf51f71258091e4339..8f0f6eca69da1dc6db95c16782871580bf57091d 100644 (file)
@@ -2105,7 +2105,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (guest_cpuid_has_tsc_adjust(vcpu)) {
                        if (!msr_info->host_initiated) {
                                s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
-                               kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+                               adjust_tsc_offset_guest(vcpu, adj);
                        }
                        vcpu->arch.ia32_tsc_adjust_msr = data;
                }
@@ -6327,6 +6327,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 static void process_smi(struct kvm_vcpu *vcpu)
 {
        struct kvm_segment cs, ds;
+       struct desc_ptr dt;
        char buf[512];
        u32 cr0;
 
@@ -6359,6 +6360,10 @@ static void process_smi(struct kvm_vcpu *vcpu)
 
        kvm_x86_ops->set_cr4(vcpu, 0);
 
+       /* Undocumented: IDT limit is set to zero on entry to SMM.  */
+       dt.address = dt.size = 0;
+       kvm_x86_ops->set_idt(vcpu, &dt);
+
        __kvm_set_dr(vcpu, 7, DR7_FIXED_1);
 
        cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
index f37e84ab49f38e335bde57880a6cbe8640fb2c4b..3d8f2e421466a8af255eba9602748fee8753a377 100644 (file)
@@ -29,7 +29,6 @@
 
 #include <asm/uaccess.h>
 #include <asm/traps.h>
-#include <asm/desc.h>
 #include <asm/user.h>
 #include <asm/fpu/internal.h>
 
@@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info)
                        math_abort(FPU_info, SIGILL);
                }
 
-               code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+               code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
                if (SEG_D_SIZE(code_descriptor)) {
                        /* The above test may be wrong, the book is not clear */
                        /* Segmented 32 bit protected mode */
index 9ccecb61a4fa129a82028b27edc18b91a2f99042..5e044d506b7aae8b17b2142966b11477cfe8e372 100644 (file)
 #include <linux/kernel.h>
 #include <linux/mm.h>
 
-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s)      (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+       static struct desc_struct zero_desc;
+       struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+       seg >>= 3;
+       mutex_lock(&current->mm->context.lock);
+       if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+               ret = current->mm->context.ldt->entries[seg];
+       mutex_unlock(&current->mm->context.lock);
+#endif
+       return ret;
+}
+
 #define SEG_D_SIZE(x)          ((x).b & (3 << 21))
 #define SEG_G_BIT(x)           ((x).b & (1 << 23))
 #define SEG_GRANULARITY(x)     (((x).b & (1 << 23)) ? 4096 : 1)
index 6ef5e99380f92134ba86a6a693b5ac6d3434e6d4..8300db71c2a62681006e137350961742190ec9dc 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/stddef.h>
 
 #include <asm/uaccess.h>
-#include <asm/desc.h>
 
 #include "fpu_system.h"
 #include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
                addr->selector = PM_REG_(segment);
        }
 
-       descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+       descriptor = FPU_get_ldt_descriptor(addr->selector);
        base_address = SEG_BASE_ADDR(descriptor);
        address = base_address + offset;
        limit = base_address
index e88fda867a33b198bc356aded57d59f48fcfb4ee..484145368a241207d8aa80a5f758a7d0f3ef54cb 100644 (file)
@@ -8,7 +8,7 @@ config XEN
        select PARAVIRT_CLOCK
        select XEN_HAVE_PVMMU
        depends on X86_64 || (X86_32 && X86_PAE)
-       depends on X86_TSC
+       depends on X86_LOCAL_APIC && X86_TSC
        help
          This is the Linux Xen port.  Enabling this will allow the
          kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
 config XEN_DOM0
        def_bool y
        depends on XEN && PCI_XEN && SWIOTLB_XEN
-       depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+       depends on X86_IO_APIC && ACPI && PCI
 
 config XEN_PVHVM
        def_bool y
index 12600bfffca93f4547e2325eeda9669ff443a7a7..e0057d035200c4dd5e42d191f0395a7769489905 100644 (file)
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  * Description:
  *    Enables a low level driver to set a hard upper limit,
  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
- *    the device driver based upon the combined capabilities of I/O
- *    controller and storage device.
+ *    the device driver based upon the capabilities of the I/O
+ *    controller.
  *
  *    max_sectors is a soft limit imposed by the block layer for
  *    filesystem type requests.  This value can be overridden on a
index a3da6770bc9ed2bf66d59e8e74461829eeb4fe4e..b8efe36ce1142d0c6b0b8e45ec23965ec7135c40 100644 (file)
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
        struct scatterlist *cipher = areq_ctx->cipher;
        struct scatterlist *hsg = areq_ctx->hsg;
        struct scatterlist *tsg = areq_ctx->tsg;
-       struct scatterlist *assoc1;
-       struct scatterlist *assoc2;
        unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
        unsigned int cryptlen = req->cryptlen;
        struct page *dstp;
@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
                cryptlen += ivsize;
        }
 
-       if (sg_is_last(assoc))
-               return -EINVAL;
-
-       assoc1 = assoc + 1;
-       if (sg_is_last(assoc1))
-               return -EINVAL;
-
-       assoc2 = assoc + 2;
-       if (!sg_is_last(assoc2))
+       if (assoc->length < 12)
                return -EINVAL;
 
        sg_init_table(hsg, 2);
-       sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-       sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+       sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+       sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
        sg_init_table(tsg, 1);
-       sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+       sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
        areq_ctx->cryptlen = cryptlen;
-       areq_ctx->headlen = assoc->length + assoc2->length;
-       areq_ctx->trailen = assoc1->length;
+       areq_ctx->headlen = 8;
+       areq_ctx->trailen = 4;
        areq_ctx->sg = dst;
 
        areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
        struct scatterlist *cipher = areq_ctx->cipher;
        struct scatterlist *hsg = areq_ctx->hsg;
        struct scatterlist *tsg = areq_ctx->tsg;
-       struct scatterlist *assoc1;
-       struct scatterlist *assoc2;
        unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
        struct page *srcp;
        u8 *vsrc;
@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
                cryptlen += ivsize;
        }
 
-       if (sg_is_last(assoc))
-               return -EINVAL;
-
-       assoc1 = assoc + 1;
-       if (sg_is_last(assoc1))
-               return -EINVAL;
-
-       assoc2 = assoc + 2;
-       if (!sg_is_last(assoc2))
+       if (assoc->length < 12)
                return -EINVAL;
 
        sg_init_table(hsg, 2);
-       sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-       sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+       sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+       sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
        sg_init_table(tsg, 1);
-       sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+       sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
        areq_ctx->cryptlen = cryptlen;
-       areq_ctx->headlen = assoc->length + assoc2->length;
-       areq_ctx->trailen = assoc1->length;
+       areq_ctx->headlen = 8;
+       areq_ctx->trailen = 4;
        areq_ctx->sg = src;
 
        areq_ctx->complete = authenc_esn_verify_ahash_done;
index 815f75ef24119eab28ce3c0c2047295c6e464c58..2922f1f252d58aafd2d6c233404ae7ca21abb524 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 #include <acpi/video.h>
 
 ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
 
 static bool backlight_notifier_registered;
 static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;
 
 static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
 static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
        { },
 };
 
+/* This uses a workqueue to avoid various locking ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+       if (acpi_video_get_backlight_type() != acpi_backlight_video)
+               acpi_video_unregister_backlight();
+}
+
 static int acpi_video_backlight_notify(struct notifier_block *nb,
                                       unsigned long val, void *bd)
 {
@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
 
        /* A raw bl registering may change video -> native */
        if (backlight->props.type == BACKLIGHT_RAW &&
-           val == BACKLIGHT_REGISTERED &&
-           acpi_video_get_backlight_type() != acpi_backlight_video)
-               acpi_video_unregister_backlight();
+           val == BACKLIGHT_REGISTERED)
+               schedule_work(&backlight_notify_work);
 
        return NOTIFY_OK;
 }
@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
                acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                                    ACPI_UINT32_MAX, find_video, NULL,
                                    &video_caps, NULL);
+               INIT_WORK(&backlight_notify_work,
+                         acpi_video_backlight_notify_work);
                backlight_nb.notifier_call = acpi_video_backlight_notify;
                backlight_nb.priority = 0;
                if (backlight_register_notifier(&backlight_nb) == 0)
index ce1e3a8859815ca5724e376de6d6ab0d549c9831..14b7305d2ba0b3cc24aa101e76c87e242f01f537 100644 (file)
@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
         * Other architectures (e.g., ARM) either do not support big endian, or
         * else leave I/O in little endian mode.
         */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                return __raw_readl(addr);
        else
                return readl_relaxed(addr);
@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
 static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
 {
        /* See brcm_sata_readreg() comments */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                __raw_writel(val, addr);
        else
                writel_relaxed(val, addr);
@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
                           priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int brcm_ahci_suspend(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
        brcm_sata_phys_enable(priv);
        return ahci_platform_resume(dev);
 }
+#endif
 
 static struct scsi_host_template ahci_platform_sht = {
        AHCI_SHT(DRV_NAME),
index db5d9f79a247c5ceb2cb590f206927c22f6f2b7c..19bcb80b20313932021b1ee613eed97f4473e17e 100644 (file)
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
  *     RETURNS:
  *     Block address read from @tf.
  */
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
 {
        u64 block = 0;
 
-       if (!dev || tf->flags & ATA_TFLAG_LBA) {
+       if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
        return 0;
 }
 
-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
-       unsigned int err_mask;
-
-       if (!ata_id_has_sense_reporting(dev->id))
-               return;
-
-       if (ata_id_sense_reporting_enabled(dev->id))
-               return;
-
-       err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
-       if (err_mask) {
-               ata_dev_dbg(dev,
-                           "failed to enable Sense Data Reporting, Emask 0x%x\n",
-                           err_mask);
-       }
-}
-
 /**
  *     ata_dev_configure - Configure the specified ATA/ATAPI device
  *     @dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
                                        dev->devslp_timing[i] = sata_setting[j];
                                }
                }
-               ata_dev_config_sense_reporting(dev);
+
                dev->cdb_len = 16;
        }
 
index 7465031a893c60c9e61f2c911abf218b39c81d2e..cb0508af1459ac43f4aa26f1a16d94134bd9d0bc 100644 (file)
@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
        tf->hob_lbah = buf[10];
        tf->nsect = buf[12];
        tf->hob_nsect = buf[13];
-       if (ata_id_has_ncq_autosense(dev->id))
-               tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
 
        return 0;
 }
@@ -1629,70 +1627,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
        return err_mask;
 }
 
-/**
- *     ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- *     @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- *     @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- *     @dfl_sense_key: default sense key to use
- *
- *     Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- *     SENSE.  This function is EH helper.
- *
- *     LOCKING:
- *     Kernel thread context (may sleep).
- *
- *     RETURNS:
- *     encoded sense data on success, 0 on failure or if sense data
- *     is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
-                               struct scsi_cmnd *cmd)
-{
-       struct ata_device *dev = qc->dev;
-       struct ata_taskfile tf;
-       unsigned int err_mask;
-
-       if (!cmd)
-               return 0;
-
-       DPRINTK("ATA request sense\n");
-       ata_dev_warn(dev, "request sense\n");
-       if (!ata_id_sense_reporting_enabled(dev->id)) {
-               ata_dev_warn(qc->dev, "sense data reporting disabled\n");
-               return 0;
-       }
-       ata_tf_init(dev, &tf);
-
-       tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-       tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
-       tf.command = ATA_CMD_REQ_SENSE_DATA;
-       tf.protocol = ATA_PROT_NODATA;
-
-       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
-       /*
-        * ACS-4 states:
-        * The device may set the SENSE DATA AVAILABLE bit to one in the
-        * STATUS field and clear the ERROR bit to zero in the STATUS field
-        * to indicate that the command returned completion without an error
-        * and the sense data described in table 306 is available.
-        *
-        * IOW the 'ATA_SENSE' bit might not be set even though valid
-        * sense data is available.
-        * So check for both.
-        */
-       if ((tf.command & ATA_SENSE) ||
-               tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
-               ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
-               qc->flags |= ATA_QCFLAG_SENSE_VALID;
-               ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
-                            tf.lbah, tf.lbam, tf.lbal);
-       } else {
-               ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
-                            tf.command, err_mask);
-       }
-       return err_mask;
-}
-
 /**
  *     atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  *     @dev: device to perform REQUEST_SENSE to
@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
        memcpy(&qc->result_tf, &tf, sizeof(tf));
        qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
        qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
-       if (qc->result_tf.auxiliary) {
-               char sense_key, asc, ascq;
-
-               sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
-               asc = (qc->result_tf.auxiliary >> 8) & 0xff;
-               ascq = qc->result_tf.auxiliary & 0xff;
-               ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
-                           sense_key, asc, ascq);
-               ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
-               ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
-               qc->flags |= ATA_QCFLAG_SENSE_VALID;
-       }
-
        ehc->i.err_mask &= ~AC_ERR_DEV;
 }
 
@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
                return ATA_EH_RESET;
        }
 
-       /*
-        * Sense data reporting does not work if the
-        * device fault bit is set.
-        */
-       if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
-           !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
-               if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
-                       tmp = ata_eh_request_sense(qc, qc->scsicmd);
-                       if (tmp)
-                               qc->err_mask |= tmp;
-                       else
-                               ata_scsi_set_sense_information(qc->scsicmd, tf);
-               } else {
-                       ata_dev_warn(qc->dev, "sense data available but port frozen\n");
-               }
-       }
-
-       /* Set by NCQ autosense or request sense above */
-       if (qc->flags & ATA_QCFLAG_SENSE_VALID)
-               return 0;
-
        if (stat & (ATA_ERR | ATA_DF))
                qc->err_mask |= AC_ERR_DEV;
        else
@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
 
 #ifdef CONFIG_ATA_VERBOSE_ERROR
                if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
-                                   ATA_SENSE | ATA_ERR)) {
+                                   ATA_ERR)) {
                        if (res->command & ATA_BUSY)
                                ata_dev_err(qc->dev, "status: { Busy }\n");
                        else
-                               ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+                               ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
                                  res->command & ATA_DRDY ? "DRDY " : "",
                                  res->command & ATA_DF ? "DF " : "",
                                  res->command & ATA_DRQ ? "DRQ " : "",
-                                 res->command & ATA_SENSE ? "SENSE " : "",
                                  res->command & ATA_ERR ? "ERR " : "");
                }
 
index 641a61a59e89c00036af65d3a31fe2cf67eb22b8..0d7f0da3a26929622080f94a2a3125c63676999e 100644 (file)
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
            ata_scsi_park_show, ata_scsi_park_store);
 EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
 
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
 {
-       if (!cmd)
-               return;
-
        cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
        scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
 }
 
-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-                                   const struct ata_taskfile *tf)
-{
-       u64 information;
-
-       if (!cmd)
-               return;
-
-       information = ata_tf_read_block(tf, NULL);
-       scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
 static ssize_t
 ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
            ((cdb[2] & 0x20) || need_sense)) {
                ata_gen_passthru_sense(qc);
        } else {
-               if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
-                       cmd->result = SAM_STAT_CHECK_CONDITION;
-               } else if (!need_sense) {
+               if (!need_sense) {
                        cmd->result = SAM_STAT_GOOD;
                } else {
                        /* TODO: decide which descriptor format to use
index a998a175f9f144b50e4df782bbf7d1afd5f506cb..f840ca18a7c014f5151d22e4bc55dff9fca459de 100644 (file)
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                           u64 block, u32 n_block, unsigned int tf_flags,
                           unsigned int tag);
-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
-                            struct ata_device *dev);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
 extern unsigned ata_exec_internal(struct ata_device *dev,
                                  struct ata_taskfile *tf, const u8 *cdb,
                                  int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
                              struct scsi_host_template *sht);
 extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-                                          const struct ata_taskfile *tf);
 extern void ata_scsi_media_change_notify(struct ata_device *dev);
 extern void ata_scsi_hotplug(struct work_struct *work);
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
index 3a18a8a719b4ff1fa562a515b4701da241e4aeb7..fab504fd9cfd7ace54d772927a01650373d02206 100644 (file)
@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
        readl(mmio + PDC_SDRAM_CONTROL);
 
        /* Turn on for ECC */
-       pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
-                         PDC_DIMM_SPD_TYPE, &spd0);
+       if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+                              PDC_DIMM_SPD_TYPE, &spd0)) {
+               pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+                      PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+               return 1;
+       }
        if (spd0 == 0x02) {
                data |= (0x01 << 16);
                writel(data, mmio + PDC_SDRAM_CONTROL);
@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
 
        /* ECC initiliazation. */
 
-       pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
-                         PDC_DIMM_SPD_TYPE, &spd0);
+       if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+                              PDC_DIMM_SPD_TYPE, &spd0)) {
+               pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+                      PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+               return 1;
+       }
        if (spd0 == 0x02) {
                void *buf;
                VPRINTK("Start ECC initialization\n");
index 2e8cd147f02d847c3c8f1bd89da070fc76e93027..4c20828993222164251e6294f1319572bcdc3e89 100644 (file)
@@ -537,7 +537,7 @@ bool device_dma_is_coherent(struct device *dev)
 EXPORT_SYMBOL_GPL(device_dma_is_coherent);
 
 /**
- * device_get_phy_mode - Get phy mode for given device_node
+ * device_get_phy_mode - Get phy mode for given device
  * @dev:       Pointer to the given device
  *
  * The function gets phy interface string from property 'phy-mode' or
@@ -570,13 +570,18 @@ static void *device_get_mac_addr(struct device *dev,
 {
        int ret = device_property_read_u8_array(dev, name, addr, alen);
 
-       if (ret == 0 && is_valid_ether_addr(addr))
+       if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
                return addr;
        return NULL;
 }
 
 /**
- * Search the device tree for the best MAC address to use.  'mac-address' is
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev:       Pointer to the device
+ * @addr:      Address of buffer to store the MAC in
+ * @alen:      Length of the buffer pointed to by addr, should be ETH_ALEN
+ *
+ * Search the firmware node for the best MAC address to use.  'mac-address' is
  * checked first, because that is supposed to contain to "most recent" MAC
  * address. If that isn't set, then 'local-mac-address' is checked next,
  * because that is the default address.  If that isn't set, then the obsolete
@@ -587,11 +592,11 @@ static void *device_get_mac_addr(struct device *dev,
  * MAC address.
  *
  * All-zero MAC addresses are rejected, because those could be properties that
- * exist in the device tree, but were not set by U-Boot.  For example, the
- * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
- * addresses.  Some older U-Boots only initialized 'local-mac-address'.  In
- * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
- * but is all zeros.
+ * exist in the firmware tables, but were not updated by the firmware.  For
+ * example, the DTS could define 'mac-address' and 'local-mac-address', with
+ * zero MAC addresses.  Some older U-Boots only initialized 'local-mac-address'.
+ * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
+ * exists but is all zeros.
 */
 void *device_get_mac_address(struct device *dev, char *addr, int alen)
 {
index be5fffb6da2480845d14f920bd012db184887734..023d448ed3fa6047549d635fade7fb26205a8633 100644 (file)
@@ -92,7 +92,7 @@ config BCMA_DRIVER_GMAC_CMN
 config BCMA_DRIVER_GPIO
        bool "BCMA GPIO driver"
        depends on BCMA && GPIOLIB
-       select IRQ_DOMAIN if BCMA_HOST_SOC
+       select GPIOLIB_IRQCHIP if BCMA_HOST_SOC
        help
          Driver to provide access to the GPIO pins of the bcma bus.
 
index 15f2b2e242ea76b9ed882383f51a976a72d7b325..38f156745d533a666deae90f167dd6d51d718d54 100644 (file)
@@ -34,6 +34,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus);
 int bcma_bus_suspend(struct bcma_bus *bus);
 int bcma_bus_resume(struct bcma_bus *bus);
 #endif
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus);
 
 /* scan.c */
 void bcma_detect_chip(struct bcma_bus *bus);
index 5f6018e7cd4c42c5b4e2f7670c28e05a2bc21eb5..504899a7296649cecf922065e29282189f6d3d3b 100644 (file)
@@ -8,10 +8,8 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
-#include <linux/gpio.h>
-#include <linux/irq.h>
+#include <linux/gpio/driver.h>
 #include <linux/interrupt.h>
-#include <linux/irqdomain.h>
 #include <linux/export.h>
 #include <linux/bcma/bcma.h>
 
@@ -79,19 +77,11 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
 }
 
 #if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
-static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
-{
-       struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
-
-       if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
-               return irq_find_mapping(cc->irq_domain, gpio);
-       else
-               return -EINVAL;
-}
 
 static void bcma_gpio_irq_unmask(struct irq_data *d)
 {
-       struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
        int gpio = irqd_to_hwirq(d);
        u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));
 
@@ -101,7 +91,8 @@ static void bcma_gpio_irq_unmask(struct irq_data *d)
 
 static void bcma_gpio_irq_mask(struct irq_data *d)
 {
-       struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
        int gpio = irqd_to_hwirq(d);
 
        bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
@@ -116,6 +107,7 @@ static struct irq_chip bcma_gpio_irq_chip = {
 static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
 {
        struct bcma_drv_cc *cc = dev_id;
+       struct gpio_chip *gc = &cc->gpio;
        u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN);
        u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ);
        u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL);
@@ -125,81 +117,58 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
        if (!irqs)
                return IRQ_NONE;
 
-       for_each_set_bit(gpio, &irqs, cc->gpio.ngpio)
-               generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio));
+       for_each_set_bit(gpio, &irqs, gc->ngpio)
+               generic_handle_irq(irq_find_mapping(gc->irqdomain, gpio));
        bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
 
        return IRQ_HANDLED;
 }
 
-static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
 {
        struct gpio_chip *chip = &cc->gpio;
-       int gpio, hwirq, err;
+       int hwirq, err;
 
        if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
                return 0;
 
-       cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
-                                              &irq_domain_simple_ops, cc);
-       if (!cc->irq_domain) {
-               err = -ENODEV;
-               goto err_irq_domain;
-       }
-       for (gpio = 0; gpio < chip->ngpio; gpio++) {
-               int irq = irq_create_mapping(cc->irq_domain, gpio);
-
-               irq_set_chip_data(irq, cc);
-               irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip,
-                                        handle_simple_irq);
-       }
-
        hwirq = bcma_core_irq(cc->core, 0);
        err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
                          cc);
        if (err)
-               goto err_req_irq;
+               return err;
 
        bcma_chipco_gpio_intmask(cc, ~0, 0);
        bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
 
-       return 0;
-
-err_req_irq:
-       for (gpio = 0; gpio < chip->ngpio; gpio++) {
-               int irq = irq_find_mapping(cc->irq_domain, gpio);
-
-               irq_dispose_mapping(irq);
+       err =  gpiochip_irqchip_add(chip,
+                                   &bcma_gpio_irq_chip,
+                                   0,
+                                   handle_simple_irq,
+                                   IRQ_TYPE_NONE);
+       if (err) {
+               free_irq(hwirq, cc);
+               return err;
        }
-       irq_domain_remove(cc->irq_domain);
-err_irq_domain:
-       return err;
+
+       return 0;
 }
 
-static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
 {
-       struct gpio_chip *chip = &cc->gpio;
-       int gpio;
-
        if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
                return;
 
        bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
        free_irq(bcma_core_irq(cc->core, 0), cc);
-       for (gpio = 0; gpio < chip->ngpio; gpio++) {
-               int irq = irq_find_mapping(cc->irq_domain, gpio);
-
-               irq_dispose_mapping(irq);
-       }
-       irq_domain_remove(cc->irq_domain);
 }
 #else
-static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
 {
        return 0;
 }
 
-static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
 {
 }
 #endif
@@ -218,9 +187,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        chip->set               = bcma_gpio_set_value;
        chip->direction_input   = bcma_gpio_direction_input;
        chip->direction_output  = bcma_gpio_direction_output;
-#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
-       chip->to_irq            = bcma_gpio_to_irq;
-#endif
+       chip->owner             = THIS_MODULE;
+       chip->dev               = bcma_bus_get_host_dev(bus);
 #if IS_BUILTIN(CONFIG_OF)
        if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
                chip->of_node   = cc->core->dev.of_node;
@@ -248,13 +216,13 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        else
                chip->base              = -1;
 
-       err = bcma_gpio_irq_domain_init(cc);
+       err = gpiochip_add(chip);
        if (err)
                return err;
 
-       err = gpiochip_add(chip);
+       err = bcma_gpio_irq_init(cc);
        if (err) {
-               bcma_gpio_irq_domain_exit(cc);
+               gpiochip_remove(chip);
                return err;
        }
 
@@ -263,7 +231,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
 
 int bcma_gpio_unregister(struct bcma_drv_cc *cc)
 {
-       bcma_gpio_irq_domain_exit(cc);
+       bcma_gpio_irq_exit(cc);
        gpiochip_remove(&cc->gpio);
        return 0;
 }
index 8d973c4fc84e3185af77b6c4da0a64103be2e7b9..24882c18fcbe6911c7266aa4dcf42903ffb7dbef 100644 (file)
@@ -7,7 +7,9 @@
 
 #include "bcma_private.h"
 #include <linux/module.h>
+#include <linux/mmc/sdio_func.h>
 #include <linux/platform_device.h>
+#include <linux/pci.h>
 #include <linux/bcma/bcma.h>
 #include <linux/slab.h>
 #include <linux/of_address.h>
@@ -269,6 +271,28 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
        }
 }
 
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
+{
+       switch (bus->hosttype) {
+       case BCMA_HOSTTYPE_PCI:
+               if (bus->host_pci)
+                       return &bus->host_pci->dev;
+               else
+                       return NULL;
+       case BCMA_HOSTTYPE_SOC:
+               if (bus->host_pdev)
+                       return &bus->host_pdev->dev;
+               else
+                       return NULL;
+       case BCMA_HOSTTYPE_SDIO:
+               if (bus->host_sdio)
+                       return &bus->host_sdio->dev;
+               else
+                       return NULL;
+       }
+       return NULL;
+}
+
 void bcma_init_bus(struct bcma_bus *bus)
 {
        mutex_lock(&bcma_buses_mutex);
@@ -388,6 +412,7 @@ int bcma_bus_register(struct bcma_bus *bus)
 {
        int err;
        struct bcma_device *core;
+       struct device *dev;
 
        /* Scan for devices (cores) */
        err = bcma_bus_scan(bus);
@@ -410,13 +435,12 @@ int bcma_bus_register(struct bcma_bus *bus)
                bcma_core_pci_early_init(&bus->drv_pci[0]);
        }
 
+       dev = bcma_bus_get_host_dev(bus);
        /* TODO: remove check for IS_BUILTIN(CONFIG_BCMA) check when
         * of_default_bus_match_table is exported or in some other way
         * accessible. This is just a temporary workaround.
         */
-       if (IS_BUILTIN(CONFIG_BCMA) && bus->host_pdev) {
-               struct device *dev = &bus->host_pdev->dev;
-
+       if (IS_BUILTIN(CONFIG_BCMA) && dev) {
                of_platform_populate(dev->of_node, of_default_bus_match_table,
                                     NULL, dev);
        }
index fb655e8d1e3b17bf4cda9fd09593bc7dc770f78d..763301c7828c72650f2abaa1c723425bdd3c73f4 100644 (file)
@@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
        kfree(meta);
 }
 
-static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
+static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
 {
        size_t num_pages;
-       char pool_name[8];
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
 
        if (!meta)
@@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
                goto out_error;
        }
 
-       snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
        meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
@@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
                return -EINVAL;
 
        disksize = PAGE_ALIGN(disksize);
-       meta = zram_meta_alloc(zram->disk->first_minor, disksize);
+       meta = zram_meta_alloc(zram->disk->disk_name, disksize);
        if (!meta)
                return -ENOMEM;
 
index 79e8234b1aa5995eaf8b8e25d729e6da193518fe..0bd88c942a5202845960ddbf62b35e9d1aa9e9d2 100644 (file)
@@ -13,6 +13,10 @@ config BT_RTL
        tristate
        select FW_LOADER
 
+config BT_QCA
+       tristate
+       select FW_LOADER
+
 config BT_HCIBTUSB
        tristate "HCI USB driver"
        depends on USB
@@ -151,6 +155,19 @@ config BT_HCIUART_BCM
 
          Say Y here to compile support for Broadcom protocol.
 
+config BT_HCIUART_QCA
+       bool "Qualcomm Atheros protocol support"
+       depends on BT_HCIUART
+       select BT_HCIUART_H4
+       select BT_QCA
+       help
+         The Qualcomm Atheros protocol supports HCI In-Band Sleep feature
+         over serial port interface(H4) between controller and host.
+         This protocol is required for UART clock control for QCA Bluetooth
+         devices.
+
+         Say Y here to compile support for QCA protocol.
+
 config BT_HCIBCM203X
        tristate "HCI BCM203x USB driver"
        depends on USB
index f40e194e7080183e999ebb5ac8381130cb7a4b54..07c9cf381e5aeb2585a18beb0f9aa98a75862e9e 100644 (file)
@@ -22,6 +22,7 @@ obj-$(CONFIG_BT_MRVL_SDIO)    += btmrvl_sdio.o
 obj-$(CONFIG_BT_WILINK)                += btwilink.o
 obj-$(CONFIG_BT_BCM)           += btbcm.o
 obj-$(CONFIG_BT_RTL)           += btrtl.o
+obj-$(CONFIG_BT_QCA)           += btqca.o
 
 btmrvl-y                       := btmrvl_main.o
 btmrvl-$(CONFIG_DEBUG_FS)      += btmrvl_debugfs.o
@@ -34,6 +35,7 @@ hci_uart-$(CONFIG_BT_HCIUART_ATH3K)   += hci_ath.o
 hci_uart-$(CONFIG_BT_HCIUART_3WIRE)    += hci_h5.o
 hci_uart-$(CONFIG_BT_HCIUART_INTEL)    += hci_intel.o
 hci_uart-$(CONFIG_BT_HCIUART_BCM)      += hci_bcm.o
+hci_uart-$(CONFIG_BT_HCIUART_QCA)      += hci_qca.o
 hci_uart-objs                          := $(hci_uart-y)
 
 ccflags-y += -D__CHECK_ENDIAN__
index b9a811900f6ab534087e17f3726c840a8afdb34e..7c097629e59312e232757aa8b5f67f4130b71d3e 100644 (file)
@@ -1071,8 +1071,6 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
                }
        }
 
-       sdio_release_host(card->func);
-
        /*
         * winner or not, with this test the FW synchronizes when the
         * module can continue its initialization
@@ -1082,6 +1080,8 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
                return -ETIMEDOUT;
        }
 
+       sdio_release_host(card->func);
+
        return 0;
 
 done:
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
new file mode 100644 (file)
index 0000000..4a62081
--- /dev/null
@@ -0,0 +1,392 @@
+/*
+ *  Bluetooth supports for Qualcomm Atheros chips
+ *
+ *  Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btqca.h"
+
+#define VERSION "0.1"
+
+static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
+{
+       struct sk_buff *skb;
+       struct edl_event_hdr *edl;
+       struct rome_version *ver;
+       char cmd;
+       int err = 0;
+
+       BT_DBG("%s: ROME Patch Version Request", hdev->name);
+
+       cmd = EDL_PATCH_VER_REQ_CMD;
+       skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+                               &cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name,
+                      err);
+               return err;
+       }
+
+       if (skb->len != sizeof(*edl) + sizeof(*ver)) {
+               BT_ERR("%s: Version size mismatch len %d", hdev->name,
+                      skb->len);
+               err = -EILSEQ;
+               goto out;
+       }
+
+       edl = (struct edl_event_hdr *)(skb->data);
+       if (!edl || !edl->data) {
+               BT_ERR("%s: TLV with no header or no data", hdev->name);
+               err = -EILSEQ;
+               goto out;
+       }
+
+       if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+           edl->rtype != EDL_APP_VER_RES_EVT) {
+               BT_ERR("%s: Wrong packet received %d %d", hdev->name,
+                      edl->cresp, edl->rtype);
+               err = -EIO;
+               goto out;
+       }
+
+       ver = (struct rome_version *)(edl->data);
+
+       BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
+       BT_DBG("%s: Patch  :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
+       BT_DBG("%s: ROM    :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
+       BT_DBG("%s: SOC    :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
+
+       /* ROME chipset version can be decided by patch and SoC
+        * version, combination with upper 2 bytes from SoC
+        * and lower 2 bytes from patch will be used.
+        */
+       *rome_version = (le32_to_cpu(ver->soc_id) << 16) |
+                       (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+
+out:
+       kfree_skb(skb);
+
+       return err;
+}
+
+static int rome_reset(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+       int err;
+
+       BT_DBG("%s: ROME HCI_RESET", hdev->name);
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Reset failed (%d)", hdev->name, err);
+               return err;
+       }
+
+       kfree_skb(skb);
+
+       return 0;
+}
+
+static void rome_tlv_check_data(struct rome_config *config,
+                               const struct firmware *fw)
+{
+       const u8 *data;
+       u32 type_len;
+       u16 tag_id, tag_len;
+       int idx, length;
+       struct tlv_type_hdr *tlv;
+       struct tlv_type_patch *tlv_patch;
+       struct tlv_type_nvm *tlv_nvm;
+
+       tlv = (struct tlv_type_hdr *)fw->data;
+
+       type_len = le32_to_cpu(tlv->type_len);
+       length = (type_len >> 8) & 0x00ffffff;
+
+       BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
+       BT_DBG("Length\t\t : %d bytes", length);
+
+       switch (config->type) {
+       case TLV_TYPE_PATCH:
+               tlv_patch = (struct tlv_type_patch *)tlv->data;
+               BT_DBG("Total Length\t\t : %d bytes",
+                      le32_to_cpu(tlv_patch->total_size));
+               BT_DBG("Patch Data Length\t : %d bytes",
+                      le32_to_cpu(tlv_patch->data_length));
+               BT_DBG("Signing Format Version : 0x%x",
+                      tlv_patch->format_version);
+               BT_DBG("Signature Algorithm\t : 0x%x",
+                      tlv_patch->signature);
+               BT_DBG("Reserved\t\t : 0x%x",
+                      le16_to_cpu(tlv_patch->reserved1));
+               BT_DBG("Product ID\t\t : 0x%04x",
+                      le16_to_cpu(tlv_patch->product_id));
+               BT_DBG("Rom Build Version\t : 0x%04x",
+                      le16_to_cpu(tlv_patch->rom_build));
+               BT_DBG("Patch Version\t\t : 0x%04x",
+                      le16_to_cpu(tlv_patch->patch_version));
+               BT_DBG("Reserved\t\t : 0x%x",
+                      le16_to_cpu(tlv_patch->reserved2));
+               BT_DBG("Patch Entry Address\t : 0x%x",
+                      le32_to_cpu(tlv_patch->entry));
+               break;
+
+       case TLV_TYPE_NVM:
+               idx = 0;
+               data = tlv->data;
+               while (idx < length) {
+                       tlv_nvm = (struct tlv_type_nvm *)(data + idx);
+
+                       tag_id = le16_to_cpu(tlv_nvm->tag_id);
+                       tag_len = le16_to_cpu(tlv_nvm->tag_len);
+
+                       /* Update NVM tags as needed */
+                       switch (tag_id) {
+                       case EDL_TAG_ID_HCI:
+                               /* HCI transport layer parameters
+                                * enabling software inband sleep
+                                * onto controller side.
+                                */
+                               tlv_nvm->data[0] |= 0x80;
+
+                               /* UART Baud Rate */
+                               tlv_nvm->data[2] = config->user_baud_rate;
+
+                               break;
+
+                       case EDL_TAG_ID_DEEP_SLEEP:
+                               /* Sleep enable mask
+                                * enabling deep sleep feature on controller.
+                                */
+                               tlv_nvm->data[0] |= 0x01;
+
+                               break;
+                       }
+
+                       idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
+               }
+               break;
+
+       default:
+               BT_ERR("Unknown TLV type %d", config->type);
+               break;
+       }
+}
+
+static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size,
+                                const u8 *data)
+{
+       struct sk_buff *skb;
+       struct edl_event_hdr *edl;
+       struct tlv_seg_resp *tlv_resp;
+       u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
+       int err = 0;
+
+       BT_DBG("%s: Download segment #%d size %d", hdev->name, idx, seg_size);
+
+       cmd[0] = EDL_PATCH_TLV_REQ_CMD;
+       cmd[1] = seg_size;
+       memcpy(cmd + 2, data, seg_size);
+
+       skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
+                               HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err);
+               return err;
+       }
+
+       if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
+               BT_ERR("%s: TLV response size mismatch", hdev->name);
+               err = -EILSEQ;
+               goto out;
+       }
+
+       edl = (struct edl_event_hdr *)(skb->data);
+       if (!edl || !edl->data) {
+               BT_ERR("%s: TLV with no header or no data", hdev->name);
+               err = -EILSEQ;
+               goto out;
+       }
+
+       tlv_resp = (struct tlv_seg_resp *)(edl->data);
+
+       if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+           edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
+               BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)",
+                      hdev->name, edl->cresp, edl->rtype, tlv_resp->result);
+               err = -EIO;
+       }
+
+out:
+       kfree_skb(skb);
+
+       return err;
+}
+
+static int rome_tlv_download_request(struct hci_dev *hdev,
+                                    const struct firmware *fw)
+{
+       const u8 *buffer, *data;
+       int total_segment, remain_size;
+       int ret, i;
+
+       if (!fw || !fw->data)
+               return -EINVAL;
+
+       total_segment = fw->size / MAX_SIZE_PER_TLV_SEGMENT;
+       remain_size = fw->size % MAX_SIZE_PER_TLV_SEGMENT;
+
+       BT_DBG("%s: Total segment num %d remain size %d total size %zu",
+              hdev->name, total_segment, remain_size, fw->size);
+
+       data = fw->data;
+       for (i = 0; i < total_segment; i++) {
+               buffer = data + i * MAX_SIZE_PER_TLV_SEGMENT;
+               ret = rome_tlv_send_segment(hdev, i, MAX_SIZE_PER_TLV_SEGMENT,
+                                           buffer);
+               if (ret < 0)
+                       return -EIO;
+       }
+
+       if (remain_size) {
+               buffer = data + total_segment * MAX_SIZE_PER_TLV_SEGMENT;
+               ret = rome_tlv_send_segment(hdev, total_segment, remain_size,
+                                           buffer);
+               if (ret < 0)
+                       return -EIO;
+       }
+
+       return 0;
+}
+
+static int rome_download_firmware(struct hci_dev *hdev,
+                                 struct rome_config *config)
+{
+       const struct firmware *fw;
+       int ret;
+
+       BT_INFO("%s: ROME Downloading %s", hdev->name, config->fwname);
+
+       ret = request_firmware(&fw, config->fwname, &hdev->dev);
+       if (ret) {
+               BT_ERR("%s: Failed to request file: %s (%d)", hdev->name,
+                      config->fwname, ret);
+               return ret;
+       }
+
+       rome_tlv_check_data(config, fw);
+
+       ret = rome_tlv_download_request(hdev, fw);
+       if (ret) {
+               BT_ERR("%s: Failed to download file: %s (%d)", hdev->name,
+                      config->fwname, ret);
+       }
+
+       release_firmware(fw);
+
+       return ret;
+}
+
+int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       struct sk_buff *skb;
+       u8 cmd[9];
+       int err;
+
+       cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD;
+       cmd[1] = 0x02;                  /* TAG ID */
+       cmd[2] = sizeof(bdaddr_t);      /* size */
+       memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
+       skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
+                               HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Change address command failed (%d)",
+                      hdev->name, err);
+               return err;
+       }
+
+       kfree_skb(skb);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+{
+       u32 rome_ver = 0;
+       struct rome_config config;
+       int err;
+
+       BT_DBG("%s: ROME setup on UART", hdev->name);
+
+       config.user_baud_rate = baudrate;
+
+       /* Get ROME version information */
+       err = rome_patch_ver_req(hdev, &rome_ver);
+       if (err < 0 || rome_ver == 0) {
+               BT_ERR("%s: Failed to get version 0x%x", hdev->name, err);
+               return err;
+       }
+
+       BT_INFO("%s: ROME controller version 0x%08x", hdev->name, rome_ver);
+
+       /* Download rampatch file */
+       config.type = TLV_TYPE_PATCH;
+       snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin",
+                rome_ver);
+       err = rome_download_firmware(hdev, &config);
+       if (err < 0) {
+               BT_ERR("%s: Failed to download patch (%d)", hdev->name, err);
+               return err;
+       }
+
+       /* Download NVM configuration */
+       config.type = TLV_TYPE_NVM;
+       snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
+                rome_ver);
+       err = rome_download_firmware(hdev, &config);
+       if (err < 0) {
+               BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err);
+               return err;
+       }
+
+       /* Perform HCI reset */
+       err = rome_reset(hdev);
+       if (err < 0) {
+               BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err);
+               return err;
+       }
+
+       BT_INFO("%s: ROME setup on UART is completed", hdev->name);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qca_uart_setup_rome);
+
+MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
new file mode 100644 (file)
index 0000000..65e994b
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ *  Bluetooth supports for Qualcomm Atheros ROME chips
+ *
+ *  Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
/* EDL (Embedded Download Layer) vendor command opcodes and sub-commands */
#define EDL_PATCH_CMD_OPCODE		(0xFC00)
#define EDL_NVM_ACCESS_OPCODE		(0xFC0B)
#define EDL_PATCH_CMD_LEN		(1)
#define EDL_PATCH_VER_REQ_CMD		(0x19)
#define EDL_PATCH_TLV_REQ_CMD		(0x1E)
#define EDL_NVM_ACCESS_SET_REQ_CMD	(0x01)
/* Maximum firmware payload carried by a single TLV download command */
#define MAX_SIZE_PER_TLV_SEGMENT	(243)

/* EDL vendor event codes returned by the controller */
#define EDL_CMD_REQ_RES_EVT		(0x00)
#define EDL_PATCH_VER_RES_EVT		(0x19)
#define EDL_APP_VER_RES_EVT		(0x02)
#define EDL_TVL_DNLD_RES_EVT		(0x04)
#define EDL_CMD_EXE_STATUS_EVT		(0x00)
#define EDL_SET_BAUDRATE_RSP_EVT	(0x92)
#define EDL_NVM_ACCESS_CODE_EVT		(0x0B)

/* NVM tag identifiers */
#define EDL_TAG_ID_HCI			(17)
#define EDL_TAG_ID_DEEP_SLEEP		(27)

/* Vendor baud rate codes.  NOTE: "bardrate" is a typo, kept because the
 * identifier may be referenced elsewhere.
 */
enum qca_bardrate {
	QCA_BAUDRATE_115200	= 0,
	QCA_BAUDRATE_57600,
	QCA_BAUDRATE_38400,
	QCA_BAUDRATE_19200,
	QCA_BAUDRATE_9600,
	QCA_BAUDRATE_230400,
	QCA_BAUDRATE_250000,
	QCA_BAUDRATE_460800,
	QCA_BAUDRATE_500000,
	QCA_BAUDRATE_720000,
	QCA_BAUDRATE_921600,
	QCA_BAUDRATE_1000000,
	QCA_BAUDRATE_1250000,
	QCA_BAUDRATE_2000000,
	QCA_BAUDRATE_3000000,
	QCA_BAUDRATE_4000000,
	QCA_BAUDRATE_1600000,
	QCA_BAUDRATE_3200000,
	QCA_BAUDRATE_3500000,
	QCA_BAUDRATE_AUTO	= 0xFE,
	QCA_BAUDRATE_RESERVED
};

/* Kind of firmware image being downloaded */
enum rome_tlv_type {
	TLV_TYPE_PATCH = 1,
	TLV_TYPE_NVM
};

/* Per-download state: firmware kind, file name and requested host baud */
struct rome_config {
	u8 type;		/* enum rome_tlv_type */
	char fwname[64];
	uint8_t user_baud_rate;
};

/* Header of every EDL vendor event; payload follows in data[] */
struct edl_event_hdr {
	__u8 cresp;
	__u8 rtype;
	__u8 data[0];
} __packed;

/* Version information returned by EDL_PATCH_VER_REQ_CMD (little endian) */
struct rome_version {
	__le32 product_id;
	__le16 patch_ver;
	__le16 rome_ver;
	__le32 soc_id;
} __packed;

/* One-byte status returned for each TLV segment (0x00 = success) */
struct tlv_seg_resp {
	__u8 result;
} __packed;

/* On-disk header of a TLV_TYPE_PATCH firmware image (little endian) */
struct tlv_type_patch {
	__le32 total_size;
	__le32 data_length;
	__u8   format_version;
	__u8   signature;
	__le16 reserved1;
	__le16 product_id;
	__le16 rom_build;
	__le16 patch_version;
	__le16 reserved2;
	__le32 entry;
} __packed;

/* One tagged entry in a TLV_TYPE_NVM image */
struct tlv_type_nvm {
	__le16 tag_id;
	__le16 tag_len;
	__le32 reserve1;
	__le32 reserve2;
	__u8   data[0];
} __packed;

/* Generic TLV container: type/length word followed by the payload */
struct tlv_type_hdr {
	__le32 type_len;
	__u8   data[0];
} __packed;
+
+#if IS_ENABLED(CONFIG_BT_QCA)
+
+int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate);
+
+#else
+
+static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int qca_uart_setup_rome(struct hci_dev *hdev, int speed)
+{
+       return -EOPNOTSUPP;
+}
+
+#endif
index cc92b0f84a5168e139435737cef2c63ab1ee68e6..f759dea7d3baeee54da02784df91417e4632f3f0 100644 (file)
@@ -322,6 +322,9 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
        { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
 
+       /* Silicon Wave based devices */
+       { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
+
        { }     /* Terminating entry */
 };
 
index 23523e140a9a11ef29e08541782e3d8627dca221..322302b04710f1764fd339247f0ee32aed924743 100644 (file)
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/tty.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include "btbcm.h"
 #include "hci_uart.h"
 
/* Per-instance state for an ACPI/platform-enumerated BCM UART device */
struct bcm_device {
	struct list_head	list;	/* entry in bcm_device_list */

	struct platform_device	*pdev;

	const char		*name;
	struct gpio_desc	*device_wakeup;	/* toggled on power/suspend */
	struct gpio_desc	*shutdown;	/* toggled with power state */

	struct clk		*clk;	/* optional; may be an ERR pointer */
	bool			clk_enabled;

	u32			init_speed;	/* UART baud from ACPI, else 0 */

#ifdef CONFIG_PM_SLEEP
	struct hci_uart		*hu;	/* set while the ldisc is open */
	bool			is_suspended; /* suspend/resume flag */
#endif
};
+
 struct bcm_data {
-       struct sk_buff *rx_skb;
-       struct sk_buff_head txq;
+       struct sk_buff          *rx_skb;
+       struct sk_buff_head     txq;
+
+       struct bcm_device       *dev;
 };
 
+/* List of BCM BT UART devices */
+static DEFINE_SPINLOCK(bcm_device_list_lock);
+static LIST_HEAD(bcm_device_list);
+
 static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
 {
        struct hci_dev *hdev = hu->hdev;
@@ -86,9 +118,41 @@ static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
        return 0;
 }
 
+/* bcm_device_exists should be protected by bcm_device_list_lock */
+static bool bcm_device_exists(struct bcm_device *device)
+{
+       struct list_head *p;
+
+       list_for_each(p, &bcm_device_list) {
+               struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+               if (device == dev)
+                       return true;
+       }
+
+       return false;
+}
+
+static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
+{
+       if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
+               clk_enable(dev->clk);
+
+       gpiod_set_value_cansleep(dev->shutdown, powered);
+       gpiod_set_value_cansleep(dev->device_wakeup, powered);
+
+       if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
+               clk_disable(dev->clk);
+
+       dev->clk_enabled = powered;
+
+       return 0;
+}
+
 static int bcm_open(struct hci_uart *hu)
 {
        struct bcm_data *bcm;
+       struct list_head *p;
 
        BT_DBG("hu %p", hu);
 
@@ -99,6 +163,30 @@ static int bcm_open(struct hci_uart *hu)
        skb_queue_head_init(&bcm->txq);
 
        hu->priv = bcm;
+
+       spin_lock(&bcm_device_list_lock);
+       list_for_each(p, &bcm_device_list) {
+               struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+               /* Retrieve saved bcm_device based on parent of the
+                * platform device (saved during device probe) and
+                * parent of tty device used by hci_uart
+                */
+               if (hu->tty->dev->parent == dev->pdev->dev.parent) {
+                       bcm->dev = dev;
+                       hu->init_speed = dev->init_speed;
+#ifdef CONFIG_PM_SLEEP
+                       dev->hu = hu;
+#endif
+                       break;
+               }
+       }
+
+       if (bcm->dev)
+               bcm_gpio_set_power(bcm->dev, true);
+
+       spin_unlock(&bcm_device_list_lock);
+
        return 0;
 }
 
@@ -108,6 +196,16 @@ static int bcm_close(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       /* Protect bcm->dev against removal of the device or driver */
+       spin_lock(&bcm_device_list_lock);
+       if (bcm_device_exists(bcm->dev)) {
+               bcm_gpio_set_power(bcm->dev, false);
+#ifdef CONFIG_PM_SLEEP
+               bcm->dev->hu = NULL;
+#endif
+       }
+       spin_unlock(&bcm_device_list_lock);
+
        skb_queue_purge(&bcm->txq);
        kfree_skb(bcm->rx_skb);
        kfree(bcm);
@@ -232,6 +330,188 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
        return skb_dequeue(&bcm->txq);
 }
 
+#ifdef CONFIG_PM_SLEEP
/* Platform suspend callback */
static int bcm_suspend(struct device *dev)
{
	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));

	BT_DBG("suspend (%p): is_suspended %d", bdev, bdev->is_suspended);

	/* NOTE(review): bdev->hu is only assigned while the HCI line
	 * discipline is open (bcm_open/bcm_close); confirm this callback
	 * cannot run with bdev->hu == NULL, or the call below derefs NULL.
	 */
	if (!bdev->is_suspended) {
		/* Throttle the UART before the chip stops responding */
		hci_uart_set_flow_control(bdev->hu, true);

		/* Once this callback returns, driver suspends BT via GPIO */
		bdev->is_suspended = true;
	}

	/* Suspend the device */
	if (bdev->device_wakeup) {
		gpiod_set_value(bdev->device_wakeup, false);
		BT_DBG("suspend, delaying 15 ms");
		/* busy-wait; NOTE(review): msleep() would likely suffice
		 * in this sleepable context -- confirm
		 */
		mdelay(15);
	}

	return 0;
}
+
/* Platform resume callback */
static int bcm_resume(struct device *dev)
{
	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));

	BT_DBG("resume (%p): is_suspended %d", bdev, bdev->is_suspended);

	/* Re-assert the wake GPIO first so the chip is responsive before
	 * flow control is re-opened below.
	 */
	if (bdev->device_wakeup) {
		gpiod_set_value(bdev->device_wakeup, true);
		BT_DBG("resume, delaying 15 ms");
		/* busy-wait; NOTE(review): msleep() would likely suffice */
		mdelay(15);
	}

	/* When this callback executes, the device has woken up already */
	if (bdev->is_suspended) {
		bdev->is_suspended = false;

		hci_uart_set_flow_control(bdev->hu, false);
	}

	return 0;
}
+#endif
+
/* ACPI GpioIo resources: index 0 = device-wakeup, index 1 = shutdown */
static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false };
static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };

/* Mapping registered via acpi_dev_add_driver_gpios() in bcm_acpi_probe() */
static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
	{ "device-wakeup-gpios", &device_wakeup_gpios, 1 },
	{ "shutdown-gpios", &shutdown_gpios, 1 },
	{ },
};
+
+#ifdef CONFIG_ACPI
+static int bcm_resource(struct acpi_resource *ares, void *data)
+{
+       struct bcm_device *dev = data;
+
+       if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+               struct acpi_resource_uart_serialbus *sb;
+
+               sb = &ares->data.uart_serial_bus;
+               if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
+                       dev->init_speed = sb->default_baud_rate;
+       }
+
+       /* Always tell the ACPI core to skip this resource */
+       return 1;
+}
+
/* Gather ACPI-provided resources for one BCM device: the GPIO mapping,
 * an optional clock and the UART default baud rate.
 *
 * Returns 0 on success or a negative errno.
 */
static int bcm_acpi_probe(struct bcm_device *dev)
{
	struct platform_device *pdev = dev->pdev;
	const struct acpi_device_id *id;
	struct acpi_device *adev;
	LIST_HEAD(resources);
	int ret;

	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
	if (!id)
		return -ENODEV;

	/* Retrieve GPIO data */
	dev->name = dev_name(&pdev->dev);
	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
					acpi_bcm_default_gpios);
	if (ret)
		return ret;

	/* May be an ERR pointer; every user checks IS_ERR() before use */
	dev->clk = devm_clk_get(&pdev->dev, NULL);

	dev->device_wakeup = devm_gpiod_get_optional(&pdev->dev,
						     "device-wakeup",
						     GPIOD_OUT_LOW);
	if (IS_ERR(dev->device_wakeup))
		return PTR_ERR(dev->device_wakeup);

	dev->shutdown = devm_gpiod_get_optional(&pdev->dev, "shutdown",
						GPIOD_OUT_LOW);
	if (IS_ERR(dev->shutdown))
		return PTR_ERR(dev->shutdown);

	/* Make sure at-least one of the GPIO is defined and that
	 * a name is specified for this instance
	 */
	if ((!dev->device_wakeup && !dev->shutdown) || !dev->name) {
		dev_err(&pdev->dev, "invalid platform data\n");
		return -EINVAL;
	}

	/* Retrieve UART ACPI info */
	adev = ACPI_COMPANION(&dev->pdev->dev);
	if (!adev)
		return 0;

	/* NOTE(review): return value ignored -- init_speed simply stays 0
	 * if the resource walk fails; confirm this is intentional.
	 */
	acpi_dev_get_resources(adev, &resources, bcm_resource, dev);

	return 0;
}
+#else
/* Without ACPI there is no way to discover this device's resources */
static int bcm_acpi_probe(struct bcm_device *dev)
{
	return -EINVAL;
}
+#endif /* CONFIG_ACPI */
+
/* Platform probe: allocate per-device state, pull GPIO/clock/UART info
 * from ACPI (or just a name from legacy platform data), put the
 * instance on the global list and power the chip down until an HCI
 * line discipline opens it (see bcm_open in this file).
 */
static int bcm_probe(struct platform_device *pdev)
{
	struct bcm_device *dev;
	struct acpi_device_id *pdata = pdev->dev.platform_data;
	int ret;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->pdev = pdev;

	if (ACPI_HANDLE(&pdev->dev)) {
		ret = bcm_acpi_probe(dev);
		if (ret)
			return ret;
	} else if (pdata) {
		/* Non-ACPI fallback: platform data only supplies a name */
		dev->name = pdata->id;
	} else {
		return -ENODEV;
	}

	platform_set_drvdata(pdev, dev);

	dev_info(&pdev->dev, "%s device registered.\n", dev->name);

	/* Place this instance on the device list */
	spin_lock(&bcm_device_list_lock);
	list_add_tail(&dev->list, &bcm_device_list);
	spin_unlock(&bcm_device_list_lock);

	/* Keep the controller powered off until it is actually used */
	bcm_gpio_set_power(dev, false);

	return 0;
}
+
/* Platform remove: take the instance off the global list first (so
 * bcm_close() can no longer find it), then drop the ACPI GPIO mapping.
 * The bcm_device itself is devm-allocated and freed automatically.
 */
static int bcm_remove(struct platform_device *pdev)
{
	struct bcm_device *dev = platform_get_drvdata(pdev);

	spin_lock(&bcm_device_list_lock);
	list_del(&dev->list);
	spin_unlock(&bcm_device_list_lock);

	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));

	dev_info(&pdev->dev, "%s device unregistered.\n", dev->name);

	return 0;
}
+
 static const struct hci_uart_proto bcm_proto = {
        .id             = HCI_UART_BCM,
        .name           = "BCM",
@@ -247,12 +527,38 @@ static const struct hci_uart_proto bcm_proto = {
        .dequeue        = bcm_dequeue,
 };
 
#ifdef CONFIG_ACPI
/* ACPI IDs of the supported BCM UART companion devices */
static const struct acpi_device_id bcm_acpi_match[] = {
	{ "BCM2E39", 0 },
	{ "BCM2E67", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
#endif

/* Platform suspend and resume callbacks */
static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume);

/* Platform glue registered from bcm_init() */
static struct platform_driver bcm_driver = {
	.probe = bcm_probe,
	.remove = bcm_remove,
	.driver = {
		.name = "hci_bcm",
		.acpi_match_table = ACPI_PTR(bcm_acpi_match),
		.pm = &bcm_pm_ops,
	},
};
+
/* Register the platform glue, then the HCI line discipline protocol.
 *
 * Fixes two ignored-error cases in the original: the return value of
 * platform_driver_register() was discarded, and the platform driver
 * was left registered if the proto registration failed.
 */
int __init bcm_init(void)
{
	int err;

	err = platform_driver_register(&bcm_driver);
	if (err)
		return err;

	err = hci_uart_register_proto(&bcm_proto);
	if (err)
		platform_driver_unregister(&bcm_driver);

	return err;
}
 
/* Mirror of bcm_init(): drop the platform driver, then the protocol */
int __exit bcm_deinit(void)
{
	platform_driver_unregister(&bcm_driver);

	return hci_uart_unregister_proto(&bcm_proto);
}
index 20c2ac193ff972a9ba8717092f285f81f9aaad59..0d5a05a7c1fd1d46a8b77d71b94f97a9df4190b3 100644 (file)
@@ -810,6 +810,9 @@ static int __init hci_uart_init(void)
 #ifdef CONFIG_BT_HCIUART_BCM
        bcm_init();
 #endif
+#ifdef CONFIG_BT_HCIUART_QCA
+       qca_init();
+#endif
 
        return 0;
 }
@@ -839,6 +842,9 @@ static void __exit hci_uart_exit(void)
 #ifdef CONFIG_BT_HCIUART_BCM
        bcm_deinit();
 #endif
+#ifdef CONFIG_BT_HCIUART_QCA
+       qca_deinit();
+#endif
 
        /* Release tty registration of line discipline */
        err = tty_unregister_ldisc(N_HCI);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
new file mode 100644 (file)
index 0000000..6b9b912
--- /dev/null
@@ -0,0 +1,969 @@
+/*
+ *  Bluetooth Software UART Qualcomm protocol
+ *
+ *  HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
+ *  protocol extension to H4.
+ *
+ *  Copyright (C) 2007 Texas Instruments, Inc.
+ *  Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
+ *
+ *  Acknowledgements:
+ *  This file is based on hci_ll.c, which was...
+ *  Written by Ohad Ben-Cohen <ohad@bencohen.org>
+ *  which was in turn based on hci_h4.c, which was written
+ *  by Maxim Krasnyansky and Marcel Holtmann.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+#include "btqca.h"
+
/* HCI_IBS protocol messages -- single-byte commands exchanged with the
 * controller (see send_hci_ibs_cmd())
 */
#define HCI_IBS_SLEEP_IND	0xFE
#define HCI_IBS_WAKE_IND	0xFD
#define HCI_IBS_WAKE_ACK	0xFC
#define HCI_MAX_IBS_SIZE	10

/* Controller states */
#define STATE_IN_BAND_SLEEP_ENABLED	1

/* Timeouts, all in milliseconds */
#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
#define IBS_TX_IDLE_TIMEOUT_MS		2000
#define BAUDRATE_SETTLE_TIMEOUT_MS	300

/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
	HCI_IBS_TX_ASLEEP,
	HCI_IBS_TX_WAKING,
	HCI_IBS_TX_AWAKE,
};

/* HCI_IBS receive side sleep protocol states */
enum rx_states {
	HCI_IBS_RX_ASLEEP,
	HCI_IBS_RX_AWAKE,
};

/* HCI_IBS transmit and receive side clock state vote */
enum hci_ibs_clock_state_vote {
	HCI_IBS_VOTE_STATS_UPDATE,
	HCI_IBS_TX_VOTE_CLOCK_ON,
	HCI_IBS_TX_VOTE_CLOCK_OFF,
	HCI_IBS_RX_VOTE_CLOCK_ON,
	HCI_IBS_RX_VOTE_CLOCK_OFF,
};
+
/* Per-hci_uart private state for the QCA in-band-sleep protocol */
struct qca_data {
	struct hci_uart *hu;
	struct sk_buff *rx_skb;
	struct sk_buff_head txq;	/* ready to hand to the UART */
	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue	*/
	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock	*/
	u8 tx_ibs_state;	/* HCI_IBS transmit side power state*/
	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
	u32 tx_vote;		/* Clock must be on for TX */
	u32 rx_vote;		/* Clock must be on for RX */
	struct timer_list tx_idle_timer;	/* hci_ibs_tx_idle_timeout() */
	u32 tx_idle_delay;
	struct timer_list wake_retrans_timer;	/* hci_ibs_wake_retrans_timeout() */
	u32 wake_retrans;
	struct workqueue_struct *workqueue;	/* runs the ws_* items below */
	struct work_struct ws_awake_rx;
	struct work_struct ws_awake_device;
	struct work_struct ws_rx_vote_off;
	struct work_struct ws_tx_vote_off;
	unsigned long flags;

	/* For debugging purpose */
	u64 ibs_sent_wacks;
	u64 ibs_sent_slps;
	u64 ibs_sent_wakes;
	u64 ibs_recv_wacks;
	u64 ibs_recv_slps;
	u64 ibs_recv_wakes;
	u64 vote_last_jif;	/* jiffies of the last vote edge */
	u32 vote_on_ms;
	u32 vote_off_ms;
	u64 tx_votes_on;
	u64 rx_votes_on;
	u64 tx_votes_off;
	u64 rx_votes_off;
	u64 votes_on;
	u64 votes_off;
};
+
/* Platform hook: currently a no-op on all supported systems */
static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: Some chipset requires to enable UART clock on client
	 * side to save power consumption or manual work is required.
	 * Please put your code to control UART clock here if needed
	 */
}
+
/* Platform hook: currently a no-op on all supported systems */
static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: Some chipset requires to disable UART clock on client
	 * side to save power consumption or manual work is required.
	 * Please put your code to control UART clock off here if needed
	 */
}
+
/* serial_clock_vote needs to be called with the ibs lock held */
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;
	unsigned int diff;

	/* The clock is needed while either side holds an active vote */
	bool old_vote = (qca->tx_vote | qca->rx_vote);
	bool new_vote;

	switch (vote) {
	case HCI_IBS_VOTE_STATS_UPDATE:
		/* Pure bookkeeping: fold the elapsed time into the on/off
		 * counters without changing any vote.
		 */
		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (old_vote)
			qca->vote_off_ms += diff;
		else
			qca->vote_on_ms += diff;
		return;

	case HCI_IBS_TX_VOTE_CLOCK_ON:
		qca->tx_vote = true;
		qca->tx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_ON:
		qca->rx_vote = true;
		qca->rx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_TX_VOTE_CLOCK_OFF:
		qca->tx_vote = false;
		qca->tx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_OFF:
		qca->rx_vote = false;
		qca->rx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	default:
		BT_ERR("Voting irregularity");
		return;
	}

	/* Only toggle the UART clock on an actual on/off edge */
	if (new_vote != old_vote) {
		if (new_vote)
			__serial_clock_on(hu->tty);
		else
			__serial_clock_off(hu->tty);

		/* NOTE(review): 'vote' is the command code, so the second
		 * %s prints "true" for any non-zero command; old_vote was
		 * probably intended -- confirm.
		 */
		BT_DBG("Vote serial clock %s(%s)", new_vote? "true" : "false",
		       vote? "true" : "false");

		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (new_vote) {
			qca->votes_on++;
			qca->vote_off_ms += diff;
		} else {
			qca->votes_off++;
			qca->vote_on_ms += diff;
		}
		qca->vote_last_jif = jiffies;
	}
}
+
+/* Builds and sends an HCI_IBS command packet.
+ * These are very simple packets with only 1 cmd byte.
+ */
+static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
+{
+       int err = 0;
+       struct sk_buff *skb = NULL;
+       struct qca_data *qca = hu->priv;
+
+       BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);
+
+       skb = bt_skb_alloc(1, GFP_ATOMIC);
+       if (!skb) {
+               BT_ERR("Failed to allocate memory for HCI_IBS packet");
+               return -ENOMEM;
+       }
+
+       /* Assign HCI_IBS type */
+       *skb_put(skb, 1) = cmd;
+
+       skb_queue_tail(&qca->txq, skb);
+
+       return err;
+}
+
/* Work item: bring the TX side up.  Votes the serial clock on, sends a
 * WAKE indication to the controller and arms the retransmit timer in
 * case no WAKE_ACK comes back in time.
 */
static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote for serial clock */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
+
/* Work item: the controller signalled a wake on the RX side.  Vote the
 * RX clock on, mark the RX side awake and reply with a WAKE_ACK.
 */
static void qca_wq_awake_rx(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_rx);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p wq awake rx", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);
	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

	/* Always acknowledge device wake up,
	 * sending IBS message doesn't count as TX ON.
	 */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
		BT_ERR("Failed to acknowledge device wake up");

	qca->ibs_sent_wacks++;

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
+
/* Work item: drop the RX-side serial clock vote from process context */
static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_rx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p rx clock vote off", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
}
+
/* Work item: flush pending TX, then drop the TX-side clock vote */
static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_tx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p tx clock vote off", hu);

	/* Run HCI tx handling unlocked */
	hci_uart_tx_wakeup(hu);

	/* Now that message queued to tty driver, vote for tty clocks off.
	 * It is up to the tty driver to pend the clocks off until tx done.
	 */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}
+
+static void hci_ibs_tx_idle_timeout(unsigned long arg)
+{
+       struct hci_uart *hu = (struct hci_uart *)arg;
+       struct qca_data *qca = hu->priv;
+       unsigned long flags;
+
+       BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
+
+       spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+                                flags, SINGLE_DEPTH_NESTING);
+
+       switch (qca->tx_ibs_state) {
+       case HCI_IBS_TX_AWAKE:
+               /* TX_IDLE, go to SLEEP */
+               if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
+                       BT_ERR("Failed to send SLEEP to device");
+                       break;
+               }
+               qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+               qca->ibs_sent_slps++;
+               queue_work(qca->workqueue, &qca->ws_tx_vote_off);
+               break;
+
+       case HCI_IBS_TX_ASLEEP:
+       case HCI_IBS_TX_WAKING:
+               /* Fall through */
+
+       default:
+               BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
+               break;
+       }
+
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+}
+
+static void hci_ibs_wake_retrans_timeout(unsigned long arg)
+{
+       struct hci_uart *hu = (struct hci_uart *)arg;
+       struct qca_data *qca = hu->priv;
+       unsigned long flags, retrans_delay;
+       unsigned long retransmit = 0;
+
+       BT_DBG("hu %p wake retransmit timeout in %d state",
+               hu, qca->tx_ibs_state);
+
+       spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+                                flags, SINGLE_DEPTH_NESTING);
+
+       switch (qca->tx_ibs_state) {
+       case HCI_IBS_TX_WAKING:
+               /* No WAKE_ACK, retransmit WAKE */
+               retransmit = 1;
+               if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
+                       BT_ERR("Failed to acknowledge device wake up");
+                       break;
+               }
+               qca->ibs_sent_wakes++;
+               retrans_delay = msecs_to_jiffies(qca->wake_retrans);
+               mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+               break;
+
+       case HCI_IBS_TX_ASLEEP:
+       case HCI_IBS_TX_AWAKE:
+               /* Fall through */
+
+       default:
+               BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
+               break;
+       }
+
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+       if (retransmit)
+               hci_uart_tx_wakeup(hu);
+}
+
+/* Initialize protocol */
+static int qca_open(struct hci_uart *hu)
+{
+       struct qca_data *qca;
+
+       BT_DBG("hu %p qca_open", hu);
+
+       qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
+       if (!qca)
+               return -ENOMEM;
+
+       skb_queue_head_init(&qca->txq);
+       skb_queue_head_init(&qca->tx_wait_q);
+       spin_lock_init(&qca->hci_ibs_lock);
+       qca->workqueue = create_singlethread_workqueue("qca_wq");
+       if (!qca->workqueue) {
+               BT_ERR("QCA Workqueue not initialized properly");
+               kfree(qca);
+               return -ENOMEM;
+       }
+
+       INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
+       INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
+       INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
+       INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
+
+       qca->hu = hu;
+
+       /* Assume we start with both sides asleep -- extra wakes OK */
+       qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+       qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+
+       /* clocks actually on, but we start votes off */
+       qca->tx_vote = false;
+       qca->rx_vote = false;
+       qca->flags = 0;
+
+       qca->ibs_sent_wacks = 0;
+       qca->ibs_sent_slps = 0;
+       qca->ibs_sent_wakes = 0;
+       qca->ibs_recv_wacks = 0;
+       qca->ibs_recv_slps = 0;
+       qca->ibs_recv_wakes = 0;
+       qca->vote_last_jif = jiffies;
+       qca->vote_on_ms = 0;
+       qca->vote_off_ms = 0;
+       qca->votes_on = 0;
+       qca->votes_off = 0;
+       qca->tx_votes_on = 0;
+       qca->tx_votes_off = 0;
+       qca->rx_votes_on = 0;
+       qca->rx_votes_off = 0;
+
+       hu->priv = qca;
+
+       init_timer(&qca->wake_retrans_timer);
+       qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
+       qca->wake_retrans_timer.data = (u_long)hu;
+       qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
+
+       init_timer(&qca->tx_idle_timer);
+       qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
+       qca->tx_idle_timer.data = (u_long)hu;
+       qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+
+       BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
+              qca->tx_idle_delay, qca->wake_retrans);
+
+       return 0;
+}
+
+static void qca_debugfs_init(struct hci_dev *hdev)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+       struct qca_data *qca = hu->priv;
+       struct dentry *ibs_dir;
+       umode_t mode;
+
+       if (!hdev->debugfs)
+               return;
+
+       ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
+
+       /* read only */
+       mode = S_IRUGO;
+       debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
+       debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
+       debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
+                          &qca->ibs_sent_slps);
+       debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
+                          &qca->ibs_sent_wakes);
+       debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
+                          &qca->ibs_sent_wacks);
+       debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
+                          &qca->ibs_recv_slps);
+       debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
+                          &qca->ibs_recv_wakes);
+       debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
+                          &qca->ibs_recv_wacks);
+       debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
+       debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
+       debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
+       debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
+       debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
+       debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
+       debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
+       debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
+       debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
+       debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);
+
+       /* read/write */
+       mode = S_IRUGO | S_IWUSR;
+       debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
+       debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
+                          &qca->tx_idle_delay);
+}
+
+/* Flush protocol data */
+static int qca_flush(struct hci_uart *hu)
+{
+       struct qca_data *qca = hu->priv;
+
+       BT_DBG("hu %p qca flush", hu);
+
+       skb_queue_purge(&qca->tx_wait_q);
+       skb_queue_purge(&qca->txq);
+
+       return 0;
+}
+
+/* Close protocol */
+static int qca_close(struct hci_uart *hu)
+{
+       struct qca_data *qca = hu->priv;
+
+       BT_DBG("hu %p qca close", hu);
+
+       serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
+
+       skb_queue_purge(&qca->tx_wait_q);
+       skb_queue_purge(&qca->txq);
+       del_timer(&qca->tx_idle_timer);
+       del_timer(&qca->wake_retrans_timer);
+       destroy_workqueue(qca->workqueue);
+       qca->hu = NULL;
+
+       kfree_skb(qca->rx_skb);
+
+       hu->priv = NULL;
+
+       kfree(qca);
+
+       return 0;
+}
+
+/* Called upon a wake-up-indication from the device.
+ */
+static void device_want_to_wakeup(struct hci_uart *hu)
+{
+       unsigned long flags;
+       struct qca_data *qca = hu->priv;
+
+       BT_DBG("hu %p want to wake up", hu);
+
+       spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+       qca->ibs_recv_wakes++;
+
+       switch (qca->rx_ibs_state) {
+       case HCI_IBS_RX_ASLEEP:
+               /* Make sure clock is on - we may have turned clock off since
+                * receiving the wake up indicator awake rx clock.
+                */
+               queue_work(qca->workqueue, &qca->ws_awake_rx);
+               spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+               return;
+
+       case HCI_IBS_RX_AWAKE:
+               /* Always acknowledge device wake up,
+                * sending IBS message doesn't count as TX ON.
+                */
+               if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
+                       BT_ERR("Failed to acknowledge device wake up");
+                       break;
+               }
+               qca->ibs_sent_wacks++;
+               break;
+
+       default:
+               /* Any other state is illegal */
+               BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
+                      qca->rx_ibs_state);
+               break;
+       }
+
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+       /* Actually send the packets */
+       hci_uart_tx_wakeup(hu);
+}
+
+/* Called upon a sleep-indication from the device.
+ */
+static void device_want_to_sleep(struct hci_uart *hu)
+{
+       unsigned long flags;
+       struct qca_data *qca = hu->priv;
+
+       BT_DBG("hu %p want to sleep", hu);
+
+       spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+       qca->ibs_recv_slps++;
+
+       switch (qca->rx_ibs_state) {
+       case HCI_IBS_RX_AWAKE:
+               /* Update state */
+               qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+               /* Vote off rx clock under workqueue */
+               queue_work(qca->workqueue, &qca->ws_rx_vote_off);
+               break;
+
+       case HCI_IBS_RX_ASLEEP:
+               /* Fall through */
+
+       default:
+               /* Any other state is illegal */
+               BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
+                      qca->rx_ibs_state);
+               break;
+       }
+
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+}
+
+/* Called upon wake-up-acknowledgement from the device
+ */
+static void device_woke_up(struct hci_uart *hu)
+{
+       unsigned long flags, idle_delay;
+       struct qca_data *qca = hu->priv;
+       struct sk_buff *skb = NULL;
+
+       BT_DBG("hu %p woke up", hu);
+
+       spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+       qca->ibs_recv_wacks++;
+
+       switch (qca->tx_ibs_state) {
+       case HCI_IBS_TX_AWAKE:
+               /* Expect one if we send 2 WAKEs */
+               BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
+                      qca->tx_ibs_state);
+               break;
+
+       case HCI_IBS_TX_WAKING:
+               /* Send pending packets */
+               while ((skb = skb_dequeue(&qca->tx_wait_q)))
+                       skb_queue_tail(&qca->txq, skb);
+
+               /* Switch timers and change state to HCI_IBS_TX_AWAKE */
+               del_timer(&qca->wake_retrans_timer);
+               idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
+               mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+               qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
+               break;
+
+       case HCI_IBS_TX_ASLEEP:
+               /* Fall through */
+
+       default:
+               BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
+                      qca->tx_ibs_state);
+               break;
+       }
+
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+       /* Actually send the packets */
+       hci_uart_tx_wakeup(hu);
+}
+
+/* Enqueue frame for transmittion (padding, crc, etc) may be called from
+ * two simultaneous tasklets.
+ */
+static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+       unsigned long flags = 0, idle_delay;
+       struct qca_data *qca = hu->priv;
+
+       BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
+              qca->tx_ibs_state);
+
+       /* Prepend skb with frame type */
+       memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+       /* Don't go to sleep in middle of patch download or
+        * Out-Of-Band(GPIOs control) sleep is selected.
+        */
+       if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
+               skb_queue_tail(&qca->txq, skb);
+               return 0;
+       }
+
+       spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+       /* Act according to current state */
+       switch (qca->tx_ibs_state) {
+       case HCI_IBS_TX_AWAKE:
+               BT_DBG("Device awake, sending normally");
+               skb_queue_tail(&qca->txq, skb);
+               idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
+               mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+               break;
+
+       case HCI_IBS_TX_ASLEEP:
+               BT_DBG("Device asleep, waking up and queueing packet");
+               /* Save packet for later */
+               skb_queue_tail(&qca->tx_wait_q, skb);
+
+               qca->tx_ibs_state = HCI_IBS_TX_WAKING;
+               /* Schedule a work queue to wake up device */
+               queue_work(qca->workqueue, &qca->ws_awake_device);
+               break;
+
+       case HCI_IBS_TX_WAKING:
+               BT_DBG("Device waking up, queueing packet");
+               /* Transient state; just keep packet for later */
+               skb_queue_tail(&qca->tx_wait_q, skb);
+               break;
+
+       default:
+               BT_ERR("Illegal tx state: %d (losing packet)",
+                      qca->tx_ibs_state);
+               kfree_skb(skb);
+               break;
+       }
+
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+       return 0;
+}
+
+static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+
+       BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);
+
+       device_want_to_sleep(hu);
+
+       kfree_skb(skb);
+       return 0;
+}
+
+static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+
+       BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);
+
+       device_want_to_wakeup(hu);
+
+       kfree_skb(skb);
+       return 0;
+}
+
+static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+
+       BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);
+
+       device_woke_up(hu);
+
+       kfree_skb(skb);
+       return 0;
+}
+
+#define QCA_IBS_SLEEP_IND_EVENT \
+       .type = HCI_IBS_SLEEP_IND, \
+       .hlen = 0, \
+       .loff = 0, \
+       .lsize = 0, \
+       .maxlen = HCI_MAX_IBS_SIZE
+
+#define QCA_IBS_WAKE_IND_EVENT \
+       .type = HCI_IBS_WAKE_IND, \
+       .hlen = 0, \
+       .loff = 0, \
+       .lsize = 0, \
+       .maxlen = HCI_MAX_IBS_SIZE
+
+#define QCA_IBS_WAKE_ACK_EVENT \
+       .type = HCI_IBS_WAKE_ACK, \
+       .hlen = 0, \
+       .loff = 0, \
+       .lsize = 0, \
+       .maxlen = HCI_MAX_IBS_SIZE
+
+static const struct h4_recv_pkt qca_recv_pkts[] = {
+       { H4_RECV_ACL,             .recv = hci_recv_frame    },
+       { H4_RECV_SCO,             .recv = hci_recv_frame    },
+       { H4_RECV_EVENT,           .recv = hci_recv_frame    },
+       { QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
+       { QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
+       { QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
+};
+
+static int qca_recv(struct hci_uart *hu, const void *data, int count)
+{
+       struct qca_data *qca = hu->priv;
+
+       if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+               return -EUNATCH;
+
+       qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+                                 qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
+       if (IS_ERR(qca->rx_skb)) {
+               int err = PTR_ERR(qca->rx_skb);
+               BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+               qca->rx_skb = NULL;
+               return err;
+       }
+
+       return count;
+}
+
+static struct sk_buff *qca_dequeue(struct hci_uart *hu)
+{
+       struct qca_data *qca = hu->priv;
+
+       return skb_dequeue(&qca->txq);
+}
+
+static uint8_t qca_get_baudrate_value(int speed)
+{
+       switch(speed) {
+       case 9600:
+               return QCA_BAUDRATE_9600;
+       case 19200:
+               return QCA_BAUDRATE_19200;
+       case 38400:
+               return QCA_BAUDRATE_38400;
+       case 57600:
+               return QCA_BAUDRATE_57600;
+       case 115200:
+               return QCA_BAUDRATE_115200;
+       case 230400:
+               return QCA_BAUDRATE_230400;
+       case 460800:
+               return QCA_BAUDRATE_460800;
+       case 500000:
+               return QCA_BAUDRATE_500000;
+       case 921600:
+               return QCA_BAUDRATE_921600;
+       case 1000000:
+               return QCA_BAUDRATE_1000000;
+       case 2000000:
+               return QCA_BAUDRATE_2000000;
+       case 3000000:
+               return QCA_BAUDRATE_3000000;
+       case 3500000:
+               return QCA_BAUDRATE_3500000;
+       default:
+               return QCA_BAUDRATE_115200;
+       }
+}
+
+static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+       struct qca_data *qca = hu->priv;
+       struct sk_buff *skb;
+       u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
+
+       if (baudrate > QCA_BAUDRATE_3000000)
+               return -EINVAL;
+
+       cmd[4] = baudrate;
+
+       skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
+       if (!skb) {
+               BT_ERR("Failed to allocate memory for baudrate packet");
+               return -ENOMEM;
+       }
+
+       /* Assign commands to change baudrate and packet type. */
+       memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
+       bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+
+       skb_queue_tail(&qca->txq, skb);
+       hci_uart_tx_wakeup(hu);
+
+       /* wait 300ms to change new baudrate on controller side
+        * controller will come back after they receive this HCI command
+        * then host can communicate with new baudrate to controller
+        */
+       set_current_state(TASK_UNINTERRUPTIBLE);
+       schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
+       set_current_state(TASK_INTERRUPTIBLE);
+
+       return 0;
+}
+
+static int qca_setup(struct hci_uart *hu)
+{
+       struct hci_dev *hdev = hu->hdev;
+       struct qca_data *qca = hu->priv;
+       unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+       int ret;
+
+       BT_INFO("%s: ROME setup", hdev->name);
+
+       /* Patch downloading has to be done without IBS mode */
+       clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+
+       /* Setup initial baudrate */
+       speed = 0;
+       if (hu->init_speed)
+               speed = hu->init_speed;
+       else if (hu->proto->init_speed)
+               speed = hu->proto->init_speed;
+
+       if (speed)
+               hci_uart_set_baudrate(hu, speed);
+
+       /* Setup user speed if needed */
+       speed = 0;
+       if (hu->oper_speed)
+               speed = hu->oper_speed;
+       else if (hu->proto->oper_speed)
+               speed = hu->proto->oper_speed;
+
+       if (speed) {
+               qca_baudrate = qca_get_baudrate_value(speed);
+
+               BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
+               ret = qca_set_baudrate(hdev, qca_baudrate);
+               if (ret) {
+                       BT_ERR("%s: Failed to change the baud rate (%d)",
+                              hdev->name, ret);
+                       return ret;
+               }
+               hci_uart_set_baudrate(hu, speed);
+       }
+
+       /* Setup patch / NVM configurations */
+       ret = qca_uart_setup_rome(hdev, qca_baudrate);
+       if (!ret) {
+               set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+               qca_debugfs_init(hdev);
+       }
+
+       /* Setup bdaddr */
+       hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+       return ret;
+}
+
+static struct hci_uart_proto qca_proto = {
+       .id             = HCI_UART_QCA,
+       .name           = "QCA",
+       .init_speed     = 115200,
+       .oper_speed     = 3000000,
+       .open           = qca_open,
+       .close          = qca_close,
+       .flush          = qca_flush,
+       .setup          = qca_setup,
+       .recv           = qca_recv,
+       .enqueue        = qca_enqueue,
+       .dequeue        = qca_dequeue,
+};
+
+int __init qca_init(void)
+{
+       return hci_uart_register_proto(&qca_proto);
+}
+
+int __exit qca_deinit(void)
+{
+       return hci_uart_unregister_proto(&qca_proto);
+}
index 496587a73a9daa4a2a70ef92bd9fc04b0ef72dbf..495b9ef52bb0fd608519fda39c0b4726c95f3c7f 100644 (file)
@@ -35,7 +35,7 @@
 #define HCIUARTGETFLAGS                _IOR('U', 204, int)
 
 /* UART protocols */
-#define HCI_UART_MAX_PROTO     8
+#define HCI_UART_MAX_PROTO     9
 
 #define HCI_UART_H4    0
 #define HCI_UART_BCSP  1
@@ -45,6 +45,7 @@
 #define HCI_UART_ATH3K 5
 #define HCI_UART_INTEL 6
 #define HCI_UART_BCM   7
+#define HCI_UART_QCA   8
 
 #define HCI_UART_RAW_DEVICE    0
 #define HCI_UART_RESET_ON_INIT 1
@@ -176,3 +177,8 @@ int intel_deinit(void);
 int bcm_init(void);
 int bcm_deinit(void);
 #endif
+
+#ifdef CONFIG_BT_HCIUART_QCA
+int qca_init(void);
+int qca_deinit(void);
+#endif
index 4b93a1efb36d11fa7171735d29bac283e4bb6d97..ac03ba49e9d1952dff14e9383ed86874690a7176 100644 (file)
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
 PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
 PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
 
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
 #define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
                    div_hp, bit, is_lp, flags)                          \
        PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,         \
index b8ff3c64cc452a16fc4108426fb6e5b1c54e91e8..c96de14036a0adebfc7628dc9f9cd5413b5c5495 100644 (file)
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
 {
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
+       if (!ch->cs_enabled)
+               return;
+
        sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
        pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
 }
@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
 {
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
+       if (!ch->cs_enabled)
+               return;
+
        pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
        sh_cmt_start(ch, FLAG_CLOCKSOURCE);
 }
index ae5b2bd3a9785c63646e3e922fbe17330678b481..fa3dd840a83771735e474a658a5c6516c62f76a0 100644 (file)
@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
                ret = exynos5250_cpufreq_init(exynos_info);
        } else {
                pr_err("%s: Unknown SoC type\n", __func__);
-               return -ENODEV;
+               ret = -ENODEV;
        }
 
        if (ret)
@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 
        if (exynos_info->set_freq == NULL) {
                dev_err(&pdev->dev, "No set_freq function (ERR)\n");
+               ret = -EINVAL;
                goto err_vdd_arm;
        }
 
        arm_regulator = regulator_get(NULL, "vdd_arm");
        if (IS_ERR(arm_regulator)) {
                dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
+               ret = -EINVAL;
                goto err_vdd_arm;
        }
 
@@ -225,7 +227,7 @@ err_cpufreq_reg:
        regulator_put(arm_regulator);
 err_vdd_arm:
        kfree(exynos_info);
-       return -EINVAL;
+       return ret;
 }
 
 static struct platform_driver exynos_cpufreq_platdrv = {
index dae1e8099969a192b302703ec291da96ebac3429..f9c78751989ec865491570ed13bf19dbc6b1a799 100644 (file)
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
                          state->buflen_1;
        u32 *sh_desc = ctx->sh_desc_fin, *desc;
        dma_addr_t ptr = ctx->sh_desc_fin_dma;
-       int sec4_sg_bytes;
+       int sec4_sg_bytes, sec4_sg_src_index;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret = 0;
        int sh_len;
 
-       sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+       sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+       sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                                buf, state->buf_dma, buflen,
                                                last_buflen);
-       (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+       (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
 
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
index 08f8d5cd633491e3ff0e28ca8204d7f51be2b05b..becb738c897b1b5d93b632e3ab80ed2b146ead5a 100644 (file)
@@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        struct sha256_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-       struct nx_sg *in_sg;
        struct nx_sg *out_sg;
        u64 to_process = 0, leftover, total;
        unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-       in_sg = nx_ctx->in_sg;
        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                        nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
@@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        }
 
        do {
-               /*
-                * to_process: the SHA256_BLOCK_SIZE data chunk to process in
-                * this update. This value is also restricted by the sg list
-                * limits.
-                */
-               to_process = total - to_process;
-               to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+               int used_sgs = 0;
+               struct nx_sg *in_sg = nx_ctx->in_sg;
 
                if (buf_len) {
                        data_len = buf_len;
-                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                       in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len,
                                                 max_sg_len);
@@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
                                rc = -EINVAL;
                                goto out;
                        }
+                       used_sgs = in_sg - nx_ctx->in_sg;
                }
 
+               /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
+                * processed in this iteration. This value is restricted
+                * by sg list limits and number of sgs we already used
+                * for leftover data. (see above)
+                * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+                * but because data may not be aligned, we need to account
+                * for that too. */
+               to_process = min_t(u64, total,
+                       (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+               to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);
 
                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
-               to_process = (data_len + buf_len);
+               to_process = data_len + buf_len;
                leftover = total - to_process;
 
                /*
index aff0fe58eac0b7aba11b465a192c280ef19fdbac..b6e183d58d73d5a4e38fff2925344783e8e581bc 100644 (file)
@@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-       struct nx_sg *in_sg;
        struct nx_sg *out_sg;
        u64 to_process, leftover = 0, total;
        unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-       in_sg = nx_ctx->in_sg;
        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                        nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
@@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        }
 
        do {
-               /*
-                * to_process: the SHA512_BLOCK_SIZE data chunk to process in
-                * this update. This value is also restricted by the sg list
-                * limits.
-                */
-               to_process = total - leftover;
-               to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-               leftover = total - to_process;
+               int used_sgs = 0;
+               struct nx_sg *in_sg = nx_ctx->in_sg;
 
                if (buf_len) {
                        data_len = buf_len;
-                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                       in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len, max_sg_len);
 
@@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
                                rc = -EINVAL;
                                goto out;
                        }
+                       used_sgs = in_sg - nx_ctx->in_sg;
                }
 
+               /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
+                * processed in this iteration. This value is restricted
+                * by sg list limits and number of sgs we already used
+                * for leftover data. (see above)
+                * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+                * but because data may not be aligned, we need to account
+                * for that too. */
+               to_process = min_t(u64, total,
+                       (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+               to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);
@@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
                        goto out;
                }
 
-               to_process = (data_len + buf_len);
+               to_process = data_len + buf_len;
                leftover = total - to_process;
 
                /*
index 4a4cce15f25dd65c6a720d949ed0d9c922ff1cba..3ff284c8e3d5aef72f229017c883c73cbe13403f 100644 (file)
@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
        struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
        if (IS_ERR(ch))
                return NULL;
+
+       dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
+       ch->device->privatecnt++;
+
        return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
index b0487c9f018cfd09d040ffb08fe4585f68ab6022..eb603f1defc2250ea158864ea4371e24655138e1 100644 (file)
@@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
                   from an EDID retrieval */
                if (port->connector) {
                        mutex_lock(&mgr->destroy_connector_lock);
-                       list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
+                       list_add(&port->next, &mgr->destroy_connector_list);
                        mutex_unlock(&mgr->destroy_connector_lock);
                        schedule_work(&mgr->destroy_connector_work);
+                       return;
                }
                drm_dp_port_teardown_pdt(port, port->pdt);
 
@@ -2659,7 +2660,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
-       struct drm_connector *connector;
+       struct drm_dp_mst_port *port;
 
        /*
         * Not a regular list traverse as we have to drop the destroy
@@ -2668,15 +2669,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
         */
        for (;;) {
                mutex_lock(&mgr->destroy_connector_lock);
-               connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
-               if (!connector) {
+               port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
+               if (!port) {
                        mutex_unlock(&mgr->destroy_connector_lock);
                        break;
                }
-               list_del(&connector->destroy_list);
+               list_del(&port->next);
                mutex_unlock(&mgr->destroy_connector_lock);
 
-               mgr->cbs->destroy_connector(mgr, connector);
+               mgr->cbs->destroy_connector(mgr, port->connector);
+
+               drm_dp_port_teardown_pdt(port, port->pdt);
+
+               if (!port->input && port->vcpi.vcpi > 0)
+                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+               kfree(port);
        }
 }
 
index 842d6b8dc3c435ee7d836402d4169847aef75d31..2a652359af644b51f257cde7528d70b6016897da 100644 (file)
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
        spin_lock_init(&ctx->lock);
        platform_set_drvdata(pdev, ctx);
 
-       pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
 
        ret = exynos_drm_ippdrv_register(ippdrv);
index 8040ed2a831f9a6f226baf8aee3ce00b213be8e6..f1c6b76c127f4db02388267775431fcd25ac7eb8 100644 (file)
@@ -593,8 +593,7 @@ static int gsc_src_set_transf(struct device *dev,
 
        gsc_write(cfg, GSC_IN_CON);
 
-       ctx->rotation = cfg &
-               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
        *swap = ctx->rotation;
 
        return 0;
@@ -857,8 +856,7 @@ static int gsc_dst_set_transf(struct device *dev,
 
        gsc_write(cfg, GSC_IN_CON);
 
-       ctx->rotation = cfg &
-               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
        *swap = ctx->rotation;
 
        return 0;
index 99e286489031c4a2931565823e0158428548aef2..4a00990e4ae4e8459b94a9a007044af1cc11af62 100644 (file)
@@ -1064,6 +1064,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
 {
        struct hdmi_context *hdata = ctx_from_connector(connector);
        struct edid *edid;
+       int ret;
 
        if (!hdata->ddc_adpt)
                return -ENODEV;
@@ -1079,7 +1080,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
 
        drm_mode_connector_update_edid_property(connector, edid);
 
-       return drm_add_edid_modes(connector, edid);
+       ret = drm_add_edid_modes(connector, edid);
+
+       kfree(edid);
+
+       return ret;
 }
 
 static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
index cae98db3306205e2628b2090b731cf6cfdf79d4f..4706b56902b44f5ba205b30d3aa6e53678bbad52 100644 (file)
@@ -718,6 +718,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
        /* handling VSYNC */
        if (val & MXR_INT_STATUS_VSYNC) {
+               /* vsync interrupt use different bit for read and clear */
+               val |= MXR_INT_CLEAR_VSYNC;
+               val &= ~MXR_INT_STATUS_VSYNC;
+
                /* interlace scan need to check shadow register */
                if (ctx->interlace) {
                        base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -743,11 +747,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
 out:
        /* clear interrupts */
-       if (~val & MXR_INT_EN_VSYNC) {
-               /* vsync interrupt use different bit for read and clear */
-               val &= ~MXR_INT_EN_VSYNC;
-               val |= MXR_INT_CLEAR_VSYNC;
-       }
        mixer_reg_write(res, MXR_INT_STATUS, val);
 
        spin_unlock(&res->reg_slock);
@@ -907,8 +906,8 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
        }
 
        /* enable vsync interrupt */
-       mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
-                       MXR_INT_EN_VSYNC);
+       mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+       mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
 
        return 0;
 }
@@ -918,7 +917,13 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
        struct mixer_context *mixer_ctx = crtc->ctx;
        struct mixer_resources *res = &mixer_ctx->mixer_res;
 
+       if (!mixer_ctx->powered) {
+               mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+               return;
+       }
+
        /* disable vsync interrupt */
+       mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
        mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
@@ -1047,6 +1052,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 
        mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
 
+       if (ctx->int_en & MXR_INT_EN_VSYNC)
+               mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
        mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
        mixer_win_reset(ctx);
 }
index 7ed8033aae6097af69d90e83bb6c97f7dc6f7225..8e35e0d013df556d8ac04fc27f9ba2bd7354fae3 100644 (file)
@@ -129,8 +129,9 @@ int intel_atomic_commit(struct drm_device *dev,
                        struct drm_atomic_state *state,
                        bool async)
 {
-       int ret;
-       int i;
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc *crtc;
+       int ret, i;
 
        if (async) {
                DRM_DEBUG_KMS("i915 does not yet support async commit\n");
@@ -142,48 +143,18 @@ int intel_atomic_commit(struct drm_device *dev,
                return ret;
 
        /* Point of no return */
-
-       /*
-        * FIXME:  The proper sequence here will eventually be:
-        *
-        * drm_atomic_helper_swap_state(dev, state)
-        * drm_atomic_helper_commit_modeset_disables(dev, state);
-        * drm_atomic_helper_commit_planes(dev, state);
-        * drm_atomic_helper_commit_modeset_enables(dev, state);
-        * drm_atomic_helper_wait_for_vblanks(dev, state);
-        * drm_atomic_helper_cleanup_planes(dev, state);
-        * drm_atomic_state_free(state);
-        *
-        * once we have full atomic modeset.  For now, just manually update
-        * plane states to avoid clobbering good states with dummy states
-        * while nuclear pageflipping.
-        */
-       for (i = 0; i < dev->mode_config.num_total_plane; i++) {
-               struct drm_plane *plane = state->planes[i];
-
-               if (!plane)
-                       continue;
-
-               plane->state->state = state;
-               swap(state->plane_states[i], plane->state);
-               plane->state->state = NULL;
-       }
+       drm_atomic_helper_swap_state(dev, state);
 
        /* swap crtc_scaler_state */
-       for (i = 0; i < dev->mode_config.num_crtc; i++) {
-               struct drm_crtc *crtc = state->crtcs[i];
-               if (!crtc) {
-                       continue;
-               }
-
-               to_intel_crtc(crtc)->config->scaler_state =
-                       to_intel_crtc_state(state->crtc_states[i])->scaler_state;
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
 
                if (INTEL_INFO(dev)->gen >= 9)
                        skl_detach_scalers(to_intel_crtc(crtc));
+
+               drm_atomic_helper_commit_planes_on_crtc(crtc_state);
        }
 
-       drm_atomic_helper_commit_planes(dev, state);
        drm_atomic_helper_wait_for_vblanks(dev, state);
        drm_atomic_helper_cleanup_planes(dev, state);
        drm_atomic_state_free(state);
index 30e0f54ba19d1284107958bb6e5d49f6309b63de..87476ff181ddbef0967d948c37119cfcbd758315 100644 (file)
@@ -11826,7 +11826,9 @@ encoder_retry:
                goto encoder_retry;
        }
 
-       pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
+       /* Dithering seems to not pass-through bits correctly when it should, so
+        * only enable it on 6bpc panels. */
+       pipe_config->dither = pipe_config->pipe_bpp == 6*3;
        DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
                      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
@@ -12624,17 +12626,17 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
 
        modeset_update_crtc_power_domains(state);
 
-       drm_atomic_helper_commit_planes(dev, state);
-
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               if (!needs_modeset(crtc->state) || !crtc->state->enable)
+               if (!needs_modeset(crtc->state) || !crtc->state->enable) {
+                       drm_atomic_helper_commit_planes_on_crtc(crtc_state);
                        continue;
+               }
 
                update_scanline_offset(to_intel_crtc(crtc));
 
                dev_priv->display.crtc_enable(crtc);
-               intel_crtc_enable_planes(crtc);
+               drm_atomic_helper_commit_planes_on_crtc(crtc_state);
        }
 
        /* FIXME: add subpixel order */
@@ -12891,20 +12893,11 @@ intel_modeset_stage_output_state(struct drm_device *dev,
        return 0;
 }
 
-static bool primary_plane_visible(struct drm_crtc *crtc)
-{
-       struct intel_plane_state *plane_state =
-               to_intel_plane_state(crtc->primary->state);
-
-       return plane_state->visible;
-}
-
 static int intel_crtc_set_config(struct drm_mode_set *set)
 {
        struct drm_device *dev;
        struct drm_atomic_state *state = NULL;
        struct intel_crtc_state *pipe_config;
-       bool primary_plane_was_visible;
        int ret;
 
        BUG_ON(!set);
@@ -12943,38 +12936,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 
        intel_update_pipe_size(to_intel_crtc(set->crtc));
 
-       primary_plane_was_visible = primary_plane_visible(set->crtc);
-
        ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
 
-       if (ret == 0 &&
-           pipe_config->base.enable &&
-           pipe_config->base.planes_changed &&
-           !needs_modeset(&pipe_config->base)) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
-
-               /*
-                * We need to make sure the primary plane is re-enabled if it
-                * has previously been turned off.
-                */
-               if (ret == 0 && !primary_plane_was_visible &&
-                   primary_plane_visible(set->crtc)) {
-                       WARN_ON(!intel_crtc->active);
-                       intel_post_enable_primary(set->crtc);
-               }
-
-               /*
-                * In the fastboot case this may be our only check of the
-                * state after boot.  It would be better to only do it on
-                * the first update, but we don't have a nice way of doing that
-                * (and really, set_config isn't used much for high freq page
-                * flipping, so increasing its cost here shouldn't be a big
-                * deal).
-                */
-               if (i915.fastboot && ret == 0)
-                       intel_modeset_check_state(set->crtc->dev);
-       }
-
        if (ret) {
                DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
                              set->crtc->base.id, ret);
@@ -13305,6 +13268,9 @@ intel_check_primary_plane(struct drm_plane *plane,
                         */
                        if (IS_BROADWELL(dev))
                                intel_crtc->atomic.wait_vblank = true;
+
+                       if (crtc_state)
+                               intel_crtc->atomic.post_enable_primary = true;
                }
 
                /*
@@ -13317,6 +13283,10 @@ intel_check_primary_plane(struct drm_plane *plane,
                if (!state->visible || !fb)
                        intel_crtc->atomic.disable_ips = true;
 
+               if (!state->visible && old_state->visible &&
+                   crtc_state && !needs_modeset(&crtc_state->base))
+                       intel_crtc->atomic.pre_disable_primary = true;
+
                intel_crtc->atomic.fb_bits |=
                        INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 
@@ -15034,6 +15004,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                struct intel_plane_state *plane_state;
 
                memset(crtc->config, 0, sizeof(*crtc->config));
+               crtc->config->base.crtc = &crtc->base;
 
                crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
 
index 52c22b02600598cfa7d18e424d69a99cce4879e7..e10f9644140f5d9fcd6e73446c74634d2b13906a 100644 (file)
@@ -165,31 +165,15 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
        return 0;
 }
 
-static int
-gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
-{
-       struct nvkm_object *obj = (void *)chan;
-       struct gk104_fifo_priv *priv = (void *)obj->engine;
-
-       nv_wr32(priv, 0x002634, chan->base.chid);
-       if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
-               nv_error(priv, "channel %d [%s] kick timeout\n",
-                        chan->base.chid, nvkm_client_name(chan));
-               return -EBUSY;
-       }
-
-       return 0;
-}
-
 static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                          struct nvkm_object *object)
 {
        struct nvkm_bar *bar = nvkm_bar(parent);
+       struct gk104_fifo_priv *priv = (void *)parent->engine;
        struct gk104_fifo_base *base = (void *)parent->parent;
        struct gk104_fifo_chan *chan = (void *)parent;
        u32 addr;
-       int ret;
 
        switch (nv_engidx(object->engine)) {
        case NVDEV_ENGINE_SW    : return 0;
@@ -204,9 +188,13 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                return -EINVAL;
        }
 
-       ret = gk104_fifo_chan_kick(chan);
-       if (ret && suspend)
-               return ret;
+       nv_wr32(priv, 0x002634, chan->base.chid);
+       if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+               nv_error(priv, "channel %d [%s] kick timeout\n",
+                        chan->base.chid, nvkm_client_name(chan));
+               if (suspend)
+                       return -EBUSY;
+       }
 
        if (addr) {
                nv_wo32(base, addr + 0x00, 0x00000000);
@@ -331,7 +319,6 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
                gk104_fifo_runlist_update(priv, chan->engine);
        }
 
-       gk104_fifo_chan_kick(chan);
        nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
        return nvkm_fifo_channel_fini(&chan->base, suspend);
 }
index 654c8daeb5ab3d0dd84a2ed1d32af633d6955ac9..97ad3bcb99a75a441a54150f779415dc59236ac7 100644 (file)
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
                                     true, NULL);
        if (unlikely(ret != 0))
-               goto out_err;
+               goto out_err_nores;
 
        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        vmw_resource_relocations_free(&sw_context->res_relocations);
 
        vmw_fifo_commit(dev_priv, command_size);
+       mutex_unlock(&dev_priv->binding_mutex);
 
        vmw_query_bo_switch_commit(dev_priv, sw_context);
        ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                DRM_ERROR("Fence submission error. Syncing.\n");
 
        vmw_resource_list_unreserve(&sw_context->resource_list, false);
-       mutex_unlock(&dev_priv->binding_mutex);
 
        ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
                                    (void *) fence);
index c7aab48f07cdfcdebf3efb6374416619c9095e04..92d518382a9fce90c3e1dbae45034675072da274 100644 (file)
@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                        printk(KERN_ERR MOD
                               "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
-                       ret = -EINVAL;
+                       wc->status = IB_WC_FATAL_ERR;
                }
        }
 out:
index 3a27a84ad3ec376a2543c1ac9568c30e5d7c131b..9426276dbe1402b1445dd7b84da6d7fca38893a6 100644 (file)
@@ -2245,6 +2245,9 @@ void omap3_gpmc_save_context(void)
 {
        int i;
 
+       if (!gpmc_base)
+               return;
+
        gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
        gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
        gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2277,6 +2280,9 @@ void omap3_gpmc_restore_context(void)
 {
        int i;
 
+       if (!gpmc_base)
+               return;
+
        gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
        gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
        gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
index e58468b02987f28c57252c1acdbfa8ac4c6365ca..f50373645ab4ade89923ae8a7f4d5d35946a8f99 100644 (file)
@@ -282,7 +282,6 @@ config VETH
 config VIRTIO_NET
        tristate "Virtio network driver"
        depends on VIRTIO
-       select AVERAGE
        ---help---
          This is the virtual network driver for virtio.  It can be used with
          lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
index 2d7d72c8851962a7fb8d8815dada4d69a1820032..0ef2ed3a610ec9d1fae6641ab74dd219a29692e9 100644 (file)
@@ -4120,9 +4120,8 @@ void bond_setup(struct net_device *bond_dev)
        SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
 
        /* Initialize the device options */
-       bond_dev->tx_queue_len = 0;
        bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
-       bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT;
+       bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
        bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
 
        /* don't acquire bond device's netif_tx_lock when transmitting */
index b3b922adc0e4f68ed15ff34537c21e1bd7e5e81f..615c65da39bedb648817a99002235ffbae2e6c46 100644 (file)
@@ -1120,7 +1120,7 @@ static void cfhsi_setup(struct net_device *dev)
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->destructor = free_netdev;
        dev->netdev_ops = &cfhsi_netdevops;
        for (i = 0; i < CFHSI_PRIO_LAST; ++i)
index 9da06537237ff220a16b3c5831c728d809d91bb1..c2dea4916e5d720bb29814153f302ec364fe4f61 100644 (file)
@@ -427,7 +427,7 @@ static void caifdev_setup(struct net_device *dev)
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = CAIF_MAX_MTU;
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->destructor = free_netdev;
        skb_queue_head_init(&serdev->head);
        serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
index 72ea9ff9bb9c02ae16133de4b12f83e70ec97c0d..de3962014af70c8a979f4cb63b063583ba4927a9 100644 (file)
@@ -710,7 +710,7 @@ static void cfspi_setup(struct net_device *dev)
        dev->netdev_ops = &cfspi_ops;
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_NOARP | IFF_POINTOPOINT;
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->mtu = SPI_MAX_PAYLOAD_SIZE;
        dev->destructor = free_netdev;
        skb_queue_head_init(&cfspi->qhead);
index b1e8d729851cbb5173c1bbec2b34c4893b2ff595..c83f0f03482ba1d2d9f2b121d2c844af29432f3c 100644 (file)
@@ -805,7 +805,7 @@ static void flexcan_set_bittiming(struct net_device *dev)
        if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
                reg |= FLEXCAN_CTRL_SMP;
 
-       netdev_info(dev, "writing ctrl=0x%08x\n", reg);
+       netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
        flexcan_write(reg, &regs->ctrl);
 
        /* print chip status */
index 8b4d3e6875eb17e6bca38c812132953d0c5ce2c2..5eee62badf45457798c2fabe5005faa26c6286e2 100644 (file)
@@ -162,7 +162,7 @@ struct gs_can {
        struct can_bittiming_const bt_const;
        unsigned int channel;   /* channel number */
 
-       /* This lock prevents a race condition between xmit and recieve. */
+       /* This lock prevents a race condition between xmit and receive. */
        spinlock_t tx_ctx_lock;
        struct gs_tx_context tx_context[GS_MAX_TX_URBS];
 
@@ -274,7 +274,7 @@ static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
        }
 }
 
-static void gs_usb_recieve_bulk_callback(struct urb *urb)
+static void gs_usb_receive_bulk_callback(struct urb *urb)
 {
        struct gs_usb *usbcan = urb->context;
        struct gs_can *dev;
@@ -376,7 +376,7 @@ static void gs_usb_recieve_bulk_callback(struct urb *urb)
                          usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
                          hf,
                          sizeof(struct gs_host_frame),
-                         gs_usb_recieve_bulk_callback,
+                         gs_usb_receive_bulk_callback,
                          usbcan
                          );
 
@@ -605,7 +605,7 @@ static int gs_can_open(struct net_device *netdev)
                                                          GSUSB_ENDPOINT_IN),
                                          buf,
                                          sizeof(struct gs_host_frame),
-                                         gs_usb_recieve_bulk_callback,
+                                         gs_usb_receive_bulk_callback,
                                          parent);
                        urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
index 332f2c8090d0f1ce9030774a424123b1c47f1515..3774f53d28d781aaec7f5150352a944ba505d974 100644 (file)
@@ -1926,8 +1926,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                 * full duplex.
                 */
                reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
-               if (dsa_is_cpu_port(ds, port) ||
-                   ds->dsa_port_mask & (1 << port)) {
+               if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
                        reg |= PORT_PCS_CTRL_FORCE_LINK |
                                PORT_PCS_CTRL_LINK_UP |
                                PORT_PCS_CTRL_DUPLEX_FULL |
@@ -1988,12 +1987,15 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                                reg |= PORT_CONTROL_EGRESS_ADD_TAG;
                }
        }
-       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-           mv88e6xxx_6320_family(ds)) {
-               if (ds->dsa_port_mask & (1 << port))
+       if (dsa_is_dsa_port(ds, port)) {
+               if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+                       reg |= PORT_CONTROL_DSA_TAG;
+               if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+                   mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+                   mv88e6xxx_6320_family(ds)) {
                        reg |= PORT_CONTROL_FRAME_MODE_DSA;
+               }
+
                if (port == dsa_upstream_port(ds))
                        reg |= PORT_CONTROL_FORWARD_UNKNOWN |
                                PORT_CONTROL_FORWARD_UNKNOWN_MC;
@@ -2031,7 +2033,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                        reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
        }
 
-       reg |= PORT_CONTROL_2_8021Q_SECURE;
+       reg |= PORT_CONTROL_2_8021Q_FALLBACK;
 
        if (reg) {
                ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
index 49adbf1b7574211dcd97db2c8cc970e427934c87..815eb94990f5edb50883bf9d9a4f68a1dc535e65 100644 (file)
@@ -144,10 +144,9 @@ static void dummy_setup(struct net_device *dev)
        dev->destructor = free_netdev;
 
        /* Fill in device structure with ethernet-generic values. */
-       dev->tx_queue_len = 0;
        dev->flags |= IFF_NOARP;
        dev->flags &= ~IFF_MULTICAST;
-       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
        dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
        dev->features   |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
        eth_hw_addr_random(dev);
index 299eb4315fe647ba8d67302649a2cf928a4d59d5..4f68d19c45bda1f7a628dcb9f1172d8e81269826 100644 (file)
@@ -905,40 +905,6 @@ static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pda
        return ret;
 }
 
-static int xgene_get_mac_address(struct device *dev,
-                                unsigned char *addr)
-{
-       int ret;
-
-       ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
-       if (ret)
-               ret = device_property_read_u8_array(dev, "mac-address",
-                                                   addr, 6);
-       if (ret)
-               return -ENODEV;
-
-       return ETH_ALEN;
-}
-
-static int xgene_get_phy_mode(struct device *dev)
-{
-       int i, ret;
-       char *modestr;
-
-       ret = device_property_read_string(dev, "phy-connection-type",
-                                         (const char **)&modestr);
-       if (ret)
-               ret = device_property_read_string(dev, "phy-mode",
-                                                 (const char **)&modestr);
-       if (ret)
-               return -ENODEV;
-
-       for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
-               if (!strcasecmp(modestr, phy_modes(i)))
-                       return i;
-       }
-       return -ENODEV;
-}
 
 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 {
@@ -998,12 +964,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
        if (ret)
                return ret;
 
-       if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
+       if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
                eth_hw_addr_random(ndev);
 
        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
 
-       pdata->phy_mode = xgene_get_phy_mode(dev);
+       pdata->phy_mode = device_get_phy_mode(dev);
        if (pdata->phy_mode < 0) {
                dev_err(dev, "Unable to get phy-connection-type\n");
                return pdata->phy_mode;
index 8be9eab733203c2817fe916b38dd6f511e8e91f4..e930aa9a3cfb8ed3c4edad3fc03ae2b3ce6a586f 100644 (file)
@@ -139,6 +139,16 @@ config BNX2X_SRIOV
          Virtualization support in the 578xx and 57712 products. This
          allows for virtual function acceleration in virtual environments.
 
+config BNX2X_VXLAN
+       bool "Virtual eXtensible Local Area Network support"
+       default n
+       depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m)
+       ---help---
+         This enables hardward offload support for VXLAN protocol over the
+         NetXtremeII series adapters.
+         Say Y here if you want to enable hardware offload support for
+         Virtual eXtensible Local Area Network (VXLAN) in the driver.
+
 config BGMAC
        tristate "BCMA bus GBit core support"
        depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
index 5762c485ea06e75305a88784e0b34816680e164e..ba936635322a83eee32f15e49f12e393ed924d38 100644 (file)
@@ -1392,6 +1392,8 @@ enum sp_rtnl_flag {
        BNX2X_SP_RTNL_HYPERVISOR_VLAN,
        BNX2X_SP_RTNL_TX_STOP,
        BNX2X_SP_RTNL_GET_DRV_VERSION,
+       BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+       BNX2X_SP_RTNL_DEL_VXLAN_PORT,
 };
 
 enum bnx2x_iov_flag {
@@ -2571,6 +2573,10 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
                        (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) ||       \
                         IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp))
 
+/* Determines whether BW configuration arrives in 100Mb units or in
+ * percentages from actual physical link speed.
+ */
+#define IS_MF_PERCENT_BW(bp) (IS_MF_SI(bp) || IS_MF_UFP(bp) || IS_MF_BD(bp))
 
 #define SET_FLAG(value, mask, flag) \
        do {\
index a2bb1f0934d53c3caf8641f512afc9d52e295cf6..44173be5cbf0d914111304ba2420954f44d5a7e7 100644 (file)
@@ -1190,7 +1190,7 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
                /* Calculate the current MAX line speed limit for the MF
                 * devices
                 */
-               if (IS_MF_SI(bp))
+               if (IS_MF_PERCENT_BW(bp))
                        line_speed = (line_speed * maxCfg) / 100;
                else { /* SD mode */
                        u16 vn_max_rate = maxCfg * 100;
index e18a0e4d3ed17a0a7a224b60fdd03a1025e45be0..b7d32e8412f14b8f9c3a71d4ff29d750a0d730d7 100644 (file)
@@ -967,6 +967,8 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;
 
+       start_params->vxlan_dst_port = bp->vxlan_dst_port;
+
        start_params->inner_rss = 1;
 
        if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
index ad73a60de333e720043fec43b43c37befbb2f945..26fbfcc6f7db8d17d9d9ee32cb6171cc62d8516a 100644 (file)
@@ -2494,7 +2494,7 @@ static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
        else {
                u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
 
-               if (IS_MF_SI(bp)) {
+               if (IS_MF_PERCENT_BW(bp)) {
                        /* maxCfg in percents of linkspeed */
                        vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
                } else /* SD modes */
@@ -10075,6 +10075,81 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
        }
 }
 
+#ifdef CONFIG_BNX2X_VXLAN
+static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port)
+{
+       struct bnx2x_func_switch_update_params *switch_update_params;
+       struct bnx2x_func_state_params func_params = {NULL};
+       int rc;
+
+       switch_update_params = &func_params.params.switch_update;
+
+       /* Prepare parameters for function state transitions */
+       __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+       __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+       func_params.f_obj = &bp->func_obj;
+       func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+       /* Function parameters */
+       __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+                 &switch_update_params->changes);
+       switch_update_params->vxlan_dst_port = port;
+       rc = bnx2x_func_state_change(bp, &func_params);
+       if (rc)
+               BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n",
+                         port, rc);
+       return rc;
+}
+
+static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
+{
+       if (!netif_running(bp->dev))
+               return;
+
+       if (bp->vxlan_dst_port || !IS_PF(bp)) {
+               DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
+               return;
+       }
+
+       bp->vxlan_dst_port = port;
+       bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
+}
+
+static void bnx2x_add_vxlan_port(struct net_device *netdev,
+                                sa_family_t sa_family, __be16 port)
+{
+       struct bnx2x *bp = netdev_priv(netdev);
+       u16 t_port = ntohs(port);
+
+       __bnx2x_add_vxlan_port(bp, t_port);
+}
+
+static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
+{
+       if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) {
+               DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
+               return;
+       }
+
+       if (netif_running(bp->dev)) {
+               bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
+       } else {
+               bp->vxlan_dst_port = 0;
+               netdev_info(bp->dev, "Deleted vxlan dest port %d", port);
+       }
+}
+
+static void bnx2x_del_vxlan_port(struct net_device *netdev,
+                                sa_family_t sa_family, __be16 port)
+{
+       struct bnx2x *bp = netdev_priv(netdev);
+       u16 t_port = ntohs(port);
+
+       __bnx2x_del_vxlan_port(bp, t_port);
+}
+#endif
+
 static int bnx2x_close(struct net_device *dev);
 
 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
@@ -10083,6 +10158,9 @@ static int bnx2x_close(struct net_device *dev);
 static void bnx2x_sp_rtnl_task(struct work_struct *work)
 {
        struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+#ifdef CONFIG_BNX2X_VXLAN
+       u16 port;
+#endif
 
        rtnl_lock();
 
@@ -10181,6 +10259,27 @@ sp_rtnl_not_reset:
                               &bp->sp_rtnl_state))
                bnx2x_update_mng_version(bp);
 
+#ifdef CONFIG_BNX2X_VXLAN
+       port = bp->vxlan_dst_port;
+       if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+                              &bp->sp_rtnl_state)) {
+               if (!bnx2x_vxlan_port_update(bp, port))
+                       netdev_info(bp->dev, "Added vxlan dest port %d", port);
+               else
+                       bp->vxlan_dst_port = 0;
+       }
+
+       if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT,
+                              &bp->sp_rtnl_state)) {
+               if (!bnx2x_vxlan_port_update(bp, 0)) {
+                       netdev_info(bp->dev,
+                                   "Deleted vxlan dest port %d", port);
+                       bp->vxlan_dst_port = 0;
+                       vxlan_get_rx_port(bp->dev);
+               }
+       }
+#endif
+
        /* work which needs rtnl lock not-taken (as it takes the lock itself and
         * can be called from other contexts as well)
         */
@@ -12379,6 +12478,12 @@ static int bnx2x_open(struct net_device *dev)
        rc = bnx2x_nic_load(bp, LOAD_OPEN);
        if (rc)
                return rc;
+
+#ifdef CONFIG_BNX2X_VXLAN
+       if (IS_PF(bp))
+               vxlan_get_rx_port(dev);
+#endif
+
        return 0;
 }
 
@@ -12894,6 +12999,10 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_get_phys_port_id   = bnx2x_get_phys_port_id,
        .ndo_set_vf_link_state  = bnx2x_set_vf_link_state,
        .ndo_features_check     = bnx2x_features_check,
+#ifdef CONFIG_BNX2X_VXLAN
+       .ndo_add_vxlan_port     = bnx2x_add_vxlan_port,
+       .ndo_del_vxlan_port     = bnx2x_del_vxlan_port,
+#endif
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
index 1732e29253cd26e22d285f6ce1095281c06bd335..0a87a3247464fdd1939d8bcb5867a9f0e735cf02 100644 (file)
@@ -1289,13 +1289,14 @@ static unsigned int xdigit2int(unsigned char c)
 static ssize_t mps_trc_write(struct file *file, const char __user *buf,
                             size_t count, loff_t *pos)
 {
-       int i, j, enable, ret;
+       int i, enable, ret;
        u32 *data, *mask;
        struct trace_params tp;
        const struct inode *ino;
        unsigned int trcidx;
        char *s, *p, *word, *end;
        struct adapter *adap;
+       u32 j;
 
        ino = file_inode(file);
        trcidx = (uintptr_t)ino->i_private & 3;
@@ -1340,7 +1341,7 @@ static ssize_t mps_trc_write(struct file *file, const char __user *buf,
 
                if (!strncmp(word, "qid=", 4)) {
                        end = (char *)word + 4;
-                       ret = kstrtoul(end, 10, (unsigned long *)&j);
+                       ret = kstrtouint(end, 10, &j);
                        if (ret)
                                goto out;
                        if (!adap->trace_rss) {
@@ -1369,7 +1370,7 @@ static ssize_t mps_trc_write(struct file *file, const char __user *buf,
                }
                if (!strncmp(word, "snaplen=", 8)) {
                        end = (char *)word + 8;
-                       ret = kstrtoul(end, 10, (unsigned long *)&j);
+                       ret = kstrtouint(end, 10, &j);
                        if (ret || j > 9600) {
 inval:                         count = -EINVAL;
                                goto out;
@@ -1379,7 +1380,7 @@ inval:                            count = -EINVAL;
                }
                if (!strncmp(word, "minlen=", 7)) {
                        end = (char *)word + 7;
-                       ret = kstrtoul(end, 10, (unsigned long *)&j);
+                       ret = kstrtouint(end, 10, &j);
                        if (ret || j > TFMINPKTSIZE_M)
                                goto inval;
                        tp.min_len = j;
@@ -1453,7 +1454,7 @@ inval:                            count = -EINVAL;
                }
                if (*word == '@') {
                        end = (char *)word + 1;
-                       ret = kstrtoul(end, 10, (unsigned long *)&j);
+                       ret = kstrtouint(end, 10, &j);
                        if (*end && *end != '\n')
                                goto inval;
                        if (j & 7)          /* doesn't start at multiple of 8 */
index 84b6a2b46aec474959c69e84288386dc1d499282..8b53f7d4bebf33075f7f891bd3ceafc717fb6208 100644 (file)
@@ -33,7 +33,7 @@
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "2.1.1.83"
+#define DRV_VERSION            "2.3.0.12"
 #define DRV_COPYRIGHT          "Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
@@ -191,6 +191,25 @@ struct enic {
        struct vnic_gen_stats gen_stats;
 };
 
+static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
+{
+       struct enic *enic = vdev->priv;
+
+       return enic->netdev;
+}
+
+/* wrappers function for kernel log
+ * Make sure variable vdev of struct vnic_dev is available in the block where
+ * these macros are used
+ */
+#define vdev_info(args...)     dev_info(&vdev->pdev->dev, args)
+#define vdev_warn(args...)     dev_warn(&vdev->pdev->dev, args)
+#define vdev_err(args...)      dev_err(&vdev->pdev->dev, args)
+
+#define vdev_netinfo(args...)  netdev_info(vnic_get_netdev(vdev), args)
+#define vdev_netwarn(args...)  netdev_warn(vnic_get_netdev(vdev), args)
+#define vdev_neterr(args...)   netdev_err(vnic_get_netdev(vdev), args)
+
 static inline struct device *enic_get_dev(struct enic *enic)
 {
        return &(enic->pdev->dev);
index 8f646e4e968b329ab53dcb70af3c733e7788661f..3352d027ab895c59195ef79bd322112964c21c75 100644 (file)
@@ -2484,6 +2484,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_iounmap;
        }
 
+       err = vnic_devcmd_init(enic->vdev);
+
+       if (err)
+               goto err_out_vnic_unregister;
+
 #ifdef CONFIG_PCI_IOV
        /* Get number of subvnics */
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
@@ -2658,8 +2663,8 @@ err_out_disable_sriov_pp:
                pci_disable_sriov(pdev);
                enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
        }
-err_out_vnic_unregister:
 #endif
+err_out_vnic_unregister:
        vnic_dev_unregister(enic->vdev);
 err_out_iounmap:
        enic_iounmap(enic);
index 0daa1c7073cb008fb79d774adb5c35c9c1a69bde..abeda2a9ea273745f532f0a17732ed8630898bb4 100644 (file)
@@ -24,6 +24,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_cq.h"
+#include "enic.h"
 
 void vnic_cq_free(struct vnic_cq *cq)
 {
@@ -42,7 +43,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
 
        cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
        if (!cq->ctrl) {
-               pr_err("Failed to hook CQ[%d] resource\n", index);
+               vdev_err("Failed to hook CQ[%d] resource\n", index);
                return -EINVAL;
        }
 
index 62f7b7baf93cd79f77de9aceb8d977461c075cb7..19a49a6e3911b8f3308de52dae87eea44b2eabc0 100644 (file)
 #include "vnic_resource.h"
 #include "vnic_devcmd.h"
 #include "vnic_dev.h"
+#include "vnic_wq.h"
 #include "vnic_stats.h"
-
-enum vnic_proxy_type {
-       PROXY_NONE,
-       PROXY_BY_BDF,
-       PROXY_BY_INDEX,
-};
-
-struct vnic_res {
-       void __iomem *vaddr;
-       dma_addr_t bus_addr;
-       unsigned int count;
-};
-
-struct vnic_intr_coal_timer_info {
-       u32 mul;
-       u32 div;
-       u32 max_usec;
-};
-
-struct vnic_dev {
-       void *priv;
-       struct pci_dev *pdev;
-       struct vnic_res res[RES_TYPE_MAX];
-       enum vnic_dev_intr_mode intr_mode;
-       struct vnic_devcmd __iomem *devcmd;
-       struct vnic_devcmd_notify *notify;
-       struct vnic_devcmd_notify notify_copy;
-       dma_addr_t notify_pa;
-       u32 notify_sz;
-       dma_addr_t linkstatus_pa;
-       struct vnic_stats *stats;
-       dma_addr_t stats_pa;
-       struct vnic_devcmd_fw_info *fw_info;
-       dma_addr_t fw_info_pa;
-       enum vnic_proxy_type proxy;
-       u32 proxy_index;
-       u64 args[VNIC_DEVCMD_NARGS];
-       struct vnic_intr_coal_timer_info intr_coal_timer_info;
-};
+#include "enic.h"
 
 #define VNIC_MAX_RES_HDR_SIZE \
        (sizeof(struct vnic_resource_header) + \
@@ -90,14 +53,14 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
                return -EINVAL;
 
        if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
-               pr_err("vNIC BAR0 res hdr length error\n");
+               vdev_err("vNIC BAR0 res hdr length error\n");
                return -EINVAL;
        }
 
        rh  = bar->vaddr;
        mrh = bar->vaddr;
        if (!rh) {
-               pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+               vdev_err("vNIC BAR0 res hdr not mem-mapped\n");
                return -EINVAL;
        }
 
@@ -106,11 +69,10 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
                (ioread32(&rh->version) != VNIC_RES_VERSION)) {
                if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
                        (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
-                       pr_err("vNIC BAR0 res magic/version error "
-                       "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
-                       VNIC_RES_MAGIC, VNIC_RES_VERSION,
-                       MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
-                       ioread32(&rh->magic), ioread32(&rh->version));
+                       vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
+                                VNIC_RES_MAGIC, VNIC_RES_VERSION,
+                                MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
+                                ioread32(&rh->magic), ioread32(&rh->version));
                        return -EINVAL;
                }
        }
@@ -144,17 +106,15 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
                        /* each count is stride bytes long */
                        len = count * VNIC_RES_STRIDE;
                        if (len + bar_offset > bar[bar_num].len) {
-                               pr_err("vNIC BAR0 resource %d "
-                                       "out-of-bounds, offset 0x%x + "
-                                       "size 0x%x > bar len 0x%lx\n",
-                                       type, bar_offset,
-                                       len,
-                                       bar[bar_num].len);
+                               vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
+                                        type, bar_offset, len,
+                                        bar[bar_num].len);
                                return -EINVAL;
                        }
                        break;
                case RES_TYPE_INTR_PBA_LEGACY:
                case RES_TYPE_DEVCMD:
+               case RES_TYPE_DEVCMD2:
                        len = count;
                        break;
                default:
@@ -238,8 +198,8 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
                &ring->base_addr_unaligned);
 
        if (!ring->descs_unaligned) {
-               pr_err("Failed to allocate ring (size=%d), aborting\n",
-                       (int)ring->size);
+               vdev_err("Failed to allocate ring (size=%d), aborting\n",
+                        (int)ring->size);
                return -ENOMEM;
        }
 
@@ -281,7 +241,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                return -ENODEV;
        }
        if (status & STAT_BUSY) {
-               pr_err("Busy devcmd %d\n", _CMD_N(cmd));
+               vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd));
                return -EBUSY;
        }
 
@@ -315,8 +275,8 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                                        return -err;
                                if (err != ERR_ECMDUNKNOWN ||
                                    cmd != CMD_CAPABILITY)
-                                       pr_err("Error %d devcmd %d\n",
-                                               err, _CMD_N(cmd));
+                                       vdev_neterr("Error %d devcmd %d\n",
+                                                   err, _CMD_N(cmd));
                                return -err;
                        }
 
@@ -330,10 +290,160 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                }
        }
 
-       pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
+       vdev_neterr("Timedout devcmd %d\n", _CMD_N(cmd));
        return -ETIMEDOUT;
 }
 
+static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+                         int wait)
+{
+       struct devcmd2_controller *dc2c = vdev->devcmd2;
+       struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+       unsigned int i;
+       int delay, err;
+       u32 fetch_index, posted, new_posted;
+
+       posted = ioread32(&dc2c->wq_ctrl->posted_index);
+       fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
+
+       if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF)
+               return -ENODEV;
+
+       new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+
+       if (new_posted == fetch_index) {
+               vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
+                           _CMD_N(cmd), fetch_index, posted);
+               return -EBUSY;
+       }
+       dc2c->cmd_ring[posted].cmd = cmd;
+       dc2c->cmd_ring[posted].flags = 0;
+
+       if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+               dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
+       if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
+               for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+                       dc2c->cmd_ring[posted].args[i] = vdev->args[i];
+
+       /* Adding write memory barrier prevents compiler and/or CPU reordering,
+        * thus avoiding descriptor posting before descriptor is initialized.
+        * Otherwise, hardware can read stale descriptor fields.
+        */
+       wmb();
+       iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
+
+       if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+               return 0;
+
+       for (delay = 0; delay < wait; delay++) {
+               if (result->color == dc2c->color) {
+                       dc2c->next_result++;
+                       if (dc2c->next_result == dc2c->result_size) {
+                               dc2c->next_result = 0;
+                               dc2c->color = dc2c->color ? 0 : 1;
+                       }
+                       if (result->error) {
+                               err = result->error;
+                               if (err != ERR_ECMDUNKNOWN ||
+                                   cmd != CMD_CAPABILITY)
+                                       vdev_neterr("Error %d devcmd %d\n",
+                                                   err, _CMD_N(cmd));
+                               return -err;
+                       }
+                       if (_CMD_DIR(cmd) & _CMD_DIR_READ)
+                               for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
+                                       vdev->args[i] = result->results[i];
+
+                       return 0;
+               }
+               udelay(100);
+       }
+
+       vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd));
+
+       return -ETIMEDOUT;
+}
+
+static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+{
+       vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+       if (!vdev->devcmd)
+               return -ENODEV;
+       vdev->devcmd_rtn = _vnic_dev_cmd;
+
+       return 0;
+}
+
+static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+{
+       int err;
+       unsigned int fetch_index;
+
+       if (vdev->devcmd2)
+               return 0;
+
+       vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
+       if (!vdev->devcmd2)
+               return -ENOMEM;
+
+       vdev->devcmd2->color = 1;
+       vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
+       err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
+                                   DEVCMD2_DESC_SIZE);
+       if (err)
+               goto err_free_devcmd2;
+
+       fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
+       if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
+               vdev_err("Fatal error in devcmd2 init - hardware surprise removal");
+
+               return -ENODEV;
+       }
+
+       enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
+                          0);
+       vnic_wq_enable(&vdev->devcmd2->wq);
+
+       err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+                                      DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+       if (err)
+               goto err_free_wq;
+
+       vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
+       vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
+       vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
+       vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
+                       VNIC_PADDR_TARGET;
+       vdev->args[1] = DEVCMD2_RING_SIZE;
+
+       err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
+       if (err)
+               goto err_free_desc_ring;
+
+       vdev->devcmd_rtn = _vnic_dev_cmd2;
+
+       return 0;
+
+err_free_desc_ring:
+       vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+err_free_wq:
+       vnic_wq_disable(&vdev->devcmd2->wq);
+       vnic_wq_free(&vdev->devcmd2->wq);
+err_free_devcmd2:
+       kfree(vdev->devcmd2);
+       vdev->devcmd2 = NULL;
+
+       return err;
+}
+
+static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+{
+       vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+       vnic_wq_disable(&vdev->devcmd2->wq);
+       vnic_wq_free(&vdev->devcmd2->wq);
+       kfree(vdev->devcmd2);
+}
+
 static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
        enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
        u64 *a0, u64 *a1, int wait)
@@ -348,7 +458,7 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
        vdev->args[2] = *a0;
        vdev->args[3] = *a1;
 
-       err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
+       err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
        if (err)
                return err;
 
@@ -357,7 +467,8 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
                err = (int)vdev->args[1];
                if (err != ERR_ECMDUNKNOWN ||
                    cmd != CMD_CAPABILITY)
-                       pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
+                       vdev_neterr("Error %d proxy devcmd %d\n", err,
+                                   _CMD_N(cmd));
                return err;
        }
 
@@ -375,7 +486,7 @@ static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
        vdev->args[0] = *a0;
        vdev->args[1] = *a1;
 
-       err = _vnic_dev_cmd(vdev, cmd, wait);
+       err = vdev->devcmd_rtn(vdev, cmd, wait);
 
        *a0 = vdev->args[0];
        *a1 = vdev->args[1];
@@ -650,7 +761,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
 
        err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
        if (err)
-               pr_err("Can't set packet filter\n");
+               vdev_neterr("Can't set packet filter\n");
 
        return err;
 }
@@ -667,7 +778,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
 
        err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
        if (err)
-               pr_err("Can't add addr [%pM], %d\n", addr, err);
+               vdev_neterr("Can't add addr [%pM], %d\n", addr, err);
 
        return err;
 }
@@ -684,7 +795,7 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
 
        err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
        if (err)
-               pr_err("Can't del addr [%pM], %d\n", addr, err);
+               vdev_neterr("Can't del addr [%pM], %d\n", addr, err);
 
        return err;
 }
@@ -728,7 +839,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
        dma_addr_t notify_pa;
 
        if (vdev->notify || vdev->notify_pa) {
-               pr_err("notify block %p still allocated", vdev->notify);
+               vdev_neterr("notify block %p still allocated", vdev->notify);
                return -EINVAL;
        }
 
@@ -838,7 +949,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
        memset(vdev->args, 0, sizeof(vdev->args));
 
        if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
-               err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
+               err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
        else
                err = ERR_ECMDUNKNOWN;
 
@@ -847,7 +958,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
         */
        if ((err == ERR_ECMDUNKNOWN) ||
                (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
-               pr_warn("Using default conversion factor for interrupt coalesce timer\n");
+               vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n");
                vnic_dev_intr_coal_timer_info_default(vdev);
                return 0;
        }
@@ -938,6 +1049,9 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
                        pci_free_consistent(vdev->pdev,
                                sizeof(struct vnic_devcmd_fw_info),
                                vdev->fw_info, vdev->fw_info_pa);
+               if (vdev->devcmd2)
+                       vnic_dev_deinit_devcmd2(vdev);
+
                kfree(vdev);
        }
 }
@@ -959,10 +1073,6 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
        if (vnic_dev_discover_res(vdev, bar, num_bars))
                goto err_out;
 
-       vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
-       if (!vdev->devcmd)
-               goto err_out;
-
        return vdev;
 
 err_out:
@@ -977,6 +1087,29 @@ struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
 }
 EXPORT_SYMBOL(vnic_dev_get_pdev);
 
+int vnic_devcmd_init(struct vnic_dev *vdev)
+{
+       void __iomem *res;
+       int err;
+
+       res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+       if (res) {
+               err = vnic_dev_init_devcmd2(vdev);
+               if (err)
+                       vdev_warn("DEVCMD2 init failed: %d, Using DEVCMD1",
+                                 err);
+               else
+                       return 0;
+       } else {
+               vdev_warn("DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
+       }
+       err = vnic_dev_init_devcmd1(vdev);
+       if (err)
+               vdev_err("DEVCMD1 initialization failed: %d", err);
+
+       return err;
+}
+
 int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
 {
        u64 a0, a1 = len;
index 1fb214efcebaf0bb959dc62a0871f792cd2b4e43..b013b6a78e8772a9bdc077e0c0bd136092307193 100644 (file)
@@ -70,7 +70,48 @@ struct vnic_dev_ring {
        unsigned int desc_avail;
 };
 
-struct vnic_dev;
+enum vnic_proxy_type {
+       PROXY_NONE,
+       PROXY_BY_BDF,
+       PROXY_BY_INDEX,
+};
+
+struct vnic_res {
+       void __iomem *vaddr;
+       dma_addr_t bus_addr;
+       unsigned int count;
+};
+
+struct vnic_intr_coal_timer_info {
+       u32 mul;
+       u32 div;
+       u32 max_usec;
+};
+
+struct vnic_dev {
+       void *priv;
+       struct pci_dev *pdev;
+       struct vnic_res res[RES_TYPE_MAX];
+       enum vnic_dev_intr_mode intr_mode;
+       struct vnic_devcmd __iomem *devcmd;
+       struct vnic_devcmd_notify *notify;
+       struct vnic_devcmd_notify notify_copy;
+       dma_addr_t notify_pa;
+       u32 notify_sz;
+       dma_addr_t linkstatus_pa;
+       struct vnic_stats *stats;
+       dma_addr_t stats_pa;
+       struct vnic_devcmd_fw_info *fw_info;
+       dma_addr_t fw_info_pa;
+       enum vnic_proxy_type proxy;
+       u32 proxy_index;
+       u64 args[VNIC_DEVCMD_NARGS];
+       struct vnic_intr_coal_timer_info intr_coal_timer_info;
+       struct devcmd2_controller *devcmd2;
+       int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+                         int wait);
+};
+
 struct vnic_stats;
 
 void *vnic_dev_priv(struct vnic_dev *vdev);
@@ -135,5 +176,6 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
 int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
 int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
                        struct filter *data);
+int vnic_devcmd_init(struct vnic_dev *vdev);
 
 #endif /* _VNIC_DEV_H_ */
index 435d0cd96c224c5c8b6a5c8db6498d41458b8cf6..2a812880b884f35e8ebc51d971be3639c8f71c74 100644 (file)
@@ -365,6 +365,12 @@ enum vnic_devcmd_cmd {
         */
        CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
 
+       /* Initialization for the devcmd2 interface.
+        * in: (u64) a0 = host result buffer physical address
+        * in: (u16) a1 = number of entries in result buffer
+        */
+       CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57),
+
        /* Add a filter.
         * in: (u64) a0= filter address
         *     (u32) a1= size of filter
@@ -629,4 +635,26 @@ struct vnic_devcmd {
        u64 args[VNIC_DEVCMD_NARGS];    /* RW cmd args (little-endian) */
 };
 
+#define DEVCMD2_FNORESULT      0x1     /* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS     VNIC_DEVCMD_NARGS
+struct vnic_devcmd2 {
+       u16 pad;
+       u16 flags;
+       u32 cmd;
+       u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS  VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+       u64 results[VNIC_DEVCMD2_NRESULTS];
+       u32 pad;
+       u16 completed_index;
+       u8  error;
+       u8  color;
+};
+
+#define DEVCMD2_RING_SIZE      32
+#define DEVCMD2_DESC_SIZE      128
+
 #endif /* _VNIC_DEVCMD_H_ */
index 0ca107f7bc8ca33851e7c26339d4fa48ee0fced4..942759d9cb3c4f4a932839fb8e9ea028a6f4a459 100644 (file)
@@ -25,6 +25,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_intr.h"
+#include "enic.h"
 
 void vnic_intr_free(struct vnic_intr *intr)
 {
@@ -39,7 +40,7 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
 
        intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
        if (!intr->ctrl) {
-               pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
+               vdev_err("Failed to hook INTR[%d].ctrl resource\n", index);
                return -EINVAL;
        }
 
index e0a73f1ca6f43e3b66558edb933cb9a9105aa5bb..4e45f88ac1d4e322ec80dcac4c33399fbf31189c 100644 (file)
@@ -48,6 +48,13 @@ enum vnic_res_type {
        RES_TYPE_RSVD7,
        RES_TYPE_DEVCMD,                /* Device command region */
        RES_TYPE_PASS_THRU_PAGE,        /* Pass-thru page */
+       RES_TYPE_SUBVNIC,               /* subvnic resource type */
+       RES_TYPE_MQ_WQ,                 /* MQ Work queues */
+       RES_TYPE_MQ_RQ,                 /* MQ Receive queues */
+       RES_TYPE_MQ_CQ,                 /* MQ Completion queues */
+       RES_TYPE_DEPRECATED1,           /* Old version of devcmd 2 */
+       RES_TYPE_DEPRECATED2,           /* Old version of devcmd 2 */
+       RES_TYPE_DEVCMD2,               /* Device control region */
 
        RES_TYPE_MAX,                   /* Count of resource types */
 };
index c4b2183bf352fb2a1881001777df91857c2d1f79..cce2777dfc415dc1da33b2e4866127edd095f90c 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_rq.h"
+#include "enic.h"
 
 static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
 {
@@ -91,7 +92,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
 
        rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
        if (!rq->ctrl) {
-               pr_err("Failed to hook RQ[%d] resource\n", index);
+               vdev_err("Failed to hook RQ[%d] resource\n", index);
                return -EINVAL;
        }
 
@@ -167,6 +168,7 @@ void vnic_rq_enable(struct vnic_rq *rq)
 int vnic_rq_disable(struct vnic_rq *rq)
 {
        unsigned int wait;
+       struct vnic_dev *vdev = rq->vdev;
 
        iowrite32(0, &rq->ctrl->enable);
 
@@ -177,7 +179,7 @@ int vnic_rq_disable(struct vnic_rq *rq)
                udelay(10);
        }
 
-       pr_err("Failed to disable RQ[%d]\n", rq->index);
+       vdev_neterr("Failed to disable RQ[%d]\n", rq->index);
 
        return -ETIMEDOUT;
 }
index b5a1c937fad2fb321336340b91b3358b551cf95e..05ad16a7e872054b1c6d2e3a4a87235e4ef2c29f 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_wq.h"
+#include "enic.h"
 
 static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 {
@@ -94,7 +95,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
 
        wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
        if (!wq->ctrl) {
-               pr_err("Failed to hook WQ[%d] resource\n", index);
+               vdev_err("Failed to hook WQ[%d] resource\n", index);
                return -EINVAL;
        }
 
@@ -113,10 +114,27 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
        return 0;
 }
 
-static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
-       unsigned int fetch_index, unsigned int posted_index,
-       unsigned int error_interrupt_enable,
-       unsigned int error_interrupt_offset)
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+                         unsigned int desc_count, unsigned int desc_size)
+{
+       int err;
+
+       wq->index = 0;
+       wq->vdev = vdev;
+
+       wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+       if (!wq->ctrl)
+               return -EINVAL;
+       vnic_wq_disable(wq);
+       err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+
+       return err;
+}
+
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+                       unsigned int fetch_index, unsigned int posted_index,
+                       unsigned int error_interrupt_enable,
+                       unsigned int error_interrupt_offset)
 {
        u64 paddr;
        unsigned int count = wq->ring.desc_count;
@@ -140,7 +158,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset)
 {
-       vnic_wq_init_start(wq, cq_index, 0, 0,
+       enic_wq_init_start(wq, cq_index, 0, 0,
                error_interrupt_enable,
                error_interrupt_offset);
 }
@@ -158,6 +176,7 @@ void vnic_wq_enable(struct vnic_wq *wq)
 int vnic_wq_disable(struct vnic_wq *wq)
 {
        unsigned int wait;
+       struct vnic_dev *vdev = wq->vdev;
 
        iowrite32(0, &wq->ctrl->enable);
 
@@ -168,7 +187,7 @@ int vnic_wq_disable(struct vnic_wq *wq)
                udelay(10);
        }
 
-       pr_err("Failed to disable WQ[%d]\n", wq->index);
+       vdev_neterr("Failed to disable WQ[%d]\n", wq->index);
 
        return -ETIMEDOUT;
 }
index 296154351823e2ebc30a205f72d2437bf86d0c30..8944af935a6078831a39c5145f35cc9af300a3fd 100644 (file)
@@ -88,6 +88,17 @@ struct vnic_wq {
        unsigned int pkts_outstanding;
 };
 
+struct devcmd2_controller {
+       struct vnic_wq_ctrl __iomem *wq_ctrl;
+       struct vnic_devcmd2 *cmd_ring;
+       struct devcmd2_result *result;
+       u16 next_result;
+       u16 result_size;
+       int color;
+       struct vnic_dev_ring results_ring;
+       struct vnic_wq wq;
+};
+
 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
 {
        /* how many does SW own? */
@@ -174,5 +185,11 @@ void vnic_wq_enable(struct vnic_wq *wq);
 int vnic_wq_disable(struct vnic_wq *wq);
 void vnic_wq_clean(struct vnic_wq *wq,
        void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+                         unsigned int desc_count, unsigned int desc_size);
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+                       unsigned int fetch_index, unsigned int posted_index,
+                       unsigned int error_interrupt_enable,
+                       unsigned int error_interrupt_offset);
 
 #endif /* _VNIC_WQ_H_ */
index 15cc3a1f12ff74b51af2f6139671c23dde996635..12687bf52b9518eaa1c4bb538ff26fcc88ce7acc 100644 (file)
@@ -5173,7 +5173,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
        struct device *dev = &adapter->pdev->dev;
        int status;
 
-       if (lancer_chip(adapter) || BEx_chip(adapter))
+       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
                return;
 
        if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5220,7 +5220,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
-       if (lancer_chip(adapter) || BEx_chip(adapter))
+       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
                return;
 
        if (adapter->vxlan_port != port)
index 24a85b292007c9e2b9e662e70b5036ae17c969d5..63c2bcf8031a8a8cc39c714bb1510e3f3ae03793 100644 (file)
@@ -150,6 +150,9 @@ static void nps_enet_tx_handler(struct net_device *ndev)
        if (!priv->tx_packet_sent || tx_ctrl.ct)
                return;
 
+       /* Ack Tx ctrl register */
+       nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);
+
        /* Check Tx transmit error */
        if (unlikely(tx_ctrl.et)) {
                ndev->stats.tx_errors++;
@@ -158,11 +161,7 @@ static void nps_enet_tx_handler(struct net_device *ndev)
                ndev->stats.tx_bytes += tx_ctrl.nt;
        }
 
-       if (priv->tx_skb) {
-               dev_kfree_skb(priv->tx_skb);
-               priv->tx_skb = NULL;
-       }
-
+       dev_kfree_skb(priv->tx_skb);
        priv->tx_packet_sent = false;
 
        if (netif_queue_stopped(ndev))
@@ -180,15 +179,16 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 {
        struct net_device *ndev = napi->dev;
        struct nps_enet_priv *priv = netdev_priv(ndev);
-       struct nps_enet_buf_int_enable buf_int_enable;
        u32 work_done;
 
-       buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
-       buf_int_enable.tx_done = NPS_ENET_ENABLE;
        nps_enet_tx_handler(ndev);
        work_done = nps_enet_rx_handler(ndev);
        if (work_done < budget) {
+               struct nps_enet_buf_int_enable buf_int_enable;
+
                napi_complete(napi);
+               buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
+               buf_int_enable.tx_done = NPS_ENET_ENABLE;
                nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
                                 buf_int_enable.value);
        }
@@ -211,12 +211,13 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
 {
        struct net_device *ndev = dev_instance;
        struct nps_enet_priv *priv = netdev_priv(ndev);
-       struct nps_enet_buf_int_cause buf_int_cause;
+       struct nps_enet_rx_ctl rx_ctrl;
+       struct nps_enet_tx_ctl tx_ctrl;
 
-       buf_int_cause.value =
-                       nps_enet_reg_get(priv, NPS_ENET_REG_BUF_INT_CAUSE);
+       rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+       tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
 
-       if (buf_int_cause.tx_done || buf_int_cause.rx_rdy)
+       if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr)
                if (likely(napi_schedule_prep(&priv->napi))) {
                        nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
                        __napi_schedule(&priv->napi);
@@ -307,11 +308,8 @@ static void nps_enet_hw_enable_control(struct net_device *ndev)
 
        /* Discard Packets bigger than max frame length */
        max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
-       if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
+       if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH)
                ge_mac_cfg_3->max_len = max_frame_length;
-               nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
-                                ge_mac_cfg_3->value);
-       }
 
        /* Enable interrupts */
        buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
@@ -339,11 +337,14 @@ static void nps_enet_hw_enable_control(struct net_device *ndev)
        ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE;
        ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE;
        ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR;
+       ge_mac_cfg_3->cf_drop = NPS_ENET_ENABLE;
 
        /* Enable Rx and Tx */
        ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE;
        ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE;
 
+       nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
+                        ge_mac_cfg_3->value);
        nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
                         ge_mac_cfg_0.value);
 }
@@ -527,10 +528,10 @@ static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
        /* This driver handles one frame at a time  */
        netif_stop_queue(ndev);
 
-       nps_enet_send_frame(ndev, skb);
-
        priv->tx_skb = skb;
 
+       nps_enet_send_frame(ndev, skb);
+
        return NETDEV_TX_OK;
 }
 
index fc45c9daa1c2dfb6310e210f49f1958f666cd28a..6703674d679c964c00cac0ea12e084267df1ee47 100644 (file)
@@ -36,7 +36,6 @@
 #define NPS_ENET_REG_RX_CTL            0x810
 #define NPS_ENET_REG_RX_BUF            0x818
 #define NPS_ENET_REG_BUF_INT_ENABLE    0x8C0
-#define NPS_ENET_REG_BUF_INT_CAUSE     0x8C4
 #define NPS_ENET_REG_GE_MAC_CFG_0      0x1000
 #define NPS_ENET_REG_GE_MAC_CFG_1      0x1004
 #define NPS_ENET_REG_GE_MAC_CFG_2      0x1008
@@ -108,25 +107,6 @@ struct nps_enet_buf_int_enable {
        };
 };
 
-/* Interrupt cause for data buffer events register */
-struct nps_enet_buf_int_cause {
-       union {
-               /* tx_done: Interrupt in the case when current frame was
-                *          read from TX buffer.
-                * rx_rdy:  Interrupt in the case when new frame is ready
-                *          in RX buffer.
-                */
-               struct {
-                       u32
-                       __reserved:30,
-                       tx_done:1,
-                       rx_rdy:1;
-               };
-
-               u32 value;
-       };
-};
-
 /* Gbps Eth MAC Configuration 0 register */
 struct nps_enet_ge_mac_cfg_0 {
        union {
index 087ffcdc48a312d365ffb24ee4f7c16ddcf18edb..4b69d061d90f7983fb0ee4929b7f6074922d3690 100644 (file)
@@ -2067,6 +2067,11 @@ int startup_gfar(struct net_device *ndev)
        /* Start Rx/Tx DMA and enable the interrupts */
        gfar_start(priv);
 
+       /* force link state update after mac reset */
+       priv->oldlink = 0;
+       priv->oldspeed = 0;
+       priv->oldduplex = -1;
+
        phy_start(priv->phydev);
 
        enable_napi(priv);
index d2657a412768839145b57c656a2349cb750a146a..068789e694c9b310ca6fd541a2be90f6c5059e36 100644 (file)
@@ -1770,8 +1770,11 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
        dma_addr = pci_map_single(nic->pdev,
                                  skb->data, skb->len, PCI_DMA_TODEVICE);
        /* If we can't map the skb, have the upper layer try later */
-       if (pci_dma_mapping_error(nic->pdev, dma_addr))
+       if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
+               dev_kfree_skb_any(skb);
+               skb = NULL;
                return -ENOMEM;
+       }
 
        /*
         * Use the last 4 bytes of the SKB payload packet as the CRC, used for
@@ -2967,6 +2970,11 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                           nic->params.cbs.max * sizeof(struct cb),
                           sizeof(u32),
                           0);
+       if (!nic->cbs_pool) {
+               netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
+               err = -ENOMEM;
+               goto err_out_pool;
+       }
        netif_info(nic, probe, nic->netdev,
                   "addr 0x%llx, irq %d, MAC addr %pM\n",
                   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
@@ -2974,6 +2982,8 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        return 0;
 
+err_out_pool:
+       unregister_netdev(netdev);
 err_out_free:
        e100_free(nic);
 err_out_iounmap:
index 546b5da168dca57c8b4d7ea0f75a895fe99f74d4..faf4b3f3d0b53ed5ecbf17c87507bba0e45ff1df 100644 (file)
@@ -1737,12 +1737,6 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        adapter->flags2 &= ~FLAG2_IS_DISCARDING;
-
-       writel(0, rx_ring->head);
-       if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-               e1000e_update_rdt_wa(rx_ring, 0);
-       else
-               writel(0, rx_ring->tail);
 }
 
 static void e1000e_downshift_workaround(struct work_struct *work)
@@ -2447,12 +2441,6 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-
-       writel(0, tx_ring->head);
-       if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-               e1000e_update_tdt_wa(tx_ring, 0);
-       else
-               writel(0, tx_ring->tail);
 }
 
 /**
@@ -2954,6 +2942,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
        tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
        tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
 
+       writel(0, tx_ring->head);
+       if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+               e1000e_update_tdt_wa(tx_ring, 0);
+       else
+               writel(0, tx_ring->tail);
+
        /* Set the Tx Interrupt Delay register */
        ew32(TIDV, adapter->tx_int_delay);
        /* Tx irq moderation */
@@ -3275,6 +3269,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
        rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
        rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
 
+       writel(0, rx_ring->head);
+       if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+               e1000e_update_rdt_wa(rx_ring, 0);
+       else
+               writel(0, rx_ring->tail);
+
        /* Enable Receive Checksum Offload for TCP and UDP */
        rxcsum = er32(RXCSUM);
        if (adapter->netdev->features & NETIF_F_RXCSUM)
index d19256994e5cfefce6793dbe58b953d9c79a0504..7a73510e547cd49f38629b4d60fbab8b8dce945a 100644 (file)
@@ -231,6 +231,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
        /* Verify phy id and set remaining function pointers */
        switch (phy->id) {
        case M88E1543_E_PHY_ID:
+       case M88E1512_E_PHY_ID:
        case I347AT4_E_PHY_ID:
        case M88E1112_E_PHY_ID:
        case M88E1111_I_PHY_ID:
@@ -243,7 +244,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
                else
                        phy->ops.get_cable_length = igb_get_cable_length_m88;
                phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
-               /* Check if this PHY is confgured for media swap. */
+               /* Check if this PHY is configured for media swap. */
                if (phy->id == M88E1112_E_PHY_ID) {
                        u16 data;
 
@@ -266,6 +267,11 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
                                hw->mac.ops.check_for_link =
                                                igb_check_for_link_media_swap;
                }
+               if (phy->id == M88E1512_E_PHY_ID) {
+                       ret_val = igb_initialize_M88E1512_phy(hw);
+                       if (ret_val)
+                               goto out;
+               }
                break;
        case IGP03E1000_E_PHY_ID:
                phy->type = e1000_phy_igp_3;
@@ -897,6 +903,7 @@ out:
  **/
 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
 {
+       struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val;
 
        /* This isn't a true "hard" reset, but is the only reset
@@ -913,7 +920,11 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
                goto out;
 
        ret_val = igb_phy_sw_reset(hw);
+       if (ret_val)
+               goto out;
 
+       if (phy->id == M88E1512_E_PHY_ID)
+               ret_val = igb_initialize_M88E1512_phy(hw);
 out:
        return ret_val;
 }
@@ -1587,6 +1598,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
                case I347AT4_E_PHY_ID:
                case M88E1112_E_PHY_ID:
                case M88E1543_E_PHY_ID:
+               case M88E1512_E_PHY_ID:
                case I210_I_PHY_ID:
                        ret_val = igb_copper_link_setup_m88_gen2(hw);
                        break;
@@ -2629,7 +2641,8 @@ s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
        u16 phy_data;
 
        if ((hw->phy.media_type != e1000_media_type_copper) ||
-           (phy->id != M88E1543_E_PHY_ID))
+           ((phy->id != M88E1543_E_PHY_ID) &&
+            (phy->id != M88E1512_E_PHY_ID)))
                goto out;
 
        if (!hw->dev_spec._82575.eee_disable) {
@@ -2709,7 +2722,8 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
 
        /* Check if EEE is supported on this device. */
        if ((hw->phy.media_type != e1000_media_type_copper) ||
-           (phy->id != M88E1543_E_PHY_ID))
+           ((phy->id != M88E1543_E_PHY_ID) &&
+            (phy->id != M88E1512_E_PHY_ID)))
                goto out;
 
        ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
index f8684aa285be8cac987263db6676f9d1076b5f9b..b1915043bc0cfefbe416b6bf5ca59658a8f47278 100644 (file)
 #define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT   7
 #define E1000_M88E1112_PAGE_ADDR               0x16
 #define E1000_M88E1112_STATUS                  0x01
+#define E1000_M88E1512_CFG_REG_1               0x0010
+#define E1000_M88E1512_CFG_REG_2               0x0011
+#define E1000_M88E1512_CFG_REG_3               0x0007
+#define E1000_M88E1512_MODE                    0x0014
 
 /* PCI Express Control */
 #define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
 #define M88_VENDOR           0x0141
 #define I210_I_PHY_ID        0x01410C00
 #define M88E1543_E_PHY_ID    0x01410EA0
+#define M88E1512_E_PHY_ID    0x01410DD0
 
 /* M88E1000 Specific Registers */
 #define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
index 987c9de247645a2d0ec1992d65703bf22a0daeb2..23ec28f43f6d3d354094655c7c696c5f7e3bfb1b 100644 (file)
@@ -1262,6 +1262,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
                        switch (hw->phy.id) {
                        case I347AT4_E_PHY_ID:
                        case M88E1112_E_PHY_ID:
+                       case M88E1543_E_PHY_ID:
+                       case M88E1512_E_PHY_ID:
                        case I210_I_PHY_ID:
                                reset_dsp = false;
                                break;
@@ -1270,9 +1272,9 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
                                        reset_dsp = false;
                                break;
                        }
-                       if (!reset_dsp)
+                       if (!reset_dsp) {
                                hw_dbg("Link taking longer than expected.\n");
-                       else {
+                       } else {
                                /* We didn't get link.
                                 * Reset the DSP and cross our fingers.
                                 */
@@ -1297,6 +1299,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
        if (hw->phy.type != e1000_phy_m88 ||
            hw->phy.id == I347AT4_E_PHY_ID ||
            hw->phy.id == M88E1112_E_PHY_ID ||
+           hw->phy.id == M88E1543_E_PHY_ID ||
+           hw->phy.id == M88E1512_E_PHY_ID ||
            hw->phy.id == I210_I_PHY_ID)
                goto out;
 
@@ -1737,6 +1741,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
                phy->cable_length = phy_data / (is_cm ? 100 : 1);
                break;
        case M88E1543_E_PHY_ID:
+       case M88E1512_E_PHY_ID:
        case I347AT4_E_PHY_ID:
                /* Remember the original page select and set it to 7 */
                ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -2188,6 +2193,90 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
        return 0;
 }
 
+/**
+ *  igb_initialize_M88E1512_phy - Initialize M88E1512 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize Marvel 1512 to work correctly with Avoton.
+ **/
+s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = 0;
+
+       /* Switch to PHY page 0xFF. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+       if (ret_val)
+               goto out;
+
+       /* Switch to PHY page 0xFB. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
+       if (ret_val)
+               goto out;
+
+       /* Switch to PHY page 0x12. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+       if (ret_val)
+               goto out;
+
+       /* Change mode to SGMII-to-Copper */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+       if (ret_val)
+               goto out;
+
+       /* Return the PHY to page 0. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+       if (ret_val)
+               goto out;
+
+       ret_val = igb_phy_sw_reset(hw);
+       if (ret_val) {
+               hw_dbg("Error committing the PHY changes\n");
+               return ret_val;
+       }
+
+       /* msec_delay(1000); */
+       usleep_range(1000, 2000);
+out:
+       return ret_val;
+}
+
 /**
  * igb_power_up_phy_copper - Restore copper link in case of PHY power down
  * @hw: pointer to the HW structure
index 7af4ffab0285653c4c400992edcd163782f919ac..24d55edbb0e3a8b58290f94e2c34f3708d8c0111 100644 (file)
@@ -61,6 +61,7 @@ s32  igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
 void igb_power_up_phy_copper(struct e1000_hw *hw);
 void igb_power_down_phy_copper(struct e1000_hw *hw);
 s32  igb_phy_init_script_igp3(struct e1000_hw *hw);
+s32  igb_initialize_M88E1512_phy(struct e1000_hw *hw);
 s32  igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 s32  igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
index 6f0490d0e981c487ce812545f0dbb834f83fe1d9..4af2870e49f88aaa67559e6d454328eb1ff1d6db 100644 (file)
 #define E1000_TRGTTIMH0  0x0B648 /* Target Time Register 0 High - RW */
 #define E1000_TRGTTIML1  0x0B64C /* Target Time Register 1 Low  - RW */
 #define E1000_TRGTTIMH1  0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_FREQOUT0   0x0B654 /* Frequency Out 0 Control Register - RW */
+#define E1000_FREQOUT1   0x0B658 /* Frequency Out 1 Control Register - RW */
 #define E1000_AUXSTMPL0  0x0B65C /* Auxiliary Time Stamp 0 Register Low  - RO */
 #define E1000_AUXSTMPH0  0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
 #define E1000_AUXSTMPL1  0x0B664 /* Auxiliary Time Stamp 1 Register Low  - RO */
index c2bd4f98a8376ecab82b99623ce2a9454ed148a9..212d668dabb382160ae04dfce3e6b1375b44832d 100644 (file)
@@ -540,6 +540,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
                         struct sk_buff *skb);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
 #ifdef CONFIG_IGB_HWMON
 void igb_sysfs_exit(struct igb_adapter *adapter);
 int igb_sysfs_init(struct igb_adapter *adapter);
index b7b9c670bb3c7e24db5ff9d8e3437af06c27d317..74262768b09b75bd7ad267afb67e720a617bce10 100644 (file)
@@ -3008,6 +3008,7 @@ static int igb_set_channels(struct net_device *netdev,
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        unsigned int count = ch->combined_count;
+       unsigned int max_combined = 0;
 
        /* Verify they are not requesting separate vectors */
        if (!count || ch->rx_count || ch->tx_count)
@@ -3018,11 +3019,13 @@ static int igb_set_channels(struct net_device *netdev,
                return -EINVAL;
 
        /* Verify the number of channels doesn't exceed hw limits */
-       if (count > igb_max_channels(adapter))
+       max_combined = igb_max_channels(adapter);
+       if (count > max_combined)
                return -EINVAL;
 
        if (count != adapter->rss_queues) {
                adapter->rss_queues = count;
+               igb_set_flag_queue_pairs(adapter, max_combined);
 
                /* Hardware has to reinitialize queues and interrupts to
                 * match the new configuration.
index 41e27404689648a4bad220e174db32cdf1077580..1902ef8f4a0b30e1f509d130a1c2786632d646d4 100644 (file)
@@ -179,6 +179,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
+static int igb_disable_sriov(struct pci_dev *dev);
+static int igb_pci_disable_sriov(struct pci_dev *dev);
 #endif
 
 #ifdef CONFIG_PM
@@ -1205,10 +1207,14 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
 
        /* allocate q_vector and rings */
        q_vector = adapter->q_vector[v_idx];
-       if (!q_vector)
+       if (!q_vector) {
                q_vector = kzalloc(size, GFP_KERNEL);
-       else
+       } else if (size > ksize(q_vector)) {
+               kfree_rcu(q_vector, rcu);
+               q_vector = kzalloc(size, GFP_KERNEL);
+       } else {
                memset(q_vector, 0, size);
+       }
        if (!q_vector)
                return -ENOMEM;
 
@@ -2645,7 +2651,11 @@ err_eeprom:
        if (hw->flash_address)
                iounmap(hw->flash_address);
 err_sw_init:
+       kfree(adapter->shadow_vfta);
        igb_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_PCI_IOV
+       igb_disable_sriov(pdev);
+#endif
        pci_iounmap(pdev, hw->hw_addr);
 err_ioremap:
        free_netdev(netdev);
@@ -2805,14 +2815,14 @@ static void igb_remove(struct pci_dev *pdev)
         */
        igb_release_hw_control(adapter);
 
-       unregister_netdev(netdev);
-
-       igb_clear_interrupt_scheme(adapter);
-
 #ifdef CONFIG_PCI_IOV
        igb_disable_sriov(pdev);
 #endif
 
+       unregister_netdev(netdev);
+
+       igb_clear_interrupt_scheme(adapter);
+
        pci_iounmap(pdev, hw->hw_addr);
        if (hw->flash_address)
                iounmap(hw->flash_address);
@@ -2847,7 +2857,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
                return;
 
        pci_sriov_set_totalvfs(pdev, 7);
-       igb_pci_enable_sriov(pdev, max_vfs);
+       igb_enable_sriov(pdev, max_vfs);
 
 #endif /* CONFIG_PCI_IOV */
 }
@@ -2888,6 +2898,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
 
        adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
 
+       igb_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
+                             const u32 max_rss_queues)
+{
+       struct e1000_hw *hw = &adapter->hw;
+
        /* Determine if we need to pair queues. */
        switch (hw->mac.type) {
        case e1000_82575:
@@ -2968,6 +2986,8 @@ static int igb_sw_init(struct igb_adapter *adapter)
        }
 #endif /* CONFIG_PCI_IOV */
 
+       igb_probe_vfs(adapter);
+
        igb_init_queue_configuration(adapter);
 
        /* Setup and initialize a copy of the hw vlan table array */
@@ -2980,8 +3000,6 @@ static int igb_sw_init(struct igb_adapter *adapter)
                return -ENOMEM;
        }
 
-       igb_probe_vfs(adapter);
-
        /* Explicitly disable IRQ since the NIC can be in any state. */
        igb_irq_disable(adapter);
 
@@ -7401,6 +7419,7 @@ static int igb_resume(struct device *dev)
 
        if (igb_init_interrupt_scheme(adapter, true)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+               rtnl_unlock();
                return -ENOMEM;
        }
 
@@ -7494,6 +7513,7 @@ static int igb_sriov_reinit(struct pci_dev *dev)
        igb_init_queue_configuration(adapter);
 
        if (igb_init_interrupt_scheme(adapter, true)) {
+               rtnl_unlock();
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }
index c3a9392cbc192229f4178c913fad8ab64d8c44c3..5982f28d521a2c116d49ba4cda22d8520c0cb1dc 100644 (file)
@@ -405,7 +405,7 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
        wr32(E1000_CTRL_EXT, ctrl_ext);
 }
 
-static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
+static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq)
 {
        static const u32 aux0_sel_sdp[IGB_N_SDP] = {
                AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
@@ -424,6 +424,14 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
                TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
                TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
        };
+       static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = {
+               TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0,
+               TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0,
+       };
+       static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = {
+               TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
+               TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
+       };
        static const u32 ts_sdp_sel_clr[IGB_N_SDP] = {
                TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
                TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
@@ -445,11 +453,17 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
                tssdp &= ~AUX1_TS_SDP_EN;
 
        tssdp &= ~ts_sdp_sel_clr[pin];
-       if (chan == 1)
-               tssdp |= ts_sdp_sel_tt1[pin];
-       else
-               tssdp |= ts_sdp_sel_tt0[pin];
-
+       if (freq) {
+               if (chan == 1)
+                       tssdp |= ts_sdp_sel_fc1[pin];
+               else
+                       tssdp |= ts_sdp_sel_fc0[pin];
+       } else {
+               if (chan == 1)
+                       tssdp |= ts_sdp_sel_tt1[pin];
+               else
+                       tssdp |= ts_sdp_sel_tt0[pin];
+       }
        tssdp |= ts_sdp_en[pin];
 
        wr32(E1000_TSSDP, tssdp);
@@ -463,10 +477,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
        struct igb_adapter *igb =
                container_of(ptp, struct igb_adapter, ptp_caps);
        struct e1000_hw *hw = &igb->hw;
-       u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
+       u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
        unsigned long flags;
        struct timespec ts;
-       int pin = -1;
+       int use_freq = 0, pin = -1;
        s64 ns;
 
        switch (rq->type) {
@@ -511,40 +525,58 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
                ts.tv_nsec = rq->perout.period.nsec;
                ns = timespec_to_ns(&ts);
                ns = ns >> 1;
-               if (on && ns < 500000LL) {
-                       /* 2k interrupts per second is an awful lot. */
-                       return -EINVAL;
+               if (on && ns <= 70000000LL) {
+                       if (ns < 8LL)
+                               return -EINVAL;
+                       use_freq = 1;
                }
                ts = ns_to_timespec(ns);
                if (rq->perout.index == 1) {
-                       tsauxc_mask = TSAUXC_EN_TT1;
-                       tsim_mask = TSINTR_TT1;
+                       if (use_freq) {
+                               tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1;
+                               tsim_mask = 0;
+                       } else {
+                               tsauxc_mask = TSAUXC_EN_TT1;
+                               tsim_mask = TSINTR_TT1;
+                       }
                        trgttiml = E1000_TRGTTIML1;
                        trgttimh = E1000_TRGTTIMH1;
+                       freqout = E1000_FREQOUT1;
                } else {
-                       tsauxc_mask = TSAUXC_EN_TT0;
-                       tsim_mask = TSINTR_TT0;
+                       if (use_freq) {
+                               tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0;
+                               tsim_mask = 0;
+                       } else {
+                               tsauxc_mask = TSAUXC_EN_TT0;
+                               tsim_mask = TSINTR_TT0;
+                       }
                        trgttiml = E1000_TRGTTIML0;
                        trgttimh = E1000_TRGTTIMH0;
+                       freqout = E1000_FREQOUT0;
                }
                spin_lock_irqsave(&igb->tmreg_lock, flags);
                tsauxc = rd32(E1000_TSAUXC);
                tsim = rd32(E1000_TSIM);
+               if (rq->perout.index == 1) {
+                       tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1);
+                       tsim &= ~TSINTR_TT1;
+               } else {
+                       tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0);
+                       tsim &= ~TSINTR_TT0;
+               }
                if (on) {
                        int i = rq->perout.index;
-
-                       igb_pin_perout(igb, i, pin);
+                       igb_pin_perout(igb, i, pin, use_freq);
                        igb->perout[i].start.tv_sec = rq->perout.start.sec;
                        igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
                        igb->perout[i].period.tv_sec = ts.tv_sec;
                        igb->perout[i].period.tv_nsec = ts.tv_nsec;
                        wr32(trgttimh, rq->perout.start.sec);
                        wr32(trgttiml, rq->perout.start.nsec);
+                       if (use_freq)
+                               wr32(freqout, ns);
                        tsauxc |= tsauxc_mask;
                        tsim |= tsim_mask;
-               } else {
-                       tsauxc &= ~tsauxc_mask;
-                       tsim &= ~tsim_mask;
                }
                wr32(E1000_TSAUXC, tsauxc);
                wr32(E1000_TSIM, tsim);
index 95af14e139d769254e8b3e20982b37a5888ebbba..686fa7184179a473599584f25e0a6d714baff54f 100644 (file)
@@ -319,6 +319,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
                        dma_unmap_single(&pdev->dev, buffer_info->dma,
                                         adapter->rx_ps_hdr_size,
                                         DMA_FROM_DEVICE);
+                       buffer_info->dma = 0;
                        skb_put(skb, hlen);
                }
 
index 3e6a9319c7185b52a4571cbbab61aa9dd54c422c..7906234c51642d60b4d79cfa4dfc48ade9ae353c 100644 (file)
@@ -248,8 +248,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
        enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
        struct pci_dev *pdev;
 
-       /* determine whether to use the the parent device
-        */
+       /* determine whether to use the parent device */
        if (ixgbe_pcie_from_parent(&adapter->hw))
                pdev = adapter->pdev->bus->parent->self;
        else
index b6f424f3b1a8388da48c6038faae16922e773e7a..4615a949381d9ec525124e588820fab7d54d0a4e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -3462,14 +3462,14 @@ struct ixgbe_info {
 #define IXGBE_ERR_HOST_INTERFACE_COMMAND        -33
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
 
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
-#define IXGBE_KRM_LINK_CTRL_1(P)       ((P == 0) ? (0x420C) : (0x820C))
-#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
-#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P)        ((P == 0) ? (0x4B00) : (0x8B00))
-#define IXGBE_KRM_PMD_DFX_BURNIN(P)    ((P == 0) ? (0x4E00) : (0x8E00))
-#define IXGBE_KRM_TX_COEFF_CTRL_1(P)   ((P == 0) ? (0x5520) : (0x9520))
-#define IXGBE_KRM_RX_ANA_CTL(P)                ((P == 0) ? (0x5A00) : (0x9A00))
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_CTRL_1(P)       ((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P)        ((P) ? 0x8B00 : 0x4B00)
+#define IXGBE_KRM_PMD_DFX_BURNIN(P)    ((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P)   ((P) ? 0x9520 : 0x5520)
+#define IXGBE_KRM_RX_ANA_CTL(P)                ((P) ? 0x9A00 : 0x5A00)
 
 #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B           (1 << 9)
 #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS         (1 << 11)
index e9d7d90363a8ea4d31ff6de176cf5b0b9c575956..27ca4596775af4b29666b323b9e45e42378041f5 100644 (file)
 
 #define MLX5E_MAX_NUM_TC       8
 
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
 #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
 
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
 
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
-#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ         0x7
 
+#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
+#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
+#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
 #define MLX5E_TX_CQ_POLL_BUDGET        128
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
 #define MLX5E_SQ_BF_BUDGET             16
@@ -92,6 +94,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = {
        "lro_bytes",
        "rx_csum_good",
        "rx_csum_none",
+       "rx_csum_sw",
        "tx_csum_offload",
        "tx_queue_stopped",
        "tx_queue_wake",
@@ -129,13 +132,14 @@ struct mlx5e_vport_stats {
        u64 lro_bytes;
        u64 rx_csum_good;
        u64 rx_csum_none;
+       u64 rx_csum_sw;
        u64 tx_csum_offload;
        u64 tx_queue_stopped;
        u64 tx_queue_wake;
        u64 tx_queue_dropped;
        u64 rx_wqe_err;
 
-#define NUM_VPORT_COUNTERS     31
+#define NUM_VPORT_COUNTERS     32
 };
 
 static const char pport_strings[][ETH_GSTRING_LEN] = {
@@ -215,6 +219,7 @@ struct mlx5e_pport_stats {
 static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
        "packets",
        "csum_none",
+       "csum_sw",
        "lro_packets",
        "lro_bytes",
        "wqe_err"
@@ -223,10 +228,11 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
 struct mlx5e_rq_stats {
        u64 packets;
        u64 csum_none;
+       u64 csum_sw;
        u64 lro_packets;
        u64 lro_bytes;
        u64 wqe_err;
-#define NUM_RQ_STATS 5
+#define NUM_RQ_STATS 6
 };
 
 static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
@@ -268,11 +274,12 @@ struct mlx5e_params {
        u16 tx_cq_moderation_usec;
        u16 tx_cq_moderation_pkts;
        u16 min_rx_wqes;
-       u16 rx_hash_log_tbl_sz;
        bool lro_en;
        u32 lro_wqe_sz;
-       u8  rss_hfunc;
        u16 tx_max_inline;
+       u8  rss_hfunc;
+       u8  toeplitz_hash_key[40];
+       u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
 };
 
 enum {
@@ -569,6 +576,8 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
 
index b549797b315f293c0ace899e12ee7e2b877732f8..bce912688ca821e5b17f6781c16a0cec713913ef 100644 (file)
@@ -628,7 +628,7 @@ static int mlx5e_set_settings(struct net_device *netdev,
        u32 link_modes;
        u32 speed;
        u32 eth_proto_cap, eth_proto_admin;
-       u8 port_status;
+       enum mlx5_port_status ps;
        int err;
 
        speed = ethtool_cmd_speed(cmd);
@@ -662,33 +662,42 @@ static int mlx5e_set_settings(struct net_device *netdev,
        if (link_modes == eth_proto_admin)
                goto out;
 
-       err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
-       if (err) {
-               netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
-                          __func__, err);
-               goto out;
-       }
-
-       err = mlx5_query_port_status(mdev, &port_status);
-       if (err)
-               goto out;
-
-       if (port_status == MLX5_PORT_DOWN)
-               return 0;
+       mlx5_query_port_admin_status(mdev, &ps);
+       if (ps == MLX5_PORT_UP)
+               mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+       mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+       if (ps == MLX5_PORT_UP)
+               mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
 
-       err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
-       if (err)
-               goto out;
-       err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
 out:
        return err;
 }
 
+static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       return sizeof(priv->params.toeplitz_hash_key);
+}
+
+static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+{
+       return MLX5E_INDIR_RQT_SIZE;
+}
+
 static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                          u8 *hfunc)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
+       if (indir)
+               memcpy(indir, priv->params.indirection_rqt,
+                      sizeof(priv->params.indirection_rqt));
+
+       if (key)
+               memcpy(key, priv->params.toeplitz_hash_key,
+                      sizeof(priv->params.toeplitz_hash_key));
+
        if (hfunc)
                *hfunc = priv->params.rss_hfunc;
 
@@ -699,28 +708,60 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
                          const u8 *key, const u8 hfunc)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
+       bool close_open;
        int err = 0;
 
-       if (hfunc == ETH_RSS_HASH_NO_CHANGE)
-               return 0;
-
-       if ((hfunc != ETH_RSS_HASH_XOR) &&
+       if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
+           (hfunc != ETH_RSS_HASH_XOR) &&
            (hfunc != ETH_RSS_HASH_TOP))
                return -EINVAL;
 
        mutex_lock(&priv->state_lock);
 
-       priv->params.rss_hfunc = hfunc;
-       if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-               mlx5e_close_locked(dev);
-               err = mlx5e_open_locked(dev);
+       if (indir) {
+               memcpy(priv->params.indirection_rqt, indir,
+                      sizeof(priv->params.indirection_rqt));
+               mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
        }
 
+       close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
+                    test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (close_open)
+               mlx5e_close_locked(dev);
+
+       if (key)
+               memcpy(priv->params.toeplitz_hash_key, key,
+                      sizeof(priv->params.toeplitz_hash_key));
+
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+               priv->params.rss_hfunc = hfunc;
+
+       if (close_open)
+               err = mlx5e_open_locked(priv->netdev);
+
        mutex_unlock(&priv->state_lock);
 
        return err;
 }
 
+static int mlx5e_get_rxnfc(struct net_device *netdev,
+                          struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err = 0;
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = priv->params.num_channels;
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
 static int mlx5e_get_tunable(struct net_device *dev,
                             const struct ethtool_tunable *tuna,
                             void *data)
@@ -779,6 +820,42 @@ static int mlx5e_set_tunable(struct net_device *dev,
        return err;
 }
 
+static void mlx5e_get_pauseparam(struct net_device *netdev,
+                                struct ethtool_pauseparam *pauseparam)
+{
+       struct mlx5e_priv *priv    = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int err;
+
+       err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
+                                   &pauseparam->tx_pause);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
+                          __func__, err);
+       }
+}
+
+static int mlx5e_set_pauseparam(struct net_device *netdev,
+                               struct ethtool_pauseparam *pauseparam)
+{
+       struct mlx5e_priv *priv    = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int err;
+
+       if (pauseparam->autoneg)
+               return -EINVAL;
+
+       err = mlx5_set_port_pause(mdev,
+                                 pauseparam->rx_pause ? 1 : 0,
+                                 pauseparam->tx_pause ? 1 : 0);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
+                          __func__, err);
+       }
+
+       return err;
+}
+
 const struct ethtool_ops mlx5e_ethtool_ops = {
        .get_drvinfo       = mlx5e_get_drvinfo,
        .get_link          = ethtool_op_get_link,
@@ -793,8 +870,13 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
        .set_coalesce      = mlx5e_set_coalesce,
        .get_settings      = mlx5e_get_settings,
        .set_settings      = mlx5e_set_settings,
+       .get_rxfh_key_size   = mlx5e_get_rxfh_key_size,
+       .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
        .get_rxfh          = mlx5e_get_rxfh,
        .set_rxfh          = mlx5e_set_rxfh,
+       .get_rxnfc         = mlx5e_get_rxnfc,
        .get_tunable       = mlx5e_get_tunable,
        .set_tunable       = mlx5e_set_tunable,
+       .get_pauseparam    = mlx5e_get_pauseparam,
+       .set_pauseparam    = mlx5e_set_pauseparam,
 };
index 111427b33ec861df14ec9b2a5eef873ee87976f6..55166dd5b4ea5a16f77c91b6a2dc6cfb9a88b77e 100644 (file)
@@ -149,6 +149,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
        s->lro_packets          = 0;
        s->lro_bytes            = 0;
        s->rx_csum_none         = 0;
+       s->rx_csum_sw           = 0;
        s->rx_wqe_err           = 0;
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;
@@ -156,6 +157,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
                s->lro_packets  += rq_stats->lro_packets;
                s->lro_bytes    += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
+               s->rx_csum_sw   += rq_stats->csum_sw;
                s->rx_wqe_err   += rq_stats->wqe_err;
 
                for (j = 0; j < priv->params.num_tc; j++) {
@@ -241,7 +243,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
 
        /* Update calculated offload counters */
        s->tx_csum_offload = s->tx_packets - tx_offload_none;
-       s->rx_csum_good    = s->rx_packets - s->rx_csum_none;
+       s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
+                              s->rx_csum_sw;
 
        mlx5e_update_pport_counters(priv);
 free_out:
@@ -1174,27 +1177,32 @@ static int mlx5e_bits_invert(unsigned long a, int size)
        return inv;
 }
 
+static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
+{
+       int i;
+
+       for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+               int ix = i;
+
+               if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+                       ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
+
+               ix = priv->params.indirection_rqt[ix];
+               ix = ix % priv->params.num_channels;
+               MLX5_SET(rqtc, rqtc, rq_num[i],
+                        test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+                        priv->channel[ix]->rq.rqn :
+                        priv->drop_rq.rqn);
+       }
+}
+
 static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
                                enum mlx5e_rqt_ix rqt_ix)
 {
-       int i;
-       int log_sz;
 
        switch (rqt_ix) {
        case MLX5E_INDIRECTION_RQT:
-               log_sz = priv->params.rx_hash_log_tbl_sz;
-               for (i = 0; i < (1 << log_sz); i++) {
-                       int ix = i;
-
-                       if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
-                               ix = mlx5e_bits_invert(i, log_sz);
-
-                       ix = ix % priv->params.num_channels;
-                       MLX5_SET(rqtc, rqtc, rq_num[i],
-                                test_bit(MLX5E_STATE_OPENED, &priv->state) ?
-                                priv->channel[ix]->rq.rqn :
-                                priv->drop_rq.rqn);
-               }
+               mlx5e_fill_indir_rqt_rqns(priv, rqtc);
 
                break;
 
@@ -1214,13 +1222,10 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
        u32 *in;
        void *rqtc;
        int inlen;
-       int log_sz;
        int sz;
        int err;
 
-       log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
-                 priv->params.rx_hash_log_tbl_sz;
-       sz = 1 << log_sz;
+       sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
 
        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
@@ -1241,19 +1246,16 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
        return err;
 }
 
-static int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *rqtc;
        int inlen;
-       int log_sz;
        int sz;
        int err;
 
-       log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
-                 priv->params.rx_hash_log_tbl_sz;
-       sz = 1 << log_sz;
+       sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
 
        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
@@ -1301,7 +1303,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
                  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
                 MLX5_CAP_ETH(priv->mdev,
-                             lro_timer_supported_periods[3]));
+                             lro_timer_supported_periods[2]));
 }
 
 static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
@@ -1611,7 +1613,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                                                       rx_hash_toeplitz_key);
 
                        MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-                       netdev_rss_key_fill(rss_key, len);
+                       memcpy(rss_key, priv->params.toeplitz_hash_key, len);
                }
                break;
        }
@@ -1911,9 +1913,10 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
 
 static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                                    struct net_device *netdev,
-                                   int num_comp_vectors)
+                                   int num_channels)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
+       int i;
 
        priv->params.log_sq_size           =
                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -1930,22 +1933,22 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
        priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
        priv->params.min_rx_wqes           =
                MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
-       priv->params.rx_hash_log_tbl_sz    =
-               (order_base_2(num_comp_vectors) >
-                MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
-               order_base_2(num_comp_vectors)           :
-               MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
        priv->params.num_tc                = 1;
        priv->params.default_vlan_prio     = 0;
        priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
 
-       priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
+       netdev_rss_key_fill(priv->params.toeplitz_hash_key,
+                           sizeof(priv->params.toeplitz_hash_key));
+
+       for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+               priv->params.indirection_rqt[i] = i % num_channels;
+
        priv->params.lro_wqe_sz            =
                MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
 
        priv->mdev                         = mdev;
        priv->netdev                       = netdev;
-       priv->params.num_channels          = num_comp_vectors;
+       priv->params.num_channels          = num_channels;
        priv->default_vlan_prio            = priv->params.default_vlan_prio;
 
        spin_lock_init(&priv->async_events_spinlock);
@@ -2034,19 +2037,20 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 {
        struct net_device *netdev;
        struct mlx5e_priv *priv;
-       int ncv = mdev->priv.eq_table.num_comp_vectors;
+       int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors,
+                       MLX5E_MAX_NUM_CHANNELS);
        int err;
 
        if (mlx5e_check_required_hca_cap(mdev))
                return NULL;
 
-       netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
+       netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
                return NULL;
        }
 
-       mlx5e_build_netdev_priv(mdev, netdev, ncv);
+       mlx5e_build_netdev_priv(mdev, netdev, nch);
        mlx5e_build_netdev(netdev);
 
        netif_carrier_off(netdev);
index 9a9374131f5b45e7c740f80e2ef8b0e9ba175fa9..cf0098596e85847ad558896cc8ec4731b0420192 100644 (file)
@@ -111,10 +111,12 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
                                        sizeof(struct iphdr));
                ipv6 = NULL;
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        } else {
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
                                        sizeof(struct ipv6hdr));
                ipv4 = NULL;
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
        }
 
        if (get_cqe_lro_tcppsh(cqe))
@@ -149,6 +151,38 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
        skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
 }
 
+static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+{
+       __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+
+       return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+}
+
+static inline void mlx5e_handle_csum(struct net_device *netdev,
+                                    struct mlx5_cqe64 *cqe,
+                                    struct mlx5e_rq *rq,
+                                    struct sk_buff *skb)
+{
+       if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+               goto csum_none;
+
+       if (likely(cqe->hds_ip_ext & CQE_L4_OK)) {
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       } else if (is_first_ethertype_ip(skb)) {
+               skb->ip_summed = CHECKSUM_COMPLETE;
+               skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+               rq->stats.csum_sw++;
+       } else {
+               goto csum_none;
+       }
+
+       return;
+
+csum_none:
+       skb->ip_summed = CHECKSUM_NONE;
+       rq->stats.csum_none++;
+}
+
 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                      struct mlx5e_rq *rq,
                                      struct sk_buff *skb)
@@ -162,20 +196,12 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
        lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(skb, cqe);
-               skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+               skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
                rq->stats.lro_packets++;
                rq->stats.lro_bytes += cqe_bcnt;
        }
 
-       if (likely(netdev->features & NETIF_F_RXCSUM) &&
-           (cqe->hds_ip_ext & CQE_L2_OK) &&
-           (cqe->hds_ip_ext & CQE_L3_OK) &&
-           (cqe->hds_ip_ext & CQE_L4_OK)) {
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else {
-               skb->ip_summed = CHECKSUM_NONE;
-               rq->stats.csum_none++;
-       }
+       mlx5e_handle_csum(netdev, cqe, rq, skb);
 
        skb->protocol = eth_type_trans(skb, netdev);
 
index 70147999f6574f9fc1e236b6b43ce0610cbd4d3b..821caaab9bfb04697fb0424cb8498bdc9eacabed 100644 (file)
@@ -216,22 +216,25 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
 
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
-                        enum mlx5_port_status status)
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+                              enum mlx5_port_status status)
 {
        u32 in[MLX5_ST_SZ_DW(paos_reg)];
        u32 out[MLX5_ST_SZ_DW(paos_reg)];
 
        memset(in, 0, sizeof(in));
 
+       MLX5_SET(paos_reg, in, local_port, 1);
        MLX5_SET(paos_reg, in, admin_status, status);
        MLX5_SET(paos_reg, in, ase, 1);
 
        return mlx5_core_access_reg(dev, in, sizeof(in), out,
                                    sizeof(out), MLX5_REG_PAOS, 0, 1);
 }
+EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
 
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+                                enum mlx5_port_status *status)
 {
        u32 in[MLX5_ST_SZ_DW(paos_reg)];
        u32 out[MLX5_ST_SZ_DW(paos_reg)];
@@ -239,14 +242,17 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
 
        memset(in, 0, sizeof(in));
 
+       MLX5_SET(paos_reg, in, local_port, 1);
+
        err = mlx5_core_access_reg(dev, in, sizeof(in), out,
                                   sizeof(out), MLX5_REG_PAOS, 0, 0);
        if (err)
                return err;
 
-       *status = MLX5_GET(paos_reg, out, oper_status);
+       *status = MLX5_GET(paos_reg, out, admin_status);
        return err;
 }
+EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
 
 static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
                                int *max_mtu, int *oper_mtu, u8 port)
@@ -328,3 +334,45 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
+{
+       u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+       u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(pfcc_reg, in, local_port, 1);
+       MLX5_SET(pfcc_reg, in, pptx, tx_pause);
+       MLX5_SET(pfcc_reg, in, pprx, rx_pause);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_PFCC, 0, 1);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
+
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+                         u32 *rx_pause, u32 *tx_pause)
+{
+       u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+       u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(pfcc_reg, in, local_port, 1);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_PFCC, 0, 0);
+       if (err)
+               return err;
+
+       if (rx_pause)
+               *rx_pause = MLX5_GET(pfcc_reg, out, pprx);
+
+       if (tx_pause)
+               *tx_pause = MLX5_GET(pfcc_reg, out, pptx);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
index f78909a00f150edfd76065b2b993d9660edaebdd..09d2e16fd6b00bfdd0c20fc10c64abd03c29a935 100644 (file)
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
 
        sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
                tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
-       err = dma_mapping_error(adapter->dev,
-               sg_dma_address(&tx_ctl->sg));
-       if (err) {
+       if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+               err = -ENOMEM;
                sg_dma_address(&tx_ctl->sg) = 0;
                goto err;
        }
index 0f21aa3bb5379a518d80a3afbd06795bbdb482aa..6eef3251d8333233445122ff6321091b6292155a 100644 (file)
@@ -2367,12 +2367,14 @@ static const struct smsc911x_ops shifted_smsc911x_ops = {
 static int smsc911x_probe_config(struct smsc911x_platform_config *config,
                                 struct device *dev)
 {
+       int phy_interface;
        u32 width = 0;
 
-       if (!dev)
-               return -ENODEV;
+       phy_interface = device_get_phy_mode(dev);
+       if (phy_interface < 0)
+               return phy_interface;
 
-       config->phy_interface = device_get_phy_mode(dev);
+       config->phy_interface = phy_interface;
 
        device_get_mac_address(dev, config->mac, ETH_ALEN);
 
index 78d49d186e056fc278e59afaad5543f3b9717b84..897e1a3f035bc227fdcf3b181a349dbd49cbd629 100644 (file)
@@ -283,7 +283,6 @@ static void geneve_setup(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &geneve_type);
 
-       dev->tx_queue_len = 0;
        dev->features    |= NETIF_F_LLTX;
        dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
        dev->features    |= NETIF_F_RXCSUM;
@@ -297,7 +296,7 @@ static void geneve_setup(struct net_device *dev)
        dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
 
        netif_keep_dst(dev);
-       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
 }
 
 static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
index f3b9d3eb753b3e48ac689d66314c6e629a651566..2990024b90f972e3e22c56f21d2cce278da651ab 100644 (file)
@@ -777,14 +777,17 @@ static int netvsc_set_channels(struct net_device *net,
        struct hv_device *dev = net_device_ctx->device_ctx;
        struct netvsc_device *nvdev = hv_get_drvdata(dev);
        struct netvsc_device_info device_info;
-       const u32 num_chn = nvdev->num_chn;
-       const u32 max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
+       u32 num_chn;
+       u32 max_chn;
        int ret = 0;
        bool recovering = false;
 
        if (!nvdev || nvdev->destroy)
                return -ENODEV;
 
+       num_chn = nvdev->num_chn;
+       max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
+
        if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
                pr_info("vRSS unsupported before NVSP Version 5\n");
                return -EINVAL;
index d0d5bf6cbb686a357e4e5ed98ad652c0dbf6908a..6422caac8d40d64644aa9c95f32b5661dd37003b 100644 (file)
@@ -97,9 +97,7 @@ struct at86rf230_local {
 
        struct at86rf230_state_change irq;
 
-       bool tx_aret;
        unsigned long cal_timeout;
-       s8 max_frame_retries;
        bool is_tx;
        bool is_tx_from_off;
        u8 tx_retry;
@@ -651,7 +649,7 @@ at86rf230_tx_complete(void *context)
 
        enable_irq(ctx->irq);
 
-       ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret);
+       ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
 }
 
 static void
@@ -760,17 +758,10 @@ at86rf230_irq_trx_end(struct at86rf230_local *lp)
 {
        if (lp->is_tx) {
                lp->is_tx = 0;
-
-               if (lp->tx_aret)
-                       at86rf230_async_state_change(lp, &lp->irq,
-                                                    STATE_FORCE_TX_ON,
-                                                    at86rf230_tx_trac_status,
-                                                    true);
-               else
-                       at86rf230_async_state_change(lp, &lp->irq,
-                                                    STATE_RX_AACK_ON,
-                                                    at86rf230_tx_complete,
-                                                    true);
+               at86rf230_async_state_change(lp, &lp->irq,
+                                            STATE_FORCE_TX_ON,
+                                            at86rf230_tx_trac_status,
+                                            true);
        } else {
                at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
                                         at86rf230_rx_trac_check, true);
@@ -876,24 +867,16 @@ at86rf230_xmit_start(void *context)
        struct at86rf230_state_change *ctx = context;
        struct at86rf230_local *lp = ctx->lp;
 
-       /* In ARET mode we need to go into STATE_TX_ARET_ON after we
-        * are in STATE_TX_ON. The pfad differs here, so we change
-        * the complete handler.
-        */
-       if (lp->tx_aret) {
-               if (lp->is_tx_from_off) {
-                       lp->is_tx_from_off = false;
-                       at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
-                                                    at86rf230_write_frame,
-                                                    false);
-               } else {
-                       at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
-                                                    at86rf230_xmit_tx_on,
-                                                    false);
-               }
+       /* check if we change from off state */
+       if (lp->is_tx_from_off) {
+               lp->is_tx_from_off = false;
+               at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
+                                            at86rf230_write_frame,
+                                            false);
        } else {
                at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
-                                            at86rf230_write_frame, false);
+                                            at86rf230_xmit_tx_on,
+                                            false);
        }
 }
 
@@ -1267,15 +1250,8 @@ static int
 at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
 {
        struct at86rf230_local *lp = hw->priv;
-       int rc = 0;
-
-       lp->tx_aret = retries >= 0;
-       lp->max_frame_retries = retries;
 
-       if (retries >= 0)
-               rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
-
-       return rc;
+       return at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
 }
 
 static int
index 613dae559925f947586f8f011aad315d0705866c..c5b54a15fc4cb2b59584a492a5b4a9bca190e687 100644 (file)
@@ -833,6 +833,7 @@ static int cc2520_get_platform_data(struct spi_device *spi,
                if (!spi_pdata)
                        return -ENOENT;
                *pdata = *spi_pdata;
+               priv->fifo_pin = pdata->fifo;
                return 0;
        }
 
index 20b58bdecf7540100edc5522e74804e6a7544d95..a9268db4e349fc2e131be0010ecd67e8bb779939 100644 (file)
@@ -520,12 +520,11 @@ static void ipvlan_link_setup(struct net_device *dev)
        ether_setup(dev);
 
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
-       dev->priv_flags |= IFF_UNICAST_FLT;
+       dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
        dev->netdev_ops = &ipvlan_netdev_ops;
        dev->destructor = free_netdev;
        dev->header_ops = &ipvlan_header_ops;
        dev->ethtool_ops = &ipvlan_ethtool_ops;
-       dev->tx_queue_len = 0;
 }
 
 static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
index c76283c2f84a4e7aa21b684770542e9b6202bffd..dc7d970bd1c0baaf1f707df87001da8f59f3d74a 100644 (file)
@@ -165,10 +165,9 @@ static void loopback_setup(struct net_device *dev)
        dev->mtu                = 64 * 1024;
        dev->hard_header_len    = ETH_HLEN;     /* 14   */
        dev->addr_len           = ETH_ALEN;     /* 6    */
-       dev->tx_queue_len       = 0;
        dev->type               = ARPHRD_LOOPBACK;      /* 0x0001*/
        dev->flags              = IFF_LOOPBACK;
-       dev->priv_flags         |= IFF_LIVE_ADDR_CHANGE;
+       dev->priv_flags         |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
        netif_keep_dst(dev);
        dev->hw_features        = NETIF_F_ALL_TSO | NETIF_F_UFO;
        dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST
index 34924dfadd0097608dac20dcf0aced6bb1c9805e..7b7c70e2341eff0391c1e98f21a0e6ca5931a35d 100644 (file)
@@ -130,7 +130,7 @@ static const struct net_device_ops nlmon_ops = {
 static void nlmon_setup(struct net_device *dev)
 {
        dev->type = ARPHRD_NETLINK;
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
 
        dev->netdev_ops = &nlmon_ops;
        dev->ethtool_ops = &nlmon_ethtool_ops;
index 84b1fba58ac3c8efcbbb0bf9311b442ac52614c1..d9728516dac32d935e9b19ffacf2d9431d81d448 100644 (file)
@@ -814,6 +814,7 @@ void phy_state_machine(struct work_struct *work)
        bool needs_aneg = false, do_suspend = false;
        enum phy_state old_state;
        int err = 0;
+       int old_link;
 
        mutex_lock(&phydev->lock);
 
@@ -899,11 +900,18 @@ void phy_state_machine(struct work_struct *work)
                phydev->adjust_link(phydev->attached_dev);
                break;
        case PHY_RUNNING:
-               /* Only register a CHANGE if we are
-                * polling or ignoring interrupts
+               /* Only register a CHANGE if we are polling or ignoring
+                * interrupts and link changed since latest checking.
                 */
-               if (!phy_interrupt_is_valid(phydev))
-                       phydev->state = PHY_CHANGELINK;
+               if (!phy_interrupt_is_valid(phydev)) {
+                       old_link = phydev->link;
+                       err = phy_read_status(phydev);
+                       if (err)
+                               break;
+
+                       if (old_link != phydev->link)
+                               phydev->state = PHY_CHANGELINK;
+               }
                break;
        case PHY_CHANGELINK:
                err = phy_read_status(phydev);
index c0f6479e19d48e51fd76c3bbce161ffdd7846842..70b08958763a129fff47ad00a1db130c1334f254 100644 (file)
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
 }
 
 /*
- * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
- * other in order to set the ENERGYON bit and exit EDPD mode.  If a link partner
- * does send the pulses within this interval, the PHY will remained powered
- * down.
- *
- * This workaround will manually toggle the PHY on/off upon calls to read_status
- * in order to generate link test pulses if the link is down.  If a link partner
- * is present, it will respond to the pulses, which will cause the ENERGYON bit
- * to be set and will cause the EDPD mode to be exited.
+ * The LAN87xx suffers from rare absence of the ENERGYON-bit when Ethernet cable
+ * plugs in while LAN87xx is in Energy Detect Power-Down mode. This leads to
+ * unstable detection of plugging in Ethernet cable.
+ * This workaround disables Energy Detect Power-Down mode and waiting for
+ * response on link pulses to detect presence of plugged Ethernet cable.
+ * The Energy Detect Power-Down mode is enabled again in the end of procedure to
+ * save approximately 220 mW of power if cable is unplugged.
  */
 static int lan87xx_read_status(struct phy_device *phydev)
 {
        int err = genphy_read_status(phydev);
+       int i;
 
        if (!phydev->link) {
                /* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
                if (rc < 0)
                        return rc;
 
-               /* Sleep 64 ms to allow ~5 link test pulses to be sent */
-               msleep(64);
+               /* Wait max 640 ms to detect energy */
+               for (i = 0; i < 64; i++) {
+                       /* Sleep to allow link test pulses to be sent */
+                       msleep(10);
+                       rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+                       if (rc < 0)
+                               return rc;
+                       if (rc & MII_LAN83C185_ENERGYON)
+                               break;
+               }
 
                /* Re-enable EDPD */
                rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
 
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
+       .read_status    = lan87xx_read_status,
        .config_init    = smsc_phy_config_init,
        .soft_reset     = smsc_phy_reset,
 
index 9d15566521a719b525a28a009f1d999c91a00da2..fa8f5046afe90627242f6d2d523178da653c9427 100644 (file)
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
 static void ppp_ccp_closed(struct ppp *ppp);
 static struct compressor *find_compressor(int type);
 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+                                       struct file *file, int *retp);
 static void init_ppp_file(struct ppp_file *pf, int kind);
-static void ppp_shutdown_interface(struct ppp *ppp);
 static void ppp_destroy_interface(struct ppp *ppp);
 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
 static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
                file->private_data = NULL;
                if (pf->kind == INTERFACE) {
                        ppp = PF_TO_PPP(pf);
+                       rtnl_lock();
                        if (file == ppp->owner)
-                               ppp_shutdown_interface(ppp);
+                               unregister_netdevice(ppp->dev);
+                       rtnl_unlock();
                }
                if (atomic_dec_and_test(&pf->refcnt)) {
                        switch (pf->kind) {
@@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                mutex_lock(&ppp_mutex);
                if (pf->kind == INTERFACE) {
                        ppp = PF_TO_PPP(pf);
+                       rtnl_lock();
                        if (file == ppp->owner)
-                               ppp_shutdown_interface(ppp);
+                               unregister_netdevice(ppp->dev);
+                       rtnl_unlock();
                }
                if (atomic_long_read(&file->f_count) < 2) {
                        ppp_release(NULL, file);
@@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
                /* Create a new ppp unit */
                if (get_user(unit, p))
                        break;
-               ppp = ppp_create_interface(net, unit, &err);
+               ppp = ppp_create_interface(net, unit, file, &err);
                if (!ppp)
                        break;
                file->private_data = &ppp->file;
-               ppp->owner = file;
                err = -EFAULT;
                if (put_user(ppp->file.index, p))
                        break;
@@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
 static __net_exit void ppp_exit_net(struct net *net)
 {
        struct ppp_net *pn = net_generic(net, ppp_net_id);
+       struct ppp *ppp;
+       LIST_HEAD(list);
+       int id;
+
+       rtnl_lock();
+       idr_for_each_entry(&pn->units_idr, ppp, id)
+               unregister_netdevice_queue(ppp->dev, &list);
+
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
 
        idr_destroy(&pn->units_idr);
 }
@@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
        return 0;
 }
 
+static void ppp_dev_uninit(struct net_device *dev)
+{
+       struct ppp *ppp = netdev_priv(dev);
+       struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+       ppp_lock(ppp);
+       ppp->closing = 1;
+       ppp_unlock(ppp);
+
+       mutex_lock(&pn->all_ppp_mutex);
+       unit_put(&pn->units_idr, ppp->file.index);
+       mutex_unlock(&pn->all_ppp_mutex);
+
+       ppp->owner = NULL;
+
+       ppp->file.dead = 1;
+       wake_up_interruptible(&ppp->file.rwait);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
        .ndo_init        = ppp_dev_init,
+       .ndo_uninit      = ppp_dev_uninit,
        .ndo_start_xmit  = ppp_start_xmit,
        .ndo_do_ioctl    = ppp_net_ioctl,
        .ndo_get_stats64 = ppp_get_stats64,
@@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
  * or if there is already a unit with the requested number.
  * unit == -1 means allocate a new number.
  */
-static struct ppp *
-ppp_create_interface(struct net *net, int unit, int *retp)
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+                                       struct file *file, int *retp)
 {
        struct ppp *ppp;
        struct ppp_net *pn;
@@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
        ppp->mru = PPP_MRU;
        init_ppp_file(&ppp->file, INTERFACE);
        ppp->file.hdrlen = PPP_HDRLEN - 2;      /* don't count proto bytes */
+       ppp->owner = file;
        for (i = 0; i < NUM_NP; ++i)
                ppp->npmode[i] = NPMODE_PASS;
        INIT_LIST_HEAD(&ppp->channels);
@@ -2775,34 +2809,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
        init_waitqueue_head(&pf->rwait);
 }
 
-/*
- * Take down a ppp interface unit - called when the owning file
- * (the one that created the unit) is closed or detached.
- */
-static void ppp_shutdown_interface(struct ppp *ppp)
-{
-       struct ppp_net *pn;
-
-       pn = ppp_pernet(ppp->ppp_net);
-       mutex_lock(&pn->all_ppp_mutex);
-
-       /* This will call dev_close() for us. */
-       ppp_lock(ppp);
-       if (!ppp->closing) {
-               ppp->closing = 1;
-               ppp_unlock(ppp);
-               unregister_netdev(ppp->dev);
-               unit_put(&pn->units_idr, ppp->file.index);
-       } else
-               ppp_unlock(ppp);
-
-       ppp->file.dead = 1;
-       ppp->owner = NULL;
-       wake_up_interruptible(&ppp->file.rwait);
-
-       mutex_unlock(&pn->all_ppp_mutex);
-}
-
 /*
  * Free the memory used by a ppp unit.  This is only called once
  * there are no channels connected to the unit and no file structs
index daa054b3ff03ebf58f32c8bc7b1b012240895176..651d35ea22c5f8335b50c0b0aa75c834eb4c83c5 100644 (file)
@@ -2051,9 +2051,9 @@ static void team_setup(struct net_device *dev)
        dev->netdev_ops = &team_netdev_ops;
        dev->ethtool_ops = &team_ethtool_ops;
        dev->destructor = team_destructor;
-       dev->tx_queue_len = 0;
        dev->flags |= IFF_MULTICAST;
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+       dev->priv_flags |= IFF_NO_QUEUE;
 
        /*
         * Indicate we support unicast address filtering. That way core won't
index 1f7a7cd97e50277e48487e18eaeafc9406b27f46..6392ae3c4ab82a5c7314ce219575bd09fe995922 100644 (file)
@@ -786,6 +786,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81a8, 8)},    /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81b1, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x03f0, 0x581d, 4)},    /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 
        /* 4. Gobi 1000 devices */
index 343592c4315f6397d6a2414457a7d10b532c0eca..0ef4a5ad555739870897bfbb96c7a95847deed20 100644 (file)
@@ -306,6 +306,7 @@ static void veth_setup(struct net_device *dev)
 
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+       dev->priv_flags |= IFF_NO_QUEUE;
 
        dev->netdev_ops = &veth_netdev_ops;
        dev->ethtool_ops = &veth_ethtool_ops;
index 546b669fbfdd2bcc54ad565bbf83b38c5764e134..9b950f2db836fd6cbc9165e19c525eedbbefaa70 100644 (file)
@@ -40,12 +40,12 @@ module_param(gso, bool, 0444);
 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
 #define GOOD_COPY_LEN  128
 
-/* Weight used for the RX packet size EWMA. The average packet size is used to
- * determine the packet buffer size when refilling RX rings. As the entire RX
- * ring may be refilled at once, the weight is chosen so that the EWMA will be
- * insensitive to short-term, transient changes in packet size.
+/* RX packet size EWMA. The average packet size is used to determine the packet
+ * buffer size when refilling RX rings. As the entire RX ring may be refilled
+ * at once, the weight is chosen so that the EWMA will be insensitive to short-
+ * term, transient changes in packet size.
  */
-#define RECEIVE_AVG_WEIGHT 64
+DECLARE_EWMA(pkt_len, 1, 64)
 
 /* Minimum alignment for mergeable packet buffers. */
 #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
@@ -85,7 +85,7 @@ struct receive_queue {
        struct page *pages;
 
        /* Average packet length for mergeable receive buffers. */
-       struct ewma mrg_avg_pkt_len;
+       struct ewma_pkt_len mrg_avg_pkt_len;
 
        /* Page frag for packet buffer allocation. */
        struct page_frag alloc_frag;
@@ -407,7 +407,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                }
        }
 
-       ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
+       ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
        return head_skb;
 
 err_skb:
@@ -600,12 +600,12 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
        return err;
 }
 
-static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
+static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
 {
        const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        unsigned int len;
 
-       len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
+       len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
                        GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
        return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
 }
@@ -1615,7 +1615,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
                napi_hash_add(&vi->rq[i].napi);
 
                sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
-               ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
+               ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
                sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
        }
 
@@ -1658,7 +1658,7 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
 {
        struct virtnet_info *vi = netdev_priv(queue->dev);
        unsigned int queue_index = get_netdev_rx_queue_index(queue);
-       struct ewma *avg;
+       struct ewma_pkt_len *avg;
 
        BUG_ON(queue_index >= vi->max_queue_pairs);
        avg = &vi->rq[queue_index].mrg_avg_pkt_len;
index 95097cb793540d292e244408ef441d4612e67bb7..b3d9c5546c795768a7982092b23063a36b7749d9 100644 (file)
@@ -97,6 +97,12 @@ static bool is_ip_rx_frame(struct sk_buff *skb)
        return false;
 }
 
+static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
+{
+       vrf_dev->stats.tx_errors++;
+       kfree_skb(skb);
+}
+
 /* note: already called with rcu_read_lock */
 static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
 {
@@ -149,7 +155,8 @@ static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
 static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
                                           struct net_device *dev)
 {
-       return 0;
+       vrf_tx_error(dev, skb);
+       return NET_XMIT_DROP;
 }
 
 static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
@@ -206,8 +213,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
 out:
        return ret;
 err:
-       vrf_dev->stats.tx_errors++;
-       kfree_skb(skb);
+       vrf_tx_error(vrf_dev, skb);
        goto out;
 }
 
@@ -219,6 +225,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
        case htons(ETH_P_IPV6):
                return vrf_process_v6_outbound(skb, dev);
        default:
+               vrf_tx_error(dev, skb);
                return NET_XMIT_DROP;
        }
 }
@@ -265,8 +272,7 @@ static void vrf_rtable_destroy(struct net_vrf *vrf)
 {
        struct dst_entry *dst = (struct dst_entry *)vrf->rth;
 
-       if (dst)
-               dst_destroy(dst);
+       dst_destroy(dst);
        vrf->rth = NULL;
 }
 
@@ -289,7 +295,6 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
                rth->rt_uses_gateway = 0;
                INIT_LIST_HEAD(&rth->rt_uncached);
                rth->rt_uncached_list = NULL;
-               rth->rt_lwtstate = NULL;
        }
 
        return rth;
@@ -334,23 +339,18 @@ static struct slave *__vrf_find_slave_dev(struct slave_queue *queue,
 /* inverse of __vrf_insert_slave */
 static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave)
 {
-       dev_put(slave->dev);
        list_del(&slave->list);
-       queue->num_slaves--;
 }
 
 static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave)
 {
-       dev_hold(slave->dev);
        list_add(&slave->list, &queue->all_slaves);
-       queue->num_slaves++;
 }
 
 static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
 {
        struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
        struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
-       struct slave *duplicate_slave;
        struct net_vrf *vrf = netdev_priv(dev);
        struct slave_queue *queue = &vrf->queue;
        int ret = -ENOMEM;
@@ -359,25 +359,16 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
                goto out_fail;
 
        slave->dev = port_dev;
-
        vrf_ptr->ifindex = dev->ifindex;
        vrf_ptr->tb_id = vrf->tb_id;
 
-       duplicate_slave = __vrf_find_slave_dev(queue, port_dev);
-       if (duplicate_slave) {
-               ret = -EBUSY;
-               goto out_fail;
-       }
-
-       __vrf_insert_slave(queue, slave);
-
        /* register the packet handler for slave ports */
        ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
        if (ret) {
                netdev_err(port_dev,
                           "Device %s failed to register rx_handler\n",
                           port_dev->name);
-               goto out_remove;
+               goto out_fail;
        }
 
        ret = netdev_master_upper_dev_link(port_dev, dev);
@@ -385,7 +376,7 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
                goto out_unregister;
 
        port_dev->flags |= IFF_SLAVE;
-
+       __vrf_insert_slave(queue, slave);
        rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
        cycle_netdev(port_dev);
 
@@ -393,8 +384,6 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
 
 out_unregister:
        netdev_rx_handler_unregister(port_dev);
-out_remove:
-       __vrf_remove_slave(queue, slave);
 out_fail:
        kfree(vrf_ptr);
        kfree(slave);
@@ -403,8 +392,7 @@ out_fail:
 
 static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
 {
-       if (!netif_is_vrf(dev) || netif_is_vrf(port_dev) ||
-           vrf_is_slave(port_dev))
+       if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev))
                return -EINVAL;
 
        return do_vrf_add_slave(dev, port_dev);
@@ -441,9 +429,6 @@ static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
 
 static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
 {
-       if (!netif_is_vrf(dev))
-               return -EINVAL;
-
        return do_vrf_del_slave(dev, port_dev);
 }
 
@@ -459,8 +444,7 @@ static void vrf_dev_uninit(struct net_device *dev)
        list_for_each_entry_safe(slave, next, head, list)
                vrf_del_slave(dev, slave->dev);
 
-       if (dev->dstats)
-               free_percpu(dev->dstats);
+       free_percpu(dev->dstats);
        dev->dstats = NULL;
 }
 
@@ -630,9 +614,8 @@ static int vrf_device_event(struct notifier_block *unused,
                if (!vrf_ptr || netif_is_vrf(dev))
                        goto out;
 
-               vrf_dev = __dev_get_by_index(dev_net(dev), vrf_ptr->ifindex);
-               if (vrf_dev)
-                       vrf_del_slave(vrf_dev, dev);
+               vrf_dev = netdev_master_upper_dev_get(dev);
+               vrf_del_slave(vrf_dev, dev);
        }
 out:
        return NOTIFY_DONE;
@@ -649,7 +632,7 @@ static int __init vrf_init_module(void)
        vrf_dst_ops.kmem_cachep =
                kmem_cache_create("vrf_ip_dst_cache",
                                  sizeof(struct rtable), 0,
-                                 SLAB_HWCACHE_ALIGN | SLAB_PANIC,
+                                 SLAB_HWCACHE_ALIGN,
                                  NULL);
 
        if (!vrf_dst_ops.kmem_cachep)
index 06c0731ae619a041786938b6957a29024595bd81..61b457b9ec00517037e4833790bea97ac53aa832 100644 (file)
@@ -236,7 +236,7 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
 
        hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
                if (inet_sk(vs->sock->sk)->inet_sport == port &&
-                   inet_sk(vs->sock->sk)->sk.sk_family == family &&
+                   vxlan_get_sk_family(vs) == family &&
                    vs->flags == flags)
                        return vs;
        }
@@ -519,10 +519,10 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
                                          u32 data, struct gro_remcsum *grc,
                                          bool nopartial)
 {
-       size_t start, offset, plen;
+       size_t start, offset;
 
        if (skb->remcsum_offload)
-               return NULL;
+               return vh;
 
        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;
@@ -532,17 +532,8 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
                          offsetof(struct udphdr, check) :
                          offsetof(struct tcphdr, check));
 
-       plen = hdrlen + offset + sizeof(u16);
-
-       /* Pull checksum that will be written */
-       if (skb_gro_header_hard(skb, off + plen)) {
-               vh = skb_gro_header_slow(skb, off + plen, off);
-               if (!vh)
-                       return NULL;
-       }
-
-       skb_gro_remcsum_process(skb, (void *)vh + hdrlen,
-                               start, offset, grc, nopartial);
+       vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
+                                    start, offset, grc, nopartial);
 
        skb->remcsum_offload = 1;
 
@@ -573,7 +564,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
                        goto out;
        }
 
-       skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
        skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
 
        flags = ntohl(vh->vx_flags);
@@ -588,6 +578,8 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
                        goto out;
        }
 
+       skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+
        flush = 0;
 
        for (p = *head; p; p = p->next) {
@@ -625,7 +617,7 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
        struct net_device *dev;
        struct sock *sk = vs->sock->sk;
        struct net *net = sock_net(sk);
-       sa_family_t sa_family = sk->sk_family;
+       sa_family_t sa_family = vxlan_get_sk_family(vs);
        __be16 port = inet_sk(sk)->inet_sport;
        int err;
 
@@ -650,7 +642,7 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
        struct net_device *dev;
        struct sock *sk = vs->sock->sk;
        struct net *net = sock_net(sk);
-       sa_family_t sa_family = sk->sk_family;
+       sa_family_t sa_family = vxlan_get_sk_family(vs);
        __be16 port = inet_sk(sk)->inet_sport;
 
        rcu_read_lock();
@@ -1110,6 +1102,9 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
 {
        size_t start, offset, plen;
 
+       if (skb->remcsum_offload)
+               return vh;
+
        start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
        offset = start + ((data & VXLAN_RCO_UDP) ?
                          offsetof(struct udphdr, check) :
@@ -1213,7 +1208,7 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
        stats->rx_bytes += skb->len;
        u64_stats_update_end(&stats->syncp);
 
-       netif_rx(skb);
+       gro_cells_receive(&vxlan->gro_cells, skb);
 
        return;
 drop:
@@ -1269,17 +1264,27 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        }
 
        if (vxlan_collect_metadata(vs)) {
-               const struct iphdr *iph = ip_hdr(skb);
-
                tun_dst = metadata_dst_alloc(sizeof(*md), GFP_ATOMIC);
                if (!tun_dst)
                        goto drop;
 
                info = &tun_dst->u.tun_info;
-               info->key.ipv4_src = iph->saddr;
-               info->key.ipv4_dst = iph->daddr;
-               info->key.ipv4_tos = iph->tos;
-               info->key.ipv4_ttl = iph->ttl;
+               if (vxlan_get_sk_family(vs) == AF_INET) {
+                       const struct iphdr *iph = ip_hdr(skb);
+
+                       info->key.u.ipv4.src = iph->saddr;
+                       info->key.u.ipv4.dst = iph->daddr;
+                       info->key.tos = iph->tos;
+                       info->key.ttl = iph->ttl;
+               } else {
+                       const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+                       info->key.u.ipv6.src = ip6h->saddr;
+                       info->key.u.ipv6.dst = ip6h->daddr;
+                       info->key.tos = ipv6_get_dsfield(ip6h);
+                       info->key.ttl = ip6h->hop_limit;
+               }
+
                info->key.tp_src = udp_hdr(skb)->source;
                info->key.tp_dst = udp_hdr(skb)->dest;
 
@@ -1894,6 +1899,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        struct ip_tunnel_info *info;
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct sock *sk = vxlan->vn_sock->sock->sk;
+       unsigned short family = vxlan_get_sk_family(vxlan->vn_sock);
        struct rtable *rt = NULL;
        const struct iphdr *old_iph;
        struct flowi4 fl4;
@@ -1908,8 +1914,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        int err;
        u32 flags = vxlan->flags;
 
-       /* FIXME: Support IPv6 */
-       info = skb_tunnel_info(skb, AF_INET);
+       info = skb_tunnel_info(skb);
 
        if (rdst) {
                dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
@@ -1924,8 +1929,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
                vni = be64_to_cpu(info->key.tun_id);
-               remote_ip.sin.sin_family = AF_INET;
-               remote_ip.sin.sin_addr.s_addr = info->key.ipv4_dst;
+               remote_ip.sa.sa_family = family;
+               if (family == AF_INET)
+                       remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
+               else
+                       remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
                dst = &remote_ip;
        }
 
@@ -1951,23 +1959,24 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
                                     vxlan->cfg.port_max, true);
 
+       if (info) {
+               if (info->key.tun_flags & TUNNEL_CSUM)
+                       flags |= VXLAN_F_UDP_CSUM;
+               else
+                       flags &= ~VXLAN_F_UDP_CSUM;
+
+               ttl = info->key.ttl;
+               tos = info->key.tos;
+
+               if (info->options_len)
+                       md = ip_tunnel_info_opts(info, sizeof(*md));
+       } else {
+               md->gbp = skb->mark;
+       }
+
        if (dst->sa.sa_family == AF_INET) {
-               if (info) {
-                       if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
-                               df = htons(IP_DF);
-                       if (info->key.tun_flags & TUNNEL_CSUM)
-                               flags |= VXLAN_F_UDP_CSUM;
-                       else
-                               flags &= ~VXLAN_F_UDP_CSUM;
-
-                       ttl = info->key.ipv4_ttl;
-                       tos = info->key.ipv4_tos;
-
-                       if (info->options_len)
-                               md = ip_tunnel_info_opts(info, sizeof(*md));
-               } else {
-                       md->gbp = skb->mark;
-               }
+               if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
+                       df = htons(IP_DF);
 
                memset(&fl4, 0, sizeof(fl4));
                fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
@@ -2025,7 +2034,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        } else {
                struct dst_entry *ndst;
                struct flowi6 fl6;
-               u32 flags;
+               u32 rt6i_flags;
 
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
@@ -2050,9 +2059,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                /* Bypass encapsulation if the destination is local */
-               flags = ((struct rt6_info *)ndst)->rt6i_flags;
-               if (flags & RTF_LOCAL &&
-                   !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
+               rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
+               if (rt6i_flags & RTF_LOCAL &&
+                   !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
                        struct vxlan_dev *dst_vxlan;
 
                        dst_release(ndst);
@@ -2066,12 +2075,10 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
-               md->gbp = skb->mark;
-
                err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
                                      0, ttl, src_port, dst_port, htonl(vni << 8), md,
                                      !net_eq(vxlan->net, dev_net(vxlan->dev)),
-                                     vxlan->flags);
+                                     flags);
 #endif
        }
 
@@ -2104,8 +2111,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
        struct vxlan_rdst *rdst, *fdst = NULL;
        struct vxlan_fdb *f;
 
-       /* FIXME: Support IPv6 */
-       info = skb_tunnel_info(skb, AF_INET);
+       info = skb_tunnel_info(skb);
 
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
@@ -2390,7 +2396,7 @@ void vxlan_get_rx_port(struct net_device *dev)
        for (i = 0; i < PORT_HASH_SIZE; ++i) {
                hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
                        port = inet_sk(vs->sock->sk)->inet_sport;
-                       sa_family = vs->sock->sk->sk_family;
+                       sa_family = vxlan_get_sk_family(vs);
                        dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
                                                            port);
                }
@@ -2416,7 +2422,6 @@ static void vxlan_setup(struct net_device *dev)
        dev->destructor = free_netdev;
        SET_NETDEV_DEVTYPE(dev, &vxlan_type);
 
-       dev->tx_queue_len = 0;
        dev->features   |= NETIF_F_LLTX;
        dev->features   |= NETIF_F_SG | NETIF_F_HW_CSUM;
        dev->features   |= NETIF_F_RXCSUM;
@@ -2428,7 +2433,7 @@ static void vxlan_setup(struct net_device *dev)
        dev->hw_features |= NETIF_F_GSO_SOFTWARE;
        dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
        netif_keep_dst(dev);
-       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
 
        INIT_LIST_HEAD(&vxlan->next);
        spin_lock_init(&vxlan->hash_lock);
@@ -2441,6 +2446,8 @@ static void vxlan_setup(struct net_device *dev)
 
        vxlan->dev = dev;
 
+       gro_cells_init(&vxlan->gro_cells, dev);
+
        for (h = 0; h < FDB_HASH_SIZE; ++h)
                INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
 }
@@ -2880,6 +2887,7 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
                hlist_del_rcu(&vxlan->hlist);
        spin_unlock(&vn->sock_lock);
 
+       gro_cells_destroy(&vxlan->gro_cells);
        list_del(&vxlan->next);
        unregister_netdevice_queue(dev, head);
 }
@@ -3088,8 +3096,10 @@ static void __net_exit vxlan_exit_net(struct net *net)
                /* If vxlan->dev is in the same netns, it has already been added
                 * to the list by the previous loop.
                 */
-               if (!net_eq(dev_net(vxlan->dev), net))
+               if (!net_eq(dev_net(vxlan->dev), net)) {
+                       gro_cells_destroy(&vxlan->gro_cells);
                        unregister_netdevice_queue(vxlan->dev, &list);
+               }
        }
 
        unregister_netdevice_many(&list);
index 3ebed1c40abb11c192db0acc36d90bfd2028c646..e92aaf61590109430aa6b9b6cd100b7d35daf90b 100644 (file)
@@ -1096,7 +1096,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
        }
        dev->netdev_ops = &pvc_ops;
        dev->mtu = HDLC_MAX_MTU;
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->ml_priv = pvc;
 
        if (register_netdevice(dev) != 0) {
index f79fa6c67ebc3cbcff75a802a426d8ccdb89d979..25510679fd2ed643d81f782eb3edc37e9c9f9a86 100644 (file)
 #include "wmi-ops.h"
 
 unsigned int ath10k_debug_mask;
+static unsigned int ath10k_cryptmode_param;
 static bool uart_print;
 static bool skip_otp;
 
 module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
 module_param(uart_print, bool, 0644);
 module_param(skip_otp, bool, 0644);
 
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 MODULE_PARM_DESC(uart_print, "Uart target debugging");
 MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
+MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
 
 static const struct ath10k_hw_params ath10k_hw_params_list[] = {
        {
@@ -1073,6 +1076,46 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                return -EINVAL;
        }
 
+       ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
+       switch (ath10k_cryptmode_param) {
+       case ATH10K_CRYPT_MODE_HW:
+               clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+               clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+               break;
+       case ATH10K_CRYPT_MODE_SW:
+               if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+                             ar->fw_features)) {
+                       ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
+                       return -EINVAL;
+               }
+
+               set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+               set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+               break;
+       default:
+               ath10k_info(ar, "invalid cryptmode: %d\n",
+                           ath10k_cryptmode_param);
+               return -EINVAL;
+       }
+
+       ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
+       ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
+
+       if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+               ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
+
+               /* Workaround:
+                *
+                * Firmware A-MSDU aggregation breaks with RAW Tx encap mode
+                * and causes enormous performance issues (malformed frames,
+                * etc).
+                *
+                * Disabling A-MSDU makes RAW mode stable with heavy traffic
+                * albeit a bit slower compared to regular operation.
+                */
+               ar->htt.max_num_amsdu = 1;
+       }
+
        /* Backwards compatibility for firmwares without
         * ATH10K_FW_IE_WMI_OP_VERSION.
         */
@@ -1606,6 +1649,10 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
        if (!ar->workqueue)
                goto err_free_mac;
 
+       ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq");
+       if (!ar->workqueue_aux)
+               goto err_free_wq;
+
        mutex_init(&ar->conf_mutex);
        spin_lock_init(&ar->data_lock);
 
@@ -1626,10 +1673,12 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 
        ret = ath10k_debug_create(ar);
        if (ret)
-               goto err_free_wq;
+               goto err_free_aux_wq;
 
        return ar;
 
+err_free_aux_wq:
+       destroy_workqueue(ar->workqueue_aux);
 err_free_wq:
        destroy_workqueue(ar->workqueue);
 
@@ -1645,6 +1694,9 @@ void ath10k_core_destroy(struct ath10k *ar)
        flush_workqueue(ar->workqueue);
        destroy_workqueue(ar->workqueue);
 
+       flush_workqueue(ar->workqueue_aux);
+       destroy_workqueue(ar->workqueue_aux);
+
        ath10k_debug_destroy(ar);
        ath10k_mac_destroy(ar);
 }
index 78e07051b8970415086783674ba2d123cfddc312..6a387bac27b0f8bea60d1ee7a7c4eba223fa4545 100644 (file)
@@ -92,6 +92,7 @@ struct ath10k_skb_cb {
                u8 tid;
                u16 freq;
                bool is_offchan;
+               bool nohwcrypt;
                struct ath10k_htt_txbuf *txbuf;
                u32 txbuf_paddr;
        } __packed htt;
@@ -152,6 +153,7 @@ struct ath10k_wmi {
        const struct wmi_ops *ops;
 
        u32 num_mem_chunks;
+       u32 rx_decap_mode;
        struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
 };
 
@@ -341,6 +343,7 @@ struct ath10k_vif {
        } u;
 
        bool use_cts_prot;
+       bool nohwcrypt;
        int num_legacy_stations;
        int txpower;
        struct wmi_wmm_params_all_arg wmm_params;
@@ -382,9 +385,6 @@ struct ath10k_debug {
        u32 reg_addr;
        u32 nf_cal_period;
 
-       u8 htt_max_amsdu;
-       u8 htt_max_ampdu;
-
        struct ath10k_fw_crash_data *fw_crash_data;
 };
 
@@ -453,16 +453,21 @@ enum ath10k_fw_features {
        ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
 
        /* Don't trust error code from otp.bin */
-       ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+       ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7,
 
        /* Some firmware revisions pad 4th hw address to 4 byte boundary making
         * it 8 bytes long in Native Wifi Rx decap.
         */
-       ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+       ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8,
 
        /* Firmware supports bypassing PLL setting on init. */
        ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
 
+       /* Raw mode support. If supported, FW supports receiving and transmitting
+        * frames in raw mode.
+        */
+       ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
+
        /* keep last */
        ATH10K_FW_FEATURE_COUNT,
 };
@@ -476,6 +481,15 @@ enum ath10k_dev_flags {
         * waiters should immediately cancel instead of waiting for a time out.
         */
        ATH10K_FLAG_CRASH_FLUSH,
+
+       /* Use Raw mode instead of native WiFi Tx/Rx encap mode.
+        * Raw mode supports both hardware and software crypto. Native WiFi only
+        * supports hardware crypto.
+        */
+       ATH10K_FLAG_RAW_MODE,
+
+       /* Disable HW crypto engine */
+       ATH10K_FLAG_HW_CRYPTO_DISABLED,
 };
 
 enum ath10k_cal_mode {
@@ -484,6 +498,13 @@ enum ath10k_cal_mode {
        ATH10K_CAL_MODE_DT,
 };
 
+enum ath10k_crypt_mode {
+       /* Only use hardware crypto engine */
+       ATH10K_CRYPT_MODE_HW,
+       /* Only use software crypto engine */
+       ATH10K_CRYPT_MODE_SW,
+};
+
 static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
 {
        switch (mode) {
@@ -673,6 +694,8 @@ struct ath10k {
        struct completion vdev_setup_done;
 
        struct workqueue_struct *workqueue;
+       /* Auxiliary workqueue */
+       struct workqueue_struct *workqueue_aux;
 
        /* prevents concurrent FW reconfiguration */
        struct mutex conf_mutex;
@@ -695,6 +718,9 @@ struct ath10k {
        int num_active_peers;
        int num_tids;
 
+       struct work_struct svc_rdy_work;
+       struct sk_buff *svc_rdy_skb;
+
        struct work_struct offchan_tx_work;
        struct sk_buff_head offchan_tx_queue;
        struct completion offchan_tx_completed;
index edf6047997a7bef6a131692e6f842517c91fe891..f7aa1c73b4814968420dc50209b302a8a676c383 100644 (file)
@@ -124,11 +124,11 @@ EXPORT_SYMBOL(ath10k_info);
 
 void ath10k_print_driver_info(struct ath10k *ar)
 {
-       char fw_features[128];
+       char fw_features[128] = {};
 
        ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
 
-       ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d features %s\n",
+       ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
                    ar->hw_params.name,
                    ar->target_version,
                    ar->chip_id,
@@ -144,6 +144,8 @@ void ath10k_print_driver_info(struct ath10k *ar)
                    ar->htt.op_version,
                    ath10k_cal_mode_str(ar->cal_mode),
                    ar->max_num_stations,
+                   test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
+                   !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags),
                    fw_features);
        ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
                    config_enabled(CONFIG_ATH10K_DEBUG),
@@ -1363,12 +1365,8 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
 
        mutex_lock(&ar->conf_mutex);
 
-       if (ar->debug.htt_max_amsdu)
-               amsdu = ar->debug.htt_max_amsdu;
-
-       if (ar->debug.htt_max_ampdu)
-               ampdu = ar->debug.htt_max_ampdu;
-
+       amsdu = ar->htt.max_num_amsdu;
+       ampdu = ar->htt.max_num_ampdu;
        mutex_unlock(&ar->conf_mutex);
 
        len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
@@ -1402,8 +1400,8 @@ static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
                goto out;
 
        res = count;
-       ar->debug.htt_max_amsdu = amsdu;
-       ar->debug.htt_max_ampdu = ampdu;
+       ar->htt.max_num_amsdu = amsdu;
+       ar->htt.max_num_ampdu = ampdu;
 
 out:
        mutex_unlock(&ar->conf_mutex);
@@ -1905,9 +1903,6 @@ void ath10k_debug_stop(struct ath10k *ar)
        if (ar->debug.htt_stats_mask != 0)
                cancel_delayed_work(&ar->debug.htt_stats_dwork);
 
-       ar->debug.htt_max_amsdu = 0;
-       ar->debug.htt_max_ampdu = 0;
-
        ath10k_wmi_pdev_pktlog_disable(ar);
 }
 
index 4474c3e839db459cd9e18eb33b821a7045da3190..3e6ba63dfdffe118b8d6e62a4fb086c376f1e3a5 100644 (file)
@@ -246,12 +246,31 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
        }
 
        status = ath10k_htt_verify_version(htt);
-       if (status)
+       if (status) {
+               ath10k_warn(ar, "failed to verify htt version: %d\n",
+                           status);
                return status;
+       }
 
        status = ath10k_htt_send_frag_desc_bank_cfg(htt);
        if (status)
                return status;
 
-       return ath10k_htt_send_rx_ring_cfg_ll(htt);
+       status = ath10k_htt_send_rx_ring_cfg_ll(htt);
+       if (status) {
+               ath10k_warn(ar, "failed to setup rx ring: %d\n",
+                           status);
+               return status;
+       }
+
+       status = ath10k_htt_h2t_aggr_cfg_msg(htt,
+                                            htt->max_num_ampdu,
+                                            htt->max_num_amsdu);
+       if (status) {
+               ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n",
+                           status);
+               return status;
+       }
+
+       return 0;
 }
index 8bdf1e7dd1718ab72199efe70bf1dc476adb0fa2..57318751289555b8e956ef2441c0f2ffafc83835 100644 (file)
@@ -83,15 +83,39 @@ struct htt_ver_req {
  * around the mask + shift defs.
  */
 struct htt_data_tx_desc_frag {
-       __le32 paddr;
-       __le32 len;
+       union {
+               struct double_word_addr {
+                       __le32 paddr;
+                       __le32 len;
+               } __packed dword_addr;
+               struct triple_word_addr {
+                       __le32 paddr_lo;
+                       __le16 paddr_hi;
+                       __le16 len_16;
+               } __packed tword_addr;
+       } __packed;
 } __packed;
 
 struct htt_msdu_ext_desc {
-       __le32 tso_flag[4];
+       __le32 tso_flag[3];
+       __le16 ip_identification;
+       u8 flags;
+       u8 reserved;
        struct htt_data_tx_desc_frag frags[6];
 };
 
+#define        HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE         BIT(0)
+#define        HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE     BIT(1)
+#define        HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE     BIT(2)
+#define        HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE     BIT(3)
+#define        HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE     BIT(4)
+
+#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
+                                | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
+                                | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
+                                | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
+                                | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
+
 enum htt_data_tx_desc_flags0 {
        HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
        HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
@@ -260,6 +284,9 @@ struct htt_aggr_conf {
 } __packed;
 
 #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+struct htt_mgmt_tx_desc_qca99x0 {
+       __le32 rate;
+} __packed;
 
 struct htt_mgmt_tx_desc {
        u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
@@ -268,6 +295,9 @@ struct htt_mgmt_tx_desc {
        __le32 len;
        __le32 vdev_id;
        u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+       union {
+               struct htt_mgmt_tx_desc_qca99x0 qca99x0;
+       } __packed;
 } __packed;
 
 enum htt_mgmt_tx_status {
@@ -1366,6 +1396,8 @@ struct ath10k_htt {
        u8 target_version_minor;
        struct completion target_version_received;
        enum ath10k_fw_htt_op_version op_version;
+       u8 max_num_amsdu;
+       u8 max_num_ampdu;
 
        const enum htt_t2h_msg_type *t2h_msg_types;
        u32 t2h_msg_types_max;
@@ -1528,6 +1560,12 @@ struct htt_rx_desc {
 #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
 #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
 
+/* These values are default in most firmware revisions and apparently are a
+ * sweet spot performance wise.
+ */
+#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
+#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
+
 int ath10k_htt_connect(struct ath10k_htt *htt);
 int ath10k_htt_init(struct ath10k *ar);
 int ath10k_htt_setup(struct ath10k_htt *htt);
index d7d118328f31b2bc96d661cae2c0d8e2591d0ae5..1b7a04366256febc8438e02a01eb979102cb1d39 100644 (file)
@@ -368,7 +368,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                        & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                           RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
-               msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
+               msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;
 
@@ -394,7 +394,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                        msdu_chaining = 1;
                }
 
-               last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
+               last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;
 
                trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
@@ -740,7 +740,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
            __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
                return NULL;
 
-       if (!(rxd->msdu_end.info0 &
+       if (!(rxd->msdu_end.common.info0 &
              __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
                return NULL;
 
@@ -991,9 +991,9 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
        bool is_last;
 
        rxd = (void *)msdu->data - sizeof(*rxd);
-       is_first = !!(rxd->msdu_end.info0 &
+       is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
-       is_last = !!(rxd->msdu_end.info0 &
+       is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
 
        /* Delivered decapped frame:
@@ -1017,9 +1017,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
        skb_trim(msdu, msdu->len - FCS_LEN);
 
        /* In most cases this will be true for sniffed frames. It makes sense
-        * to deliver them as-is without stripping the crypto param. This would
-        * also make sense for software based decryption (which is not
-        * implemented in ath10k).
+        * to deliver them as-is without stripping the crypto param. This is
+        * necessary for software based decryption.
         *
         * If there's no error then the frame is decrypted. At least that is
         * the case for frames that come in via fragmented rx indication.
@@ -1104,9 +1103,9 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
        rxd = (void *)msdu->data - sizeof(*rxd);
        hdr = (void *)rxd->rx_hdr_status;
 
-       is_first = !!(rxd->msdu_end.info0 &
+       is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
-       is_last = !!(rxd->msdu_end.info0 &
+       is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
        is_amsdu = !(is_first && is_last);
 
@@ -1214,7 +1213,7 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
         */
 
        rxd = (void *)msdu->data - sizeof(*rxd);
-       decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+       decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);
 
        switch (decap) {
@@ -1244,7 +1243,7 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
 
        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);
-       info = __le32_to_cpu(rxd->msdu_start.info1);
+       info = __le32_to_cpu(rxd->msdu_start.common.info1);
 
        is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
        is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
@@ -1437,7 +1436,7 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
 
        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);
-       decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+       decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);
 
        if (!chained)
@@ -1631,8 +1630,6 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
        __le16 msdu_id;
        int i;
 
-       lockdep_assert_held(&htt->tx_lock);
-
        switch (status) {
        case HTT_DATA_TX_STATUS_NO_ACK:
                tx_done.no_ack = true;
@@ -1757,14 +1754,14 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
                __skb_queue_tail(amsdu, msdu);
 
                rxd = (void *)msdu->data - sizeof(*rxd);
-               if (rxd->msdu_end.info0 &
+               if (rxd->msdu_end.common.info0 &
                    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
                        break;
        }
 
        msdu = skb_peek_tail(amsdu);
        rxd = (void *)msdu->data - sizeof(*rxd);
-       if (!(rxd->msdu_end.info0 &
+       if (!(rxd->msdu_end.common.info0 &
              __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
                skb_queue_splice_init(amsdu, list);
                return -EAGAIN;
@@ -1998,15 +1995,11 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                        break;
                }
 
-               spin_lock_bh(&htt->tx_lock);
                ath10k_txrx_tx_unref(htt, &tx_done);
-               spin_unlock_bh(&htt->tx_lock);
                break;
        }
        case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
-               spin_lock_bh(&htt->tx_lock);
-               __skb_queue_tail(&htt->tx_compl_q, skb);
-               spin_unlock_bh(&htt->tx_lock);
+               skb_queue_tail(&htt->tx_compl_q, skb);
                tasklet_schedule(&htt->txrx_compl_task);
                return;
        case HTT_T2H_MSG_TYPE_SEC_IND: {
@@ -2072,6 +2065,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                break;
        case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
                break;
+       case HTT_T2H_MSG_TYPE_AGGR_CONF:
+               break;
        case HTT_T2H_MSG_TYPE_EN_STATS:
        case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
        case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
@@ -2095,12 +2090,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
        struct htt_resp *resp;
        struct sk_buff *skb;
 
-       spin_lock_bh(&htt->tx_lock);
-       while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
+       while ((skb = skb_dequeue(&htt->tx_compl_q))) {
                ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
                dev_kfree_skb_any(skb);
        }
-       spin_unlock_bh(&htt->tx_lock);
 
        spin_lock_bh(&htt->rx_ring.lock);
        while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
index 148d5b607c3cf08decf9aba36b82d16e87cf19b9..704bb5e071938b7e3f56b9b42fa8304c3dbe2fed 100644 (file)
@@ -63,7 +63,8 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
 
        lockdep_assert_held(&htt->tx_lock);
 
-       ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
+       ret = idr_alloc(&htt->pending_tx, skb, 0,
+                       htt->max_num_pending_tx, GFP_ATOMIC);
 
        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
 
@@ -133,9 +134,7 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
        tx_done.discard = 1;
        tx_done.msdu_id = msdu_id;
 
-       spin_lock_bh(&htt->tx_lock);
        ath10k_txrx_tx_unref(htt, &tx_done);
-       spin_unlock_bh(&htt->tx_lock);
 
        return 0;
 }
@@ -259,6 +258,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
        cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
        cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
                                __cpu_to_le32(htt->frag_desc.paddr);
+       cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
        cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
                                __cpu_to_le16(htt->max_num_pending_tx - 1);
 
@@ -427,12 +427,11 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+       spin_unlock_bh(&htt->tx_lock);
        if (res < 0) {
-               spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
-       spin_unlock_bh(&htt->tx_lock);
 
        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
@@ -448,6 +447,8 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
+       memset(cmd, 0, len);
+
        cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
@@ -494,6 +495,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        u16 msdu_id, flags1 = 0;
        dma_addr_t paddr = 0;
        u32 frags_paddr = 0;
+       struct htt_msdu_ext_desc *ext_desc = NULL;
 
        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
@@ -501,12 +503,11 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+       spin_unlock_bh(&htt->tx_lock);
        if (res < 0) {
-               spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
-       spin_unlock_bh(&htt->tx_lock);
 
        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);
@@ -522,8 +523,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
-            ieee80211_has_protected(hdr->frame_control))
+            ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+       } else if (!skb_cb->htt.nohwcrypt &&
+                  skb_cb->txmode == ATH10K_HW_TXRX_RAW) {
+               skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+       }
 
        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
@@ -537,16 +542,30 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
                /* pass through */
        case ATH10K_HW_TXRX_ETHERNET:
-               frags = skb_cb->htt.txbuf->frags;
-
-               frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
-               frags[0].len = __cpu_to_le32(msdu->len);
-               frags[1].paddr = 0;
-               frags[1].len = 0;
-
+               if (ar->hw_params.continuous_frag_desc) {
+                       memset(&htt->frag_desc.vaddr[msdu_id], 0,
+                              sizeof(struct htt_msdu_ext_desc));
+                       frags = (struct htt_data_tx_desc_frag *)
+                               &htt->frag_desc.vaddr[msdu_id].frags;
+                       ext_desc = &htt->frag_desc.vaddr[msdu_id];
+                       frags[0].tword_addr.paddr_lo =
+                               __cpu_to_le32(skb_cb->paddr);
+                       frags[0].tword_addr.paddr_hi = 0;
+                       frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+                       frags_paddr =  htt->frag_desc.paddr +
+                               (sizeof(struct htt_msdu_ext_desc) * msdu_id);
+               } else {
+                       frags = skb_cb->htt.txbuf->frags;
+                       frags[0].dword_addr.paddr =
+                               __cpu_to_le32(skb_cb->paddr);
+                       frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
+                       frags[1].dword_addr.paddr = 0;
+                       frags[1].dword_addr.len = 0;
+
+                       frags_paddr = skb_cb->htt.txbuf_paddr;
+               }
                flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
-
-               frags_paddr = skb_cb->htt.txbuf_paddr;
                break;
        case ATH10K_HW_TXRX_MGMT:
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
@@ -580,14 +599,20 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
                        prefetch_len);
        skb_cb->htt.txbuf->htc_hdr.flags = 0;
 
+       if (skb_cb->htt.nohwcrypt)
+               flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
        if (!skb_cb->is_protected)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
 
        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
-       if (msdu->ip_summed == CHECKSUM_PARTIAL) {
+       if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+           !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+               if (ar->hw_params.continuous_frag_desc)
+                       ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
        }
 
        /* Prevent firmware from sending up tx inspection requests. There's
index 9172285175461ab359f9a7b552a7276b664000a9..d9de4a73847028f7d9c869dcc0ce617195488fbd 100644 (file)
@@ -217,14 +217,16 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
 #define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
 
 /* Known pecularities:
- *  - current FW doesn't support raw rx mode (last tested v599)
- *  - current FW dumps upon raw tx mode (last tested v599)
  *  - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
  *  - raw have FCS, nwifi doesn't
  *  - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
  *    param, llc/snap) are aligned to 4byte boundaries each */
 enum ath10k_hw_txrx_mode {
        ATH10K_HW_TXRX_RAW = 0,
+
+       /* Native Wifi decap mode is used to align IP frames to 4-byte
+        * boundaries and avoid a very expensive re-alignment in mac80211.
+        */
        ATH10K_HW_TXRX_NATIVE_WIFI = 1,
        ATH10K_HW_TXRX_ETHERNET = 2,
 
@@ -286,10 +288,6 @@ enum ath10k_hw_rate_cck {
 #define TARGET_RX_TIMEOUT_LO_PRI               100
 #define TARGET_RX_TIMEOUT_HI_PRI               40
 
-/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
- * avoid a very expensive re-alignment in mac80211. */
-#define TARGET_RX_DECAP_MODE                   ATH10K_HW_TXRX_NATIVE_WIFI
-
 #define TARGET_SCAN_MAX_PENDING_REQS           4
 #define TARGET_BMISS_OFFLOAD_MAX_VDEV          3
 #define TARGET_ROAM_OFFLOAD_MAX_VDEV           3
@@ -324,7 +322,6 @@ enum ath10k_hw_rate_cck {
 #define TARGET_10X_RX_CHAIN_MASK               (BIT(0) | BIT(1) | BIT(2))
 #define TARGET_10X_RX_TIMEOUT_LO_PRI           100
 #define TARGET_10X_RX_TIMEOUT_HI_PRI           40
-#define TARGET_10X_RX_DECAP_MODE               ATH10K_HW_TXRX_NATIVE_WIFI
 #define TARGET_10X_SCAN_MAX_PENDING_REQS       4
 #define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV      2
 #define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV       2
@@ -363,10 +360,7 @@ enum ath10k_hw_rate_cck {
                                                 (TARGET_10_4_NUM_VDEVS))
 #define TARGET_10_4_ACTIVE_PEERS               0
 
-/* TODO: increase qcache max client limit to 512 after
- * testing with 512 client.
- */
-#define TARGET_10_4_NUM_QCACHE_PEERS_MAX       256
+#define TARGET_10_4_NUM_QCACHE_PEERS_MAX       512
 #define TARGET_10_4_QCACHE_ACTIVE_PEERS                50
 #define TARGET_10_4_NUM_OFFLOAD_PEERS          0
 #define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS  0
index c9a7d5b5dffc5d367fe0ad9d84f071430eb8fb24..96f4285e93b8dd9d34d2e47d2e5975f9c040d1e9 100644 (file)
@@ -197,6 +197,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
                return -EOPNOTSUPP;
        }
 
+       if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+       }
+
        if (cmd == DISABLE_KEY) {
                arg.key_cipher = WMI_CIPHER_NONE;
                arg.key_data = NULL;
@@ -218,6 +222,9 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
 
        reinit_completion(&ar->install_key_done);
 
+       if (arvif->nohwcrypt)
+               return 1;
+
        ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
        if (ret)
                return ret;
@@ -256,7 +263,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
 
                ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
                                         addr, flags);
-               if (ret)
+               if (ret < 0)
                        return ret;
 
                flags = 0;
@@ -264,7 +271,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
 
                ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
                                         addr, flags);
-               if (ret)
+               if (ret < 0)
                        return ret;
 
                spin_lock_bh(&ar->data_lock);
@@ -322,10 +329,10 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
                /* key flags are not required to delete the key */
                ret = ath10k_install_key(arvif, peer->keys[i],
                                         DISABLE_KEY, addr, flags);
-               if (ret && first_errno == 0)
+               if (ret < 0 && first_errno == 0)
                        first_errno = ret;
 
-               if (ret)
+               if (ret < 0)
                        ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
                                    i, ret);
 
@@ -398,7 +405,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
                        break;
                /* key flags are not required to delete the key */
                ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
-               if (ret && first_errno == 0)
+               if (ret < 0 && first_errno == 0)
                        first_errno = ret;
 
                if (ret)
@@ -591,11 +598,19 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
 static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
                              enum wmi_peer_type peer_type)
 {
+       struct ath10k_vif *arvif;
+       int num_peers = 0;
        int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (ar->num_peers >= ar->max_num_peers)
+       num_peers = ar->num_peers;
+
+       /* Each vdev consumes a peer entry as well */
+       list_for_each_entry(arvif, &ar->arvifs, list)
+               num_peers++;
+
+       if (num_peers >= ar->max_num_peers)
                return -ENOBUFS;
 
        ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
@@ -671,20 +686,6 @@ static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
        return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
 }
 
-static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
-{
-       struct ath10k *ar = arvif->ar;
-       u32 vdev_param;
-
-       if (value != 0xFFFFFFFF)
-               value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
-                               ATH10K_FRAGMT_THRESHOLD_MIN,
-                               ATH10K_FRAGMT_THRESHOLD_MAX);
-
-       vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
-       return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
-}
-
 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 {
        int ret;
@@ -836,7 +837,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
 {
        struct cfg80211_chan_def *chandef = NULL;
-       struct ieee80211_channel *channel = chandef->chan;
+       struct ieee80211_channel *channel = NULL;
        struct wmi_vdev_start_request_arg arg = {};
        int ret = 0;
 
@@ -2502,6 +2503,9 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
        u32 param;
        u32 value;
 
+       if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
+               return 0;
+
        if (!(ar->vht_cap_info &
              (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
               IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
@@ -3149,13 +3153,30 @@ ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
         * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
         * NativeWifi txmode - it selects AP key instead of peer key. It seems
         * to work with Ethernet txmode so use it.
+        *
+        * FIXME: Check if raw mode works with TDLS.
         */
        if (ieee80211_is_data_present(fc) && sta && sta->tdls)
                return ATH10K_HW_TXRX_ETHERNET;
 
+       if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+               return ATH10K_HW_TXRX_RAW;
+
        return ATH10K_HW_TXRX_NATIVE_WIFI;
 }
 
+static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
+                                    struct sk_buff *skb) {
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
+                        IEEE80211_TX_CTL_INJECTED;
+       if ((info->flags & mask) == mask)
+               return false;
+       if (vif)
+               return !ath10k_vif_to_arvif(vif)->nohwcrypt;
+       return true;
+}
+
 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
  * Control in the header.
  */
@@ -3322,6 +3343,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
        int vdev_id;
        int ret;
        unsigned long time_left;
+       bool tmp_peer_created = false;
 
        /* FW requirement: We must create a peer before FW will send out
         * an offchannel frame. Otherwise the frame will be stuck and
@@ -3359,6 +3381,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                        if (ret)
                                ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
                                            peer_addr, vdev_id, ret);
+                       tmp_peer_created = (ret == 0);
                }
 
                spin_lock_bh(&ar->data_lock);
@@ -3374,7 +3397,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                        ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
                                    skb);
 
-               if (!peer) {
+               if (!peer && tmp_peer_created) {
                        ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
                        if (ret)
                                ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
@@ -3600,6 +3623,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
        ATH10K_SKB_CB(skb)->htt.is_offchan = false;
        ATH10K_SKB_CB(skb)->htt.freq = 0;
        ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
+       ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb);
        ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
        ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
        ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
@@ -3615,12 +3639,11 @@ static void ath10k_tx(struct ieee80211_hw *hw,
                ath10k_tx_h_8023(skb);
                break;
        case ATH10K_HW_TXRX_RAW:
-               /* FIXME: Packet injection isn't implemented. It should be
-                * doable with firmware 10.2 on qca988x.
-                */
-               WARN_ON_ONCE(1);
-               ieee80211_free_txskb(hw, skb);
-               return;
+               if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+                       WARN_ON_ONCE(1);
+                       ieee80211_free_txskb(hw, skb);
+                       return;
+               }
        }
 
        if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
@@ -4019,6 +4042,43 @@ static u32 get_nss_from_chainmask(u16 chain_mask)
        return 1;
 }
 
+static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
+{
+       u32 value = 0;
+       struct ath10k *ar = arvif->ar;
+
+       if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
+               return 0;
+
+       if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+                               IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
+               value |= SM((ar->num_rf_chains - 1), WMI_TXBF_STS_CAP_OFFSET);
+
+       if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+                               IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
+               value |= SM((ar->num_rf_chains - 1), WMI_BF_SOUND_DIM_OFFSET);
+
+       if (!value)
+               return 0;
+
+       if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+               value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+       if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+               value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
+                         WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
+
+       if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+               value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+       if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+               value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
+                         WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
+
+       return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                        ar->wmi.vdev_param->txbf, value);
+}
+
 /*
  * TODO:
  * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
@@ -4060,6 +4120,11 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
        }
 
+       if (ar->num_peers >= ar->max_num_peers) {
+               ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
+               return -ENOBUFS;
+       }
+
        if (ar->free_vdev_map == 0) {
                ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
                ret = -EBUSY;
@@ -4139,6 +4204,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                        goto err;
                }
        }
+       if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
+               arvif->nohwcrypt = true;
+
+       if (arvif->nohwcrypt &&
+           !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+               ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
+               goto err;
+       }
 
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
                   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
@@ -4237,16 +4310,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                }
        }
 
-       ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+       ret = ath10k_mac_set_txbf_conf(arvif);
        if (ret) {
-               ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+               ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
                            arvif->vdev_id, ret);
                goto err_peer_delete;
        }
 
-       ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
+       ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
        if (ret) {
-               ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
+               ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
                            arvif->vdev_id, ret);
                goto err_peer_delete;
        }
@@ -4728,6 +4801,9 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
                return 1;
 
+       if (arvif->nohwcrypt)
+               return 1;
+
        if (key->keyidx > WMI_MAX_KEY_INDEX)
                return -ENOSPC;
 
@@ -4797,6 +4873,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
        if (ret) {
+               WARN_ON(ret > 0);
                ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
                            arvif->vdev_id, peer_addr, ret);
                goto exit;
@@ -4812,13 +4889,16 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
                ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
                if (ret) {
+                       WARN_ON(ret > 0);
                        ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
                                    arvif->vdev_id, peer_addr, ret);
                        ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
                                                  peer_addr, flags);
-                       if (ret2)
+                       if (ret2) {
+                               WARN_ON(ret2 > 0);
                                ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
                                            arvif->vdev_id, peer_addr, ret2);
+                       }
                        goto exit;
                }
        }
@@ -5545,6 +5625,21 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
        return ret;
 }
 
+static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+       /* Even though there's a WMI enum for fragmentation threshold no known
+        * firmware actually implements it. Moreover it is not possible to rely
+        * frame fragmentation to mac80211 because firmware clears the "more
+        * fragments" bit in frame control making it impossible for remote
+        * devices to reassemble frames.
+        *
+        * Hence implement a dummy callback just to say fragmentation isn't
+        * supported. This effectively prevents mac80211 from doing frame
+        * fragmentation in software.
+        */
+       return -EOPNOTSUPP;
+}
+
 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                         u32 queues, bool drop)
 {
@@ -6387,6 +6482,7 @@ static const struct ieee80211_ops ath10k_ops = {
        .remain_on_channel              = ath10k_remain_on_channel,
        .cancel_remain_on_channel       = ath10k_cancel_remain_on_channel,
        .set_rts_threshold              = ath10k_set_rts_threshold,
+       .set_frag_threshold             = ath10k_mac_op_set_frag_threshold,
        .flush                          = ath10k_flush,
        .tx_last_beacon                 = ath10k_tx_last_beacon,
        .set_antenna                    = ath10k_set_antenna,
@@ -6892,7 +6988,6 @@ int ath10k_mac_register(struct ath10k *ar)
        ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
        ieee80211_hw_set(ar->hw, AP_LINK_PS);
        ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
-       ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
        ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
        ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
        ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
@@ -6900,6 +6995,9 @@ int ath10k_mac_register(struct ath10k *ar)
        ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
        ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
 
+       if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+               ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+
        ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
        ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 
@@ -7003,7 +7101,8 @@ int ath10k_mac_register(struct ath10k *ar)
                goto err_free;
        }
 
-       ar->hw->netdev_features = NETIF_F_HW_CSUM;
+       if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+               ar->hw->netdev_features = NETIF_F_HW_CSUM;
 
        if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
                /* Init ath dfs pattern detector */
index 5778e5277823c53731ab7dd047f60a19816c7515..f00b251ec9ce92f15275ec51c3ff292da3dd423d 100644 (file)
@@ -64,6 +64,7 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
 static const struct pci_device_id ath10k_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
+       { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
        {0}
 };
 
@@ -78,6 +79,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+       { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
 };
 
 static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
@@ -2761,7 +2763,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
 
 static int ath10k_pci_cold_reset(struct ath10k *ar)
 {
-       int i;
        u32 val;
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
@@ -2777,23 +2778,18 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
        val |= 1;
        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
-       for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-               if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
-                                         RTC_STATE_COLD_RESET_MASK)
-                       break;
-               msleep(1);
-       }
+       /* After writing into SOC_GLOBAL_RESET to put device into
+        * reset and pulling out of reset pcie may not be stable
+        * for any immediate pcie register access and cause bus error,
+        * add delay before any pcie access request to fix this issue.
+        */
+       msleep(20);
 
        /* Pull Target, including PCIe, out of RESET. */
        val &= ~1;
        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
-       for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-               if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
-                                           RTC_STATE_COLD_RESET_MASK))
-                       break;
-               msleep(1);
-       }
+       msleep(20);
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
 
index 492b5a5af434ddb67e01fa9c0d4638eb84f3d66e..ca8d16884af1de1500f31b224f1b68c282b9d945 100644 (file)
@@ -422,6 +422,12 @@ struct rx_mpdu_end {
 #define RX_MSDU_START_INFO1_IP_FRAG             (1 << 14)
 #define RX_MSDU_START_INFO1_TCP_ONLY_ACK        (1 << 15)
 
+#define RX_MSDU_START_INFO2_DA_IDX_MASK         0x000007ff
+#define RX_MSDU_START_INFO2_DA_IDX_LSB          0
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB  16
+#define RX_MSDU_START_INFO2_DA_BCAST_MCAST      BIT(11)
+
 /* The decapped header (rx_hdr_status) contains the following:
  *  a) 802.11 header
  *  [padding to 4 bytes]
@@ -449,12 +455,23 @@ enum rx_msdu_decap_format {
        RX_MSDU_DECAP_8023_SNAP_LLC = 3
 };
 
-struct rx_msdu_start {
+struct rx_msdu_start_common {
        __le32 info0; /* %RX_MSDU_START_INFO0_ */
        __le32 flow_id_crc;
        __le32 info1; /* %RX_MSDU_START_INFO1_ */
 } __packed;
 
+struct rx_msdu_start_qca99x0 {
+       __le32 info2; /* %RX_MSDU_START_INFO2_ */
+} __packed;
+
+struct rx_msdu_start {
+       struct rx_msdu_start_common common;
+       union {
+               struct rx_msdu_start_qca99x0 qca99x0;
+       } __packed;
+} __packed;
+
 /*
  * msdu_length
  *             MSDU length in bytes after decapsulation.  This field is
@@ -540,7 +557,7 @@ struct rx_msdu_start {
 #define RX_MSDU_END_INFO0_PRE_DELIM_ERR             (1 << 30)
 #define RX_MSDU_END_INFO0_RESERVED_3B               (1 << 31)
 
-struct rx_msdu_end {
+struct rx_msdu_end_common {
        __le16 ip_hdr_cksum;
        __le16 tcp_hdr_cksum;
        u8 key_id_octet;
@@ -549,6 +566,36 @@ struct rx_msdu_end {
        __le32 info0;
 } __packed;
 
+#define RX_MSDU_END_INFO1_TCP_FLAG_MASK     0x000001ff
+#define RX_MSDU_END_INFO1_TCP_FLAG_LSB      0
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK   0x00001c00
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB    10
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK  0xffff0000
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB   16
+#define RX_MSDU_END_INFO1_IRO_ELIGIBLE      BIT(9)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET_MASK    0x0000003f
+#define RX_MSDU_END_INFO2_DA_OFFSET_LSB     0
+#define RX_MSDU_END_INFO2_SA_OFFSET_MASK    0x00000fc0
+#define RX_MSDU_END_INFO2_SA_OFFSET_LSB     6
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK  0x0003f000
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB   12
+
+struct rx_msdu_end_qca99x0 {
+       __le32 ipv6_crc;
+       __le32 tcp_seq_no;
+       __le32 tcp_ack_no;
+       __le32 info1;
+       __le32 info2;
+} __packed;
+
+struct rx_msdu_end {
+       struct rx_msdu_end_common common;
+       union {
+               struct rx_msdu_end_qca99x0 qca99x0;
+       } __packed;
+} __packed;
+
 /*
  *ip_hdr_chksum
  *             This can include the IP header checksum or the pseudo header
@@ -870,7 +917,11 @@ struct rx_ppdu_start {
 #define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
 #define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
 
-#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
+#define RX_PPDU_END_INFO1_PEER_IDX_MASK       0x1ffc
+#define RX_PPDU_END_INFO1_PEER_IDX_LSB        2
+#define RX_PPDU_END_INFO1_BB_DATA             BIT(0)
+#define RX_PPDU_END_INFO1_PEER_IDX_VALID      BIT(1)
+#define RX_PPDU_END_INFO1_PPDU_DONE           BIT(15)
 
 struct rx_ppdu_end_common {
        __le32 evm_p0;
@@ -891,13 +942,13 @@ struct rx_ppdu_end_common {
        __le32 evm_p15;
        __le32 tsf_timestamp;
        __le32 wb_timestamp;
+} __packed;
+
+struct rx_ppdu_end_qca988x {
        u8 locationing_timestamp;
        u8 phy_err_code;
        __le16 flags; /* %RX_PPDU_END_FLAGS_ */
        __le32 info0; /* %RX_PPDU_END_INFO0_ */
-} __packed;
-
-struct rx_ppdu_end_qca988x {
        __le16 bb_length;
        __le16 info1; /* %RX_PPDU_END_INFO1_ */
 } __packed;
@@ -909,16 +960,126 @@ struct rx_ppdu_end_qca988x {
 #define RX_PPDU_END_RTT_NORMAL_MODE            BIT(31)
 
 struct rx_ppdu_end_qca6174 {
+       u8 locationing_timestamp;
+       u8 phy_err_code;
+       __le16 flags; /* %RX_PPDU_END_FLAGS_ */
+       __le32 info0; /* %RX_PPDU_END_INFO0_ */
        __le32 rtt; /* %RX_PPDU_END_RTT_ */
        __le16 bb_length;
        __le16 info1; /* %RX_PPDU_END_INFO1_ */
 } __packed;
 
+#define RX_PKT_END_INFO0_RX_SUCCESS              BIT(0)
+#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX     BIT(3)
+#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP     BIT(4)
+#define RX_PKT_END_INFO0_ERR_OFDM_RESTART        BIT(5)
+#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP      BIT(6)
+#define RX_PKT_END_INFO0_ERR_CCK_RESTART         BIT(7)
+
+#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK       0x0001ffff
+#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB        0
+#define RX_LOCATION_INFO_FAC_STATUS_MASK         0x000c0000
+#define RX_LOCATION_INFO_FAC_STATUS_LSB          18
+#define RX_LOCATION_INFO_PKT_BW_MASK             0x00700000
+#define RX_LOCATION_INFO_PKT_BW_LSB              20
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB  23
+#define RX_LOCATION_INFO_CIR_STATUS              BIT(17)
+#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE       BIT(25)
+#define RX_LOCATION_INFO_RTT_TX_DATA_START_X     BIT(26)
+#define RX_LOCATION_INFO_HW_IFFT_MODE            BIT(30)
+#define RX_LOCATION_INFO_RX_LOCATION_VALID       BIT(31)
+
+struct rx_pkt_end {
+       __le32 info0; /* %RX_PKT_END_INFO0_ */
+       __le32 phy_timestamp_1;
+       __le32 phy_timestamp_2;
+       __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+} __packed;
+
+enum rx_phy_ppdu_end_info0 {
+       RX_PHY_PPDU_END_INFO0_ERR_RADAR           = BIT(2),
+       RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT        = BIT(3),
+       RX_PHY_PPDU_END_INFO0_ERR_RX_NAP          = BIT(4),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING     = BIT(5),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY     = BIT(6),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE       = BIT(7),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH     = BIT(8),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART    = BIT(9),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE    = BIT(10),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER     = BIT(12),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING      = BIT(13),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC  = BIT(14),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE        = BIT(15),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH      = BIT(16),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART     = BIT(17),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE     = BIT(18),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP  = BIT(19),
+       RX_PHY_PPDU_END_INFO0_ERR_HT_CRC          = BIT(20),
+       RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH       = BIT(21),
+       RX_PHY_PPDU_END_INFO0_ERR_HT_RATE         = BIT(22),
+       RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF          = BIT(23),
+       RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24),
+       RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD     = BIT(25),
+       RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN   = BIT(26),
+       RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW       = BIT(27),
+       RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28),
+       RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC         = BIT(29),
+       RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA        = BIT(30),
+       RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG        = BIT(31),
+};
+
+enum rx_phy_ppdu_end_info1 {
+       RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP            = BIT(0),
+       RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM           = BIT(1),
+       RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM     = BIT(2),
+       RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0    = BIT(3),
+       RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4),
+       RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63   = BIT(5),
+       RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER  = BIT(6),
+       RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP          = BIT(7),
+       RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT    = BIT(8),
+       RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK     = BIT(9),
+       RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION       = BIT(10),
+       RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK        = BIT(11),
+       RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX    = BIT(12),
+       RX_PHY_PPDU_END_INFO1_ERR_RX_CBF             = BIT(13),
+};
+
+struct rx_phy_ppdu_end {
+       __le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */
+       __le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RX_TIMING_OFFSET_MASK          0x00000fff
+#define RX_PPDU_END_RX_TIMING_OFFSET_LSB           0
+
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK        0x00ffffff
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB         0
+#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK          BIT(24)
+#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID       BIT(25)
+#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID  BIT(26)
+#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27)
+#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL    BIT(28)
+#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC      BIT(29)
+#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE       BIT(30)
+
+struct rx_ppdu_end_qca99x0 {
+       struct rx_pkt_end rx_pkt_end;
+       struct rx_phy_ppdu_end rx_phy_ppdu_end;
+       __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+       __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+       __le16 bb_length;
+       __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
 struct rx_ppdu_end {
        struct rx_ppdu_end_common common;
        union {
                struct rx_ppdu_end_qca988x qca988x;
                struct rx_ppdu_end_qca6174 qca6174;
+               struct rx_ppdu_end_qca99x0 qca99x0;
        } __packed;
 } __packed;
 
index 6cf289158840a3ba01da0f82c81a4e07916a4b70..e4a9c4c8d0cb7e8f7a9a89372e6537d9efc9887e 100644 (file)
@@ -53,8 +53,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        struct ath10k_skb_cb *skb_cb;
        struct sk_buff *msdu;
 
-       lockdep_assert_held(&htt->tx_lock);
-
        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
                   tx_done->msdu_id, !!tx_done->discard,
@@ -66,12 +64,19 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
                return;
        }
 
+       spin_lock_bh(&htt->tx_lock);
        msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
        if (!msdu) {
                ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
                            tx_done->msdu_id);
+               spin_unlock_bh(&htt->tx_lock);
                return;
        }
+       ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
+       __ath10k_htt_tx_dec_pending(htt);
+       if (htt->num_pending_tx == 0)
+               wake_up(&htt->empty_tx_wq);
+       spin_unlock_bh(&htt->tx_lock);
 
        skb_cb = ATH10K_SKB_CB(msdu);
 
@@ -90,7 +95,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
 
        if (tx_done->discard) {
                ieee80211_free_txskb(htt->ar->hw, msdu);
-               goto exit;
+               return;
        }
 
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
@@ -104,12 +109,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
 
        ieee80211_tx_status(htt->ar->hw, msdu);
        /* we do not own the msdu anymore */
-
-exit:
-       ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
-       __ath10k_htt_tx_dec_pending(htt);
-       if (htt->num_pending_tx == 0)
-               wake_up(&htt->empty_tx_wq);
 }
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
index 47fe2e756becd4ebacabf199ee204f732413e378..2591018c4dc5e1d528b6550a6cc8141d9531f54a 100644 (file)
@@ -49,6 +49,7 @@ struct wmi_ops {
                            struct wmi_roam_ev_arg *arg);
        int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
                              struct wmi_wow_ev_arg *arg);
+       enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
 
        struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
        struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
@@ -319,6 +320,15 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
        return ar->wmi.ops->pull_wow_event(ar, skb, arg);
 }
 
+static inline enum wmi_txbf_conf
+ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
+{
+       if (!ar->wmi.ops->get_txbf_conf_scheme)
+               return WMI_TXBF_CONF_UNSUPPORTED;
+
+       return ar->wmi.ops->get_txbf_conf_scheme(ar);
+}
+
 static inline int
 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 {
index 4189d4a90ce0a8e6eeca9a9f2a25a1818fce3548..357b5a292a892cb29592278120406a5e07396e44 100644 (file)
@@ -519,7 +519,7 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_TLV_SERVICE_READY_EVENTID:
                ath10k_wmi_event_service_ready(ar, skb);
-               break;
+               return;
        case WMI_TLV_READY_EVENTID:
                ath10k_wmi_event_ready(ar, skb);
                break;
@@ -1279,6 +1279,11 @@ ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
        return skb;
 }
 
+static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
+{
+       return WMI_TXBF_CONF_AFTER_ASSOC;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
                                     u32 param_value)
@@ -1373,7 +1378,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
        cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
        cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
        cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
-       cfg->rx_decap_mode = __cpu_to_le32(1);
+       cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
        cfg->scan_max_pending_reqs = __cpu_to_le32(4);
        cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
        cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@@ -3408,6 +3413,7 @@ static const struct wmi_ops wmi_tlv_ops = {
        .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
        .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
        .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+       .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
 
        .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
index 0791a4336e80f4b4886b62ef89eb892bb73342ce..36b8f7148b5162da910d0f1fc46c8133ceabe1f8 100644 (file)
@@ -3122,6 +3122,11 @@ static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
        return 0;
 }
 
+static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
+{
+       return WMI_TXBF_CONF_BEFORE_ASSOC;
+}
+
 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_swba_ev_arg arg = {};
@@ -3498,7 +3503,7 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
                                                          fftr, fftr_len,
                                                          tsf);
                        if (res < 0) {
-                               ath10k_warn(ar, "failed to process fft report: %d\n",
+                               ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
                                            res);
                                return;
                        }
@@ -3789,7 +3794,7 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
        ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
                                                           pool_size,
                                                           &paddr,
-                                                          GFP_ATOMIC);
+                                                          GFP_KERNEL);
        if (!ar->wmi.mem_chunks[idx].vaddr) {
                ath10k_warn(ar, "failed to allocate memory chunk\n");
                return -ENOMEM;
@@ -3878,12 +3883,19 @@ ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
        return 0;
 }
 
-void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
 {
+       struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
+       struct sk_buff *skb = ar->svc_rdy_skb;
        struct wmi_svc_rdy_ev_arg arg = {};
        u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
        int ret;
 
+       if (!skb) {
+               ath10k_warn(ar, "invalid service ready event skb\n");
+               return;
+       }
+
        ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
        if (ret) {
                ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
@@ -4003,9 +4015,17 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
                   __le32_to_cpu(arg.eeprom_rd),
                   __le32_to_cpu(arg.num_mem_reqs));
 
+       dev_kfree_skb(skb);
+       ar->svc_rdy_skb = NULL;
        complete(&ar->wmi.service_ready);
 }
 
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+{
+       ar->svc_rdy_skb = skb;
+       queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
+}
+
 static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
                                     struct wmi_rdy_ev_arg *arg)
 {
@@ -4177,7 +4197,7 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_SERVICE_READY_EVENTID:
                ath10k_wmi_event_service_ready(ar, skb);
-               break;
+               return;
        case WMI_READY_EVENTID:
                ath10k_wmi_event_ready(ar, skb);
                break;
@@ -4298,7 +4318,7 @@ static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_10X_SERVICE_READY_EVENTID:
                ath10k_wmi_event_service_ready(ar, skb);
-               break;
+               return;
        case WMI_10X_READY_EVENTID:
                ath10k_wmi_event_ready(ar, skb);
                break;
@@ -4409,7 +4429,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_10_2_SERVICE_READY_EVENTID:
                ath10k_wmi_event_service_ready(ar, skb);
-               break;
+               return;
        case WMI_10_2_READY_EVENTID:
                ath10k_wmi_event_ready(ar, skb);
                break;
@@ -4461,7 +4481,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_10_4_SERVICE_READY_EVENTID:
                ath10k_wmi_event_service_ready(ar, skb);
-               break;
+               return;
        case WMI_10_4_SCAN_EVENTID:
                ath10k_wmi_event_scan(ar, skb);
                break;
@@ -4688,8 +4708,7 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
        config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
-       config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
-
+       config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
        config.scan_max_pending_reqs =
                __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
 
@@ -4757,8 +4776,7 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
        config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
-       config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
-
+       config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
        config.scan_max_pending_reqs =
                __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
 
@@ -4823,7 +4841,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
        config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
-       config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+       config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
 
        config.scan_max_pending_reqs =
                __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
@@ -6431,6 +6449,7 @@ static const struct wmi_ops wmi_10_4_ops = {
        .pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
        .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
        .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+       .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
 
        .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -6514,6 +6533,8 @@ int ath10k_wmi_attach(struct ath10k *ar)
        init_completion(&ar->wmi.service_ready);
        init_completion(&ar->wmi.unified_ready);
 
+       INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
+
        return 0;
 }
 
@@ -6521,6 +6542,11 @@ void ath10k_wmi_detach(struct ath10k *ar)
 {
        int i;
 
+       cancel_work_sync(&ar->svc_rdy_work);
+
+       if (ar->svc_rdy_skb)
+               dev_kfree_skb(ar->svc_rdy_skb);
+
        /* free the host memory chunks requested by firmware */
        for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
                dma_free_coherent(ar->dev,
index 0d4efc9c5796432cec96bacf70c83a4f94d8e7dc..232500a5d7bd9e41da6a6380e22908873a92850b 100644 (file)
@@ -4628,6 +4628,11 @@ enum wmi_10_4_vdev_param {
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
 
+#define WMI_TXBF_STS_CAP_OFFSET_LSB    4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK   0xf0
+#define WMI_BF_SOUND_DIM_OFFSET_LSB    8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK   0xf00
+
 /* slot time long */
 #define WMI_VDEV_SLOT_TIME_LONG                0x1
 /* slot time short */
@@ -6008,6 +6013,12 @@ struct wmi_tdls_peer_capab_arg {
        u32 pref_offchan_bw;
 };
 
+enum wmi_txbf_conf {
+       WMI_TXBF_CONF_UNSUPPORTED,
+       WMI_TXBF_CONF_BEFORE_ASSOC,
+       WMI_TXBF_CONF_AFTER_ASSOC,
+};
+
 struct ath10k;
 struct ath10k_vif;
 struct ath10k_fw_stats_pdev;
index a68d8fd853a3bb6f314a5c26d8f480c8d00a1792..8e02b381990f138c7e18c70b4d2789f6460d868e 100644 (file)
@@ -301,8 +301,26 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
                ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
 
 exit:
+       if (ret) {
+               switch (ar->state) {
+               case ATH10K_STATE_ON:
+                       ar->state = ATH10K_STATE_RESTARTING;
+                       ret = 1;
+                       break;
+               case ATH10K_STATE_OFF:
+               case ATH10K_STATE_RESTARTING:
+               case ATH10K_STATE_RESTARTED:
+               case ATH10K_STATE_UTF:
+               case ATH10K_STATE_WEDGED:
+                       ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
+                                   ar->state);
+                       ret = -EIO;
+                       break;
+               }
+       }
+
        mutex_unlock(&ar->conf_mutex);
-       return ret ? 1 : 0;
+       return ret;
 }
 
 int ath10k_wow_init(struct ath10k *ar)
index 2399a39217625e55c9a52551263e3b9058d144c5..b1278f9f24baee8ba4c08a107d1c3d2ca5432c03 100644 (file)
@@ -5,7 +5,6 @@ config ATH5K
        select MAC80211_LEDS
        select LEDS_CLASS
        select NEW_LEDS
-       select AVERAGE
        select ATH5K_AHB if ATH25
        select ATH5K_PCI if !ATH25
        ---help---
index 5c008757662b50e05b63024a2e293bfd1a30d671..38be2702c0e23a485f886315a3d5966c13ba3848 100644 (file)
@@ -223,7 +223,7 @@ static void
 ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
                         bool ofdm_trigger)
 {
-       int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+       int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
 
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)",
                ofdm_trigger ? "ODFM" : "CCK");
@@ -309,7 +309,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
 static void
 ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
 {
-       int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+       int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
 
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity");
 
index e22b0e778927155ed693ad84c177b9a6c8d03715..fa6e89e5c4213dbb76da4a5ef2b780550cd6b954 100644 (file)
@@ -1252,6 +1252,8 @@ struct ath5k_statistics {
 #define ATH5K_TXQ_LEN_MAX      (ATH_TXBUF / 4)         /* bufs per queue */
 #define ATH5K_TXQ_LEN_LOW      (ATH5K_TXQ_LEN_MAX / 2) /* low mark */
 
+DECLARE_EWMA(beacon_rssi, 1024, 8)
+
 /* Driver state associated with an instance of a device */
 struct ath5k_hw {
        struct ath_common       common;
@@ -1432,7 +1434,7 @@ struct ath5k_hw {
        struct ath5k_nfcal_hist ah_nfcal_hist;
 
        /* average beacon RSSI in our BSS (used by ANI) */
-       struct ewma             ah_beacon_rssi_avg;
+       struct ewma_beacon_rssi ah_beacon_rssi_avg;
 
        /* noise floor from last periodic calibration */
        s32                     ah_noise_floor;
index 23552f43d1253dd81de3e4f7f6f400a14bff2bd4..342563a3706f403b445a24028f19b059ccd6285a 100644 (file)
@@ -1430,7 +1430,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
        trace_ath5k_rx(ah, skb);
 
        if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
-               ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
+               ewma_beacon_rssi_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
 
                /* check beacons in IBSS mode */
                if (ah->opmode == NL80211_IFTYPE_ADHOC)
@@ -2936,7 +2936,7 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
        ah->ah_cal_next_short = jiffies +
                msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
 
-       ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
+       ewma_beacon_rssi_init(&ah->ah_beacon_rssi_avg);
 
        /* clear survey data and cycle counters */
        memset(&ah->survey, 0, sizeof(ah->survey));
index c70782e8f07bd704b2ff495339eb5ab6b65864bc..654a1e33f8278743fdb6034e1681b2a3e4aa8515 100644 (file)
@@ -722,7 +722,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
                        st->mib_intr);
        len += snprintf(buf + len, sizeof(buf) - len,
                        "beacon RSSI average:\t%d\n",
-                       (int)ewma_read(&ah->ah_beacon_rssi_avg));
+                       (int)ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg));
 
 #define CC_PRINT(_struct, _field) \
        _struct._field, \
index 14cab1403dd6071d48179922346b676d89e9610b..112d8a9b8d4319d1a8cbc9dd956911a4c09b39f6 100644 (file)
@@ -427,7 +427,7 @@ struct htc_endpoint_credit_dist {
 };
 
 /*
- * credit distibution code that is passed into the distrbution function,
+ * credit distribution code that is passed into the distribution function,
  * there are mandatory and optional codes that must be handled
  */
 enum htc_credit_dist_reason {
index a7a81b3969cec7e79b2cb73959c4d8ff1fb7489e..c85c47978e1e48e4dda029b1f9d55e5c04accf11 100644 (file)
@@ -172,14 +172,6 @@ struct ath_txq {
        struct sk_buff_head complete_q;
 };
 
-struct ath_atx_ac {
-       struct ath_txq *txq;
-       struct list_head list;
-       struct list_head tid_q;
-       bool clear_ps_filter;
-       bool sched;
-};
-
 struct ath_frame_info {
        struct ath_buf *bf;
        u16 framelen;
@@ -242,7 +234,7 @@ struct ath_atx_tid {
        struct sk_buff_head buf_q;
        struct sk_buff_head retry_q;
        struct ath_node *an;
-       struct ath_atx_ac *ac;
+       struct ath_txq *txq;
        unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
        u16 seq_start;
        u16 seq_next;
@@ -252,8 +244,8 @@ struct ath_atx_tid {
        int baw_tail;   /* next unused tx buffer slot */
 
        s8 bar_index;
-       bool sched;
        bool active;
+       bool clear_ps_filter;
 };
 
 struct ath_node {
@@ -261,7 +253,6 @@ struct ath_node {
        struct ieee80211_sta *sta; /* station struct we're part of */
        struct ieee80211_vif *vif; /* interface with which we're associated */
        struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
-       struct ath_atx_ac ac[IEEE80211_NUM_ACS];
 
        u16 maxampdu;
        u8 mpdudensity;
@@ -410,6 +401,12 @@ enum ath_offchannel_state {
        ATH_OFFCHANNEL_ROC_DONE,
 };
 
+enum ath_roc_complete_reason {
+       ATH_ROC_COMPLETE_EXPIRE,
+       ATH_ROC_COMPLETE_ABORT,
+       ATH_ROC_COMPLETE_CANCEL,
+};
+
 struct ath_offchannel {
        struct ath_chanctx chan;
        struct timer_list timer;
@@ -471,7 +468,8 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
 void ath_chanctx_set_next(struct ath_softc *sc, bool force);
 void ath_offchannel_next(struct ath_softc *sc);
 void ath_scan_complete(struct ath_softc *sc, bool abort);
-void ath_roc_complete(struct ath_softc *sc, bool abort);
+void ath_roc_complete(struct ath_softc *sc,
+                     enum ath_roc_complete_reason reason);
 struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc);
 
 #else
index 206665059d66a67b0bbd2a816ba4c2c4d187ee48..90f5773a1a614e34bceac511ba5d415d8614596b 100644 (file)
@@ -915,18 +915,27 @@ void ath_offchannel_next(struct ath_softc *sc)
        }
 }
 
-void ath_roc_complete(struct ath_softc *sc, bool abort)
+void ath_roc_complete(struct ath_softc *sc, enum ath_roc_complete_reason reason)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
-       if (abort)
+       sc->offchannel.roc_vif = NULL;
+       sc->offchannel.roc_chan = NULL;
+
+       switch (reason) {
+       case ATH_ROC_COMPLETE_ABORT:
                ath_dbg(common, CHAN_CTX, "RoC aborted\n");
-       else
+               ieee80211_remain_on_channel_expired(sc->hw);
+               break;
+       case ATH_ROC_COMPLETE_EXPIRE:
                ath_dbg(common, CHAN_CTX, "RoC expired\n");
+               ieee80211_remain_on_channel_expired(sc->hw);
+               break;
+       case ATH_ROC_COMPLETE_CANCEL:
+               ath_dbg(common, CHAN_CTX, "RoC canceled\n");
+               break;
+       }
 
-       sc->offchannel.roc_vif = NULL;
-       sc->offchannel.roc_chan = NULL;
-       ieee80211_remain_on_channel_expired(sc->hw);
        ath_offchannel_next(sc);
        ath9k_ps_restore(sc);
 }
@@ -1058,7 +1067,7 @@ static void ath_offchannel_timer(unsigned long data)
        case ATH_OFFCHANNEL_ROC_START:
        case ATH_OFFCHANNEL_ROC_WAIT:
                sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE;
-               ath_roc_complete(sc, false);
+               ath_roc_complete(sc, ATH_ROC_COMPLETE_EXPIRE);
                break;
        default:
                break;
index ffca918ff16aff4be941d572ac19d4f152e5b2c2..c2ca57a2ed09d0f14ee10520b8a8c27f5d5543c2 100644 (file)
@@ -26,12 +26,11 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
        struct ath_node *an = file->private_data;
        struct ath_softc *sc = an->sc;
        struct ath_atx_tid *tid;
-       struct ath_atx_ac *ac;
        struct ath_txq *txq;
        u32 len = 0, size = 4096;
        char *buf;
        size_t retval;
-       int tidno, acno;
+       int tidno;
 
        buf = kzalloc(size, GFP_KERNEL);
        if (buf == NULL)
@@ -48,19 +47,6 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
        len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
                         an->mpdudensity);
 
-       len += scnprintf(buf + len, size - len,
-                        "%2s%7s\n", "AC", "SCHED");
-
-       for (acno = 0, ac = &an->ac[acno];
-            acno < IEEE80211_NUM_ACS; acno++, ac++) {
-               txq = ac->txq;
-               ath_txq_lock(sc, txq);
-               len += scnprintf(buf + len, size - len,
-                                "%2d%7d\n",
-                                acno, ac->sched);
-               ath_txq_unlock(sc, txq);
-       }
-
        len += scnprintf(buf + len, size - len,
                         "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
                         "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
@@ -68,7 +54,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
 
        for (tidno = 0, tid = &an->tid[tidno];
             tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-               txq = tid->ac->txq;
+               txq = tid->txq;
                ath_txq_lock(sc, txq);
                if (tid->active) {
                        len += scnprintf(buf + len, size - len,
@@ -80,7 +66,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
                                         tid->baw_head,
                                         tid->baw_tail,
                                         tid->bar_index,
-                                        tid->sched);
+                                        !list_empty(&tid->list));
                }
                ath_txq_unlock(sc, txq);
        }
index 39eaf9b6e9b45c610dbf40208151431bbf51b64c..1e84882f8c5b35374df2a6460aada9bfac1bfa5a 100644 (file)
@@ -74,7 +74,7 @@ static struct ath_ps_ops ath9k_htc_ps_ops = {
 
 static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
 {
-       int time_left;
+       unsigned long time_left;
 
        if (atomic_read(&priv->htc->tgt_ready) > 0) {
                atomic_dec(&priv->htc->tgt_ready);
index d2408da38c1c1321354c1fb6f931cfb202250949..2294709ee8b0ae62144be926f55f4ac4f10a86c6 100644 (file)
@@ -146,7 +146,8 @@ static int htc_config_pipe_credits(struct htc_target *target)
 {
        struct sk_buff *skb;
        struct htc_config_pipe_msg *cp_msg;
-       int ret, time_left;
+       int ret;
+       unsigned long time_left;
 
        skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
        if (!skb) {
@@ -184,7 +185,8 @@ static int htc_setup_complete(struct htc_target *target)
 {
        struct sk_buff *skb;
        struct htc_comp_msg *comp_msg;
-       int ret = 0, time_left;
+       int ret = 0;
+       unsigned long time_left;
 
        skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
        if (!skb) {
@@ -236,7 +238,8 @@ int htc_connect_service(struct htc_target *target,
        struct sk_buff *skb;
        struct htc_endpoint *endpoint;
        struct htc_conn_svc_msg *conn_msg;
-       int ret, time_left;
+       int ret;
+       unsigned long time_left;
 
        /* Find an available endpoint */
        endpoint = get_next_avail_ep(target->endpoint);
index a31a6804dc34eff8174b06e15d8ee14bb5405888..1dd0339de372901d753fa70f975e4c5b0f492f21 100644 (file)
@@ -3186,6 +3186,7 @@ static struct {
        { AR_SREV_VERSION_9550,         "9550" },
        { AR_SREV_VERSION_9565,         "9565" },
        { AR_SREV_VERSION_9531,         "9531" },
+       { AR_SREV_VERSION_9561,         "9561" },
 };
 
 /* For devices with external radios */
index eff0e5325e6a304e376b71d33d6874318e087a53..57f95f2dca5b072ac294b2c76122ec1b952a2a80 100644 (file)
@@ -736,13 +736,14 @@ static const struct ieee80211_iface_limit if_limits_multi[] = {
                                 BIT(NL80211_IFTYPE_P2P_CLIENT) |
                                 BIT(NL80211_IFTYPE_P2P_GO) },
        { .max = 1,     .types = BIT(NL80211_IFTYPE_ADHOC) },
+       { .max = 1,     .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
 };
 
 static const struct ieee80211_iface_combination if_comb_multi[] = {
        {
                .limits = if_limits_multi,
                .n_limits = ARRAY_SIZE(if_limits_multi),
-               .max_interfaces = 2,
+               .max_interfaces = 3,
                .num_different_channels = 2,
                .beacon_int_infra_match = true,
        },
@@ -826,6 +827,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
        ieee80211_hw_set(hw, SIGNAL_DBM);
        ieee80211_hw_set(hw, RX_INCLUDES_FCS);
        ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+       ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
 
        if (ath9k_ps_enable)
                ieee80211_hw_set(hw, SUPPORTS_PS);
@@ -855,6 +857,10 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                        BIT(NL80211_IFTYPE_MESH_POINT) |
                        BIT(NL80211_IFTYPE_WDS);
 
+               if (ath9k_is_chanctx_enabled())
+                       hw->wiphy->interface_modes |=
+                                       BIT(NL80211_IFTYPE_P2P_DEVICE);
+
                        hw->wiphy->iface_combinations = if_comb;
                        hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
        }
index 90631d768a60fa70a8239d529758edbbed9484c1..5ad0feeebc8669c530e6ae4aeba536f4375840e7 100644 (file)
@@ -172,7 +172,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_tx_control txctl;
-       int time_left;
+       unsigned long time_left;
 
        memset(&txctl, 0, sizeof(txctl));
        txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];
index cfd45cb8ccfc13df856bb4bbe07d8e2bef21228f..c27143ba9ffbe8e6d863c0353e3ef3effd29bd5e 100644 (file)
@@ -1459,13 +1459,18 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
                                   u64 multicast)
 {
        struct ath_softc *sc = hw->priv;
+       struct ath_chanctx *ctx;
        u32 rfilt;
 
        changed_flags &= SUPPORTED_FILTERS;
        *total_flags &= SUPPORTED_FILTERS;
 
        spin_lock_bh(&sc->chan_lock);
-       sc->cur_chan->rxfilter = *total_flags;
+       ath_for_each_chanctx(sc, ctx)
+               ctx->rxfilter = *total_flags;
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+       sc->offchannel.chan.rxfilter = *total_flags;
+#endif
        spin_unlock_bh(&sc->chan_lock);
 
        ath9k_ps_wakeup(sc);
@@ -2246,7 +2251,7 @@ static void ath9k_cancel_pending_offchannel(struct ath_softc *sc)
 
                del_timer_sync(&sc->offchannel.timer);
                if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
-                       ath_roc_complete(sc, true);
+                       ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT);
        }
 
        if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
@@ -2355,7 +2360,7 @@ static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw)
 
        if (sc->offchannel.roc_vif) {
                if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
-                       ath_roc_complete(sc, true);
+                       ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL);
        }
 
        mutex_unlock(&sc->mutex);
index ca533b4321bddc9ee7621ca4e3dbbe5f03377236..9c16e2a6d185e31c1391e6d4aa4f14c2c9013cd4 100644 (file)
@@ -299,7 +299,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
                       sizeof(struct wmi_cmd_hdr);
        struct sk_buff *skb;
        u8 *data;
-       int time_left, ret = 0;
+       unsigned long time_left;
+       int ret = 0;
 
        if (ah->ah_flags & AH_UNPLUGGED)
                return 0;
index b766a7fc60aaa051a519ed18830444322e98d4b3..3e3dac3d70604fcb83b71f80fa3e19412a0cdbc3 100644 (file)
@@ -106,7 +106,6 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
 static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
                             struct ath_atx_tid *tid)
 {
-       struct ath_atx_ac *ac = tid->ac;
        struct list_head *list;
        struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
        struct ath_chanctx *ctx = avp->chanctx;
@@ -114,19 +113,9 @@ static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
        if (!ctx)
                return;
 
-       if (tid->sched)
-               return;
-
-       tid->sched = true;
-       list_add_tail(&tid->list, &ac->tid_q);
-
-       if (ac->sched)
-               return;
-
-       ac->sched = true;
-
        list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
-       list_add_tail(&ac->list, list);
+       if (list_empty(&tid->list))
+               list_add_tail(&tid->list, list);
 }
 
 static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -208,7 +197,7 @@ static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
 static void
 ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-       struct ath_txq *txq = tid->ac->txq;
+       struct ath_txq *txq = tid->txq;
        struct ieee80211_tx_info *tx_info;
        struct sk_buff *skb, *tskb;
        struct ath_buf *bf;
@@ -237,7 +226,7 @@ ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
 
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-       struct ath_txq *txq = tid->ac->txq;
+       struct ath_txq *txq = tid->txq;
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
@@ -644,7 +633,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                        ath_tx_queue_tid(sc, txq, tid);
 
                        if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
-                               tid->ac->clear_ps_filter = true;
+                               tid->clear_ps_filter = true;
                }
        }
 
@@ -734,7 +723,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, bt_aggr_limit, legacy = 0;
-       int q = tid->ac->txq->mac80211_qnum;
+       int q = tid->txq->mac80211_qnum;
        int i;
 
        skb = bf->bf_mpdu;
@@ -1471,8 +1460,8 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
        if (list_empty(&bf_q))
                return false;
 
-       if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
-               tid->ac->clear_ps_filter = false;
+       if (tid->clear_ps_filter || tid->an->no_ps_filter) {
+               tid->clear_ps_filter = false;
                tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
        }
 
@@ -1491,7 +1480,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
 
        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);
-       txq = txtid->ac->txq;
+       txq = txtid->txq;
 
        ath_txq_lock(sc, txq);
 
@@ -1525,7 +1514,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 {
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
-       struct ath_txq *txq = txtid->ac->txq;
+       struct ath_txq *txq = txtid->txq;
 
        ath_txq_lock(sc, txq);
        txtid->active = false;
@@ -1538,7 +1527,6 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
                       struct ath_node *an)
 {
        struct ath_atx_tid *tid;
-       struct ath_atx_ac *ac;
        struct ath_txq *txq;
        bool buffered;
        int tidno;
@@ -1546,25 +1534,18 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
        for (tidno = 0, tid = &an->tid[tidno];
             tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-               ac = tid->ac;
-               txq = ac->txq;
+               txq = tid->txq;
 
                ath_txq_lock(sc, txq);
 
-               if (!tid->sched) {
+               if (list_empty(&tid->list)) {
                        ath_txq_unlock(sc, txq);
                        continue;
                }
 
                buffered = ath_tid_has_buffered(tid);
 
-               tid->sched = false;
-               list_del(&tid->list);
-
-               if (ac->sched) {
-                       ac->sched = false;
-                       list_del(&ac->list);
-               }
+               list_del_init(&tid->list);
 
                ath_txq_unlock(sc, txq);
 
@@ -1575,18 +1556,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
 {
        struct ath_atx_tid *tid;
-       struct ath_atx_ac *ac;
        struct ath_txq *txq;
        int tidno;
 
        for (tidno = 0, tid = &an->tid[tidno];
             tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-               ac = tid->ac;
-               txq = ac->txq;
+               txq = tid->txq;
 
                ath_txq_lock(sc, txq);
-               ac->clear_ps_filter = true;
+               tid->clear_ps_filter = true;
 
                if (ath_tid_has_buffered(tid)) {
                        ath_tx_queue_tid(sc, txq, tid);
@@ -1606,7 +1585,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
 
        an = (struct ath_node *)sta->drv_priv;
        tid = ATH_AN_2_TID(an, tidno);
-       txq = tid->ac->txq;
+       txq = tid->txq;
 
        ath_txq_lock(sc, txq);
 
@@ -1645,7 +1624,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
 
                tid = ATH_AN_2_TID(an, i);
 
-               ath_txq_lock(sc, tid->ac->txq);
+               ath_txq_lock(sc, tid->txq);
                while (nframes > 0) {
                        bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
                        if (!bf)
@@ -1669,7 +1648,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
                        if (an->sta && !ath_tid_has_buffered(tid))
                                ieee80211_sta_set_buffered(an->sta, i, false);
                }
-               ath_txq_unlock_complete(sc, tid->ac->txq);
+               ath_txq_unlock_complete(sc, tid->txq);
        }
 
        if (list_empty(&bf_q))
@@ -1918,9 +1897,8 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_atx_ac *ac, *last_ac;
        struct ath_atx_tid *tid, *last_tid;
-       struct list_head *ac_list;
+       struct list_head *tid_list;
        bool sent = false;
 
        if (txq->mac80211_qnum < 0)
@@ -1930,63 +1908,45 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                return;
 
        spin_lock_bh(&sc->chan_lock);
-       ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+       tid_list = &sc->cur_chan->acq[txq->mac80211_qnum];
 
-       if (list_empty(ac_list)) {
+       if (list_empty(tid_list)) {
                spin_unlock_bh(&sc->chan_lock);
                return;
        }
 
        rcu_read_lock();
 
-       last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
-       while (!list_empty(ac_list)) {
+       last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list);
+       while (!list_empty(tid_list)) {
                bool stop = false;
 
                if (sc->cur_chan->stopped)
                        break;
 
-               ac = list_first_entry(ac_list, struct ath_atx_ac, list);
-               last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
-               list_del(&ac->list);
-               ac->sched = false;
-
-               while (!list_empty(&ac->tid_q)) {
-
-                       tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
-                                              list);
-                       list_del(&tid->list);
-                       tid->sched = false;
+               tid = list_first_entry(tid_list, struct ath_atx_tid, list);
+               list_del_init(&tid->list);
 
-                       if (ath_tx_sched_aggr(sc, txq, tid, &stop))
-                               sent = true;
-
-                       /*
-                        * add tid to round-robin queue if more frames
-                        * are pending for the tid
-                        */
-                       if (ath_tid_has_buffered(tid))
-                               ath_tx_queue_tid(sc, txq, tid);
+               if (ath_tx_sched_aggr(sc, txq, tid, &stop))
+                       sent = true;
 
-                       if (stop || tid == last_tid)
-                               break;
-               }
-
-               if (!list_empty(&ac->tid_q) && !ac->sched) {
-                       ac->sched = true;
-                       list_add_tail(&ac->list, ac_list);
-               }
+               /*
+                * add tid to round-robin queue if more frames
+                * are pending for the tid
+                */
+               if (ath_tid_has_buffered(tid))
+                       ath_tx_queue_tid(sc, txq, tid);
 
                if (stop)
                        break;
 
-               if (ac == last_ac) {
+               if (tid == last_tid) {
                        if (!sent)
                                break;
 
                        sent = false;
-                       last_ac = list_entry(ac_list->prev,
-                                            struct ath_atx_ac, list);
+                       last_tid = list_entry(tid_list->prev,
+                                             struct ath_atx_tid, list);
                }
        }
 
@@ -2376,10 +2336,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                txq = sc->tx.uapsdq;
                ath_txq_lock(sc, txq);
        } else if (txctl->an && queue) {
-               WARN_ON(tid->ac->txq != txctl->txq);
+               WARN_ON(tid->txq != txctl->txq);
 
                if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
-                       tid->ac->clear_ps_filter = true;
+                       tid->clear_ps_filter = true;
 
                /*
                 * Add this frame to software queue for scheduling later
@@ -2873,7 +2833,6 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
 {
        struct ath_atx_tid *tid;
-       struct ath_atx_ac *ac;
        int tidno, acno;
 
        for (tidno = 0, tid = &an->tid[tidno];
@@ -2884,26 +2843,18 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
                tid->seq_start = tid->seq_next = 0;
                tid->baw_size  = WME_MAX_BA;
                tid->baw_head  = tid->baw_tail = 0;
-               tid->sched     = false;
                tid->active        = false;
+               tid->clear_ps_filter = true;
                __skb_queue_head_init(&tid->buf_q);
                __skb_queue_head_init(&tid->retry_q);
+               INIT_LIST_HEAD(&tid->list);
                acno = TID_TO_WME_AC(tidno);
-               tid->ac = &an->ac[acno];
-       }
-
-       for (acno = 0, ac = &an->ac[acno];
-            acno < IEEE80211_NUM_ACS; acno++, ac++) {
-               ac->sched    = false;
-               ac->clear_ps_filter = true;
-               ac->txq = sc->tx.txq_map[acno];
-               INIT_LIST_HEAD(&ac->tid_q);
+               tid->txq = sc->tx.txq_map[acno];
        }
 }
 
 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 {
-       struct ath_atx_ac *ac;
        struct ath_atx_tid *tid;
        struct ath_txq *txq;
        int tidno;
@@ -2911,20 +2862,12 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
        for (tidno = 0, tid = &an->tid[tidno];
             tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-               ac = tid->ac;
-               txq = ac->txq;
+               txq = tid->txq;
 
                ath_txq_lock(sc, txq);
 
-               if (tid->sched) {
-                       list_del(&tid->list);
-                       tid->sched = false;
-               }
-
-               if (ac->sched) {
-                       list_del(&ac->list);
-                       tid->ac->sched = false;
-               }
+               if (!list_empty(&tid->list))
+                       list_del_init(&tid->list);
 
                ath_tid_drain(sc, txq, tid);
                tid->active = false;
index 508eccf5d982c8d821edec4fc4b8a578bf2e01e2..d59d83e0ce4b9588e5169b2b28121d82935d187b 100644 (file)
@@ -40,6 +40,8 @@ const char *ath_opmode_to_string(enum nl80211_iftype opmode)
                return "P2P-CLIENT";
        case NL80211_IFTYPE_P2P_GO:
                return "P2P-GO";
+       case NL80211_IFTYPE_OCB:
+               return "OCB";
        default:
                return "UNKNOWN";
        }
index 050506f842e9a63298920af2f5b8700e80aaf87b..64b432625fbbdb5ad5c33d9920065937ac2f5572 100644 (file)
@@ -12,6 +12,7 @@ wil6210-y += debug.o
 wil6210-y += rx_reorder.o
 wil6210-y += ioctl.o
 wil6210-y += fw.o
+wil6210-y += pm.o
 wil6210-y += pmc.o
 wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
 wil6210-y += wil_platform.o
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
new file mode 100644 (file)
index 0000000..c131b5e
--- /dev/null
@@ -0,0 +1,61 @@
+/* Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file contains the definitions for the boot loader
+ * for the Qualcomm "Sparrow" 60 Gigabit wireless solution.
+ */
+#ifndef BOOT_LOADER_EXPORT_H_
+#define BOOT_LOADER_EXPORT_H_
+
+struct bl_dedicated_registers_v1 {
+       __le32  boot_loader_ready;              /* 0x880A3C driver polls
+                                                * this Dword until the BL
+                                                * sets it to 1 (initial value
+                                                * should be 0)
+                                                */
+       __le32  boot_loader_struct_version;     /* 0x880A40 BL struct ver. */
+       __le16  rf_type;                        /* 0x880A44 connected RF ID */
+       __le16  rf_status;                      /* 0x880A46 RF status,
+                                                * 0 is OK else error
+                                                */
+       __le32  baseband_type;                  /* 0x880A48 board type ID */
+       u8      mac_address[6];                 /* 0x880A4c BL mac address */
+       u8      bl_version_major;               /* 0x880A52 BL ver. major */
+       u8      bl_version_minor;               /* 0x880A53 BL ver. minor */
+       __le16  bl_version_subminor;            /* 0x880A54 BL ver. subminor */
+       __le16  bl_version_build;               /* 0x880A56 BL ver. build */
+       /* valid only for version 2 and above */
+       __le32  bl_assert_code;         /* 0x880A58 BL Assert code */
+       __le32  bl_assert_blink;        /* 0x880A5C BL Assert Branch */
+       __le32  bl_reserved[22];        /* 0x880A60 - 0x880AB4 */
+       __le32  bl_magic_number;        /* 0x880AB8 BL Magic number */
+} __packed;
+
+/* the following struct is the version 0 struct */
+
+struct bl_dedicated_registers_v0 {
+       __le32  boot_loader_ready;              /* 0x880A3C driver polls
+                                                * this Dword until the BL
+                                                * sets it to 1 (initial value
+                                                * should be 0)
+                                                */
+#define BL_READY (1)   /* ready indication */
+       __le32  boot_loader_struct_version;     /* 0x880A40 BL struct ver. */
+       __le32  rf_type;                        /* 0x880A44 connected RF ID */
+       __le32  baseband_type;                  /* 0x880A48 board type ID */
+       u8      mac_address[6];                 /* 0x880A4c BL mac address */
+} __packed;
+
+#endif /* BOOT_LOADER_EXPORT_H_ */
index e4be2d9bbac40415c8ffc660e674b5a1aba2920c..20d07ef679e89d467170abae532819f36f283821 100644 (file)
@@ -336,12 +336,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
        else
                wil_dbg_misc(wil, "Scan has no IE's\n");
 
-       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len,
-                       request->ie);
-       if (rc) {
-               wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc);
+       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+       if (rc)
                goto out;
-       }
 
        rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
                        cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
@@ -462,10 +459,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
         * ies in FW.
         */
        rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
-       if (rc) {
-               wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+       if (rc)
                goto out;
-       }
 
        /* WMI_CONNECT_CMD */
        memset(&conn, 0, sizeof(conn));
@@ -722,56 +717,52 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
 {
        struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
        size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-       int rc = 0;
 
        if (bcon->probe_resp_len <= hlen)
                return 0;
 
+/* always use IE's from the full probe frame; they have more info,
+ * notably RSN
+ */
+       bcon->proberesp_ies = f->u.probe_resp.variable;
+       bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
        if (!bcon->assocresp_ies) {
-               bcon->assocresp_ies = f->u.probe_resp.variable;
-               bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
-               rc = 1;
+               bcon->assocresp_ies = bcon->proberesp_ies;
+               bcon->assocresp_ies_len = bcon->proberesp_ies_len;
        }
 
-       return rc;
+       return 1;
 }
 
 /* internal functions for device reset and starting AP */
 static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
-                                size_t probe_ies_len, const u8 *probe_ies,
-                                size_t assoc_ies_len, const u8 *assoc_ies)
-
+                                struct cfg80211_beacon_data *bcon)
 {
        int rc;
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
-       /* FW do not form regular beacon, so bcon IE's are not set
-        * For the DMG bcon, when it will be supported, bcon IE's will
-        * be reused; add something like:
-        * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
-        * bcon->beacon_ies);
-        */
-       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, probe_ies_len, probe_ies);
-       if (rc) {
-               wil_err(wil, "set_ie(PROBE_RESP) failed\n");
+       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+                       bcon->proberesp_ies);
+       if (rc)
                return rc;
-       }
 
-       rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, assoc_ies_len, assoc_ies);
-       if (rc) {
-               wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
+       rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+                       bcon->assocresp_ies);
+#if 0 /* to use beacon IE's, remove this #if 0 */
+       if (rc)
                return rc;
-       }
 
-       return 0;
+       rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail);
+#endif
+
+       return rc;
 }
 
 static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
                                  struct net_device *ndev,
                                  const u8 *ssid, size_t ssid_len, u32 privacy,
                                  int bi, u8 chan,
-                                 size_t probe_ies_len, const u8 *probe_ies,
-                                 size_t assoc_ies_len, const u8 *assoc_ies,
+                                 struct cfg80211_beacon_data *bcon,
                                  u8 hidden_ssid)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
@@ -792,8 +783,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
        if (rc)
                goto out;
 
-       rc = _wil_cfg80211_set_ies(wiphy, probe_ies_len, probe_ies,
-                                  assoc_ies_len, assoc_ies);
+       rc = _wil_cfg80211_set_ies(wiphy, bcon);
        if (rc)
                goto out;
 
@@ -827,27 +817,20 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
                                      struct cfg80211_beacon_data *bcon)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-       struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
-       size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-       const u8 *pr_ies = NULL;
-       size_t pr_ies_len = 0;
        int rc;
        u32 privacy = 0;
 
        wil_dbg_misc(wil, "%s()\n", __func__);
        wil_print_bcon_data(bcon);
 
-       if (bcon->probe_resp_len > hlen) {
-               pr_ies = f->u.probe_resp.variable;
-               pr_ies_len = bcon->probe_resp_len - hlen;
-       }
-
        if (wil_fix_bcon(wil, bcon)) {
                wil_dbg_misc(wil, "Fixed bcon\n");
                wil_print_bcon_data(bcon);
        }
 
-       if (pr_ies && cfg80211_find_ie(WLAN_EID_RSN, pr_ies, pr_ies_len))
+       if (bcon->proberesp_ies &&
+           cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies,
+                            bcon->proberesp_ies_len))
                privacy = 1;
 
        /* in case privacy has changed, need to restart the AP */
@@ -860,14 +843,10 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
                rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
                                            wdev->ssid_len, privacy,
                                            wdev->beacon_interval,
-                                           wil->channel, pr_ies_len, pr_ies,
-                                           bcon->assocresp_ies_len,
-                                           bcon->assocresp_ies,
+                                           wil->channel, bcon,
                                            wil->hidden_ssid);
        } else {
-               rc = _wil_cfg80211_set_ies(wiphy, pr_ies_len, pr_ies,
-                                          bcon->assocresp_ies_len,
-                                          bcon->assocresp_ies);
+               rc = _wil_cfg80211_set_ies(wiphy, bcon);
        }
 
        return rc;
@@ -882,10 +861,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        struct ieee80211_channel *channel = info->chandef.chan;
        struct cfg80211_beacon_data *bcon = &info->beacon;
        struct cfg80211_crypto_settings *crypto = &info->crypto;
-       struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
-       size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-       const u8 *pr_ies = NULL;
-       size_t pr_ies_len = 0;
        u8 hidden_ssid;
 
        wil_dbg_misc(wil, "%s()\n", __func__);
@@ -925,11 +900,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        wil_print_bcon_data(bcon);
        wil_print_crypto(wil, crypto);
 
-       if (bcon->probe_resp_len > hlen) {
-               pr_ies = f->u.probe_resp.variable;
-               pr_ies_len = bcon->probe_resp_len - hlen;
-       }
-
        if (wil_fix_bcon(wil, bcon)) {
                wil_dbg_misc(wil, "Fixed bcon\n");
                wil_print_bcon_data(bcon);
@@ -938,10 +908,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        rc = _wil_cfg80211_start_ap(wiphy, ndev,
                                    info->ssid, info->ssid_len, info->privacy,
                                    info->beacon_interval, channel->hw_value,
-                                   pr_ies_len, pr_ies,
-                                   bcon->assocresp_ies_len,
-                                   bcon->assocresp_ies,
-                                   hidden_ssid);
+                                   bcon, hidden_ssid);
 
        return rc;
 }
index 75219a1b8805135c5cc751db9f70c50e55ddf285..613ca2b2527be25a0c4a51329acdab30bb0e3ab3 100644 (file)
@@ -62,7 +62,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
        seq_printf(s, "  swhead = %d\n", vring->swhead);
        seq_printf(s, "  hwtail = [0x%08x] -> ", vring->hwtail);
        if (x) {
-               v = ioread32(x);
+               v = readl(x);
                seq_printf(s, "0x%08x = %d\n", v, v);
        } else {
                seq_puts(s, "???\n");
@@ -268,7 +268,7 @@ static const struct file_operations fops_mbox = {
 
 static int wil_debugfs_iomem_x32_set(void *data, u64 val)
 {
-       iowrite32(val, (void __iomem *)data);
+       writel(val, (void __iomem *)data);
        wmb(); /* make sure write propagated to HW */
 
        return 0;
@@ -276,7 +276,7 @@ static int wil_debugfs_iomem_x32_set(void *data, u64 val)
 
 static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
 {
-       *val = ioread32((void __iomem *)data);
+       *val = readl((void __iomem *)data);
 
        return 0;
 }
@@ -306,7 +306,7 @@ static int wil_debugfs_ulong_get(void *data, u64 *val)
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
-                       wil_debugfs_ulong_set, "%llu\n");
+                       wil_debugfs_ulong_set, "0x%llx\n");
 
 static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
                                               struct dentry *parent,
@@ -477,7 +477,7 @@ static int wil_memread_debugfs_show(struct seq_file *s, void *data)
        void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
 
        if (a)
-               seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+               seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
        else
                seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
 
@@ -1344,6 +1344,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
 {
        int i;
        u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
+       unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
 
        seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout,
                   r->head_seq_num);
@@ -1353,7 +1354,10 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
                else
                        seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
        }
-       seq_printf(s, "] last drop 0x%03x\n", r->ssn_last_drop);
+       seq_printf(s,
+                  "] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n",
+                  r->total, drop_dup + drop_old, drop_dup, drop_old,
+                  r->ssn_last_drop);
 }
 
 static int wil_sta_debugfs_show(struct seq_file *s, void *data)
index 0ea695ff98adeda1185382bfda7b58958b579275..7053b62ca8d313ac593143bb4619ed13db252de7 100644 (file)
@@ -50,19 +50,13 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
-       tx_itr_en = ioread32(wil->csr +
-                            HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
+       tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
        if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
-               tx_itr_val =
-                       ioread32(wil->csr +
-                                HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
+               tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
 
-       rx_itr_en = ioread32(wil->csr +
-                            HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
+       rx_itr_en = wil_r(wil, RGF_DMA_ITR_RX_CNT_CTL);
        if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
-               rx_itr_val =
-                       ioread32(wil->csr +
-                                HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
+               rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
 
        cp->tx_coalesce_usecs = tx_itr_val;
        cp->rx_coalesce_usecs = rx_itr_val;
index 4428345e5a470360560ceb82772349cf8f754a7f..82aae2d705b41803fee7b44ab174be875b159f96 100644 (file)
 MODULE_FIRMWARE(WIL_FW_NAME);
 MODULE_FIRMWARE(WIL_FW2_NAME);
 
-/* target operations */
-/* register read */
-#define R(a) ioread32(wil->csr + HOSTADDR(a))
-/* register write. wmb() to make sure it is completed */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-/* register set = read, OR, write */
-#define S(a, v) W(a, R(a) | v)
-/* register clear = read, AND with inverted, write */
-#define C(a, v) W(a, R(a) & ~v)
-
 static
 void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
                        size_t count)
index 157f5ef384e0cc2804229044f369ac813c69a28d..d30657ee7e83fa887eb54340fed64bc45db3e606 100644 (file)
@@ -221,12 +221,12 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
 
                FW_ADDR_CHECK(dst, block[i].addr, "address");
 
-               x = ioread32(dst);
+               x = readl(dst);
                y = (x & m) | (v & ~m);
                wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x "
                           "(old 0x%08x val 0x%08x mask 0x%08x)\n",
                           le32_to_cpu(block[i].addr), y, x, v, m);
-               iowrite32(y, dst);
+               writel(y, dst);
                wmb(); /* finish before processing next record */
        }
 
@@ -239,18 +239,18 @@ static int gw_write(struct wil6210_priv *wil, void __iomem *gwa_addr,
 {
        unsigned delay = 0;
 
-       iowrite32(a, gwa_addr);
-       iowrite32(gw_cmd, gwa_cmd);
+       writel(a, gwa_addr);
+       writel(gw_cmd, gwa_cmd);
        wmb(); /* finish before activate gw */
 
-       iowrite32(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
+       writel(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
        do {
                udelay(1); /* typical time is few usec */
                if (delay++ > 100) {
                        wil_err_fw(wil, "gw timeout\n");
                        return -EINVAL;
                }
-       } while (ioread32(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
+       } while (readl(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
 
        return 0;
 }
@@ -305,7 +305,7 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
                wil_dbg_fw(wil, "  gw write[%3d] [0x%08x] <== 0x%08x\n",
                           i, a, v);
 
-               iowrite32(v, gwa_val);
+               writel(v, gwa_val);
                rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
                if (rc)
                        return rc;
@@ -372,7 +372,7 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
                                sizeof(v), false);
 
                for (k = 0; k < ARRAY_SIZE(block->value); k++)
-                       iowrite32(v[k], gwa_val[k]);
+                       writel(v[k], gwa_val[k]);
                rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
                if (rc)
                        return rc;
index 28ffc18466c4b1e1d2887f1887fc3decf36e73e2..a371f036d0546388c3bcfa6317e5d9e4ca6857f8 100644 (file)
@@ -61,13 +61,13 @@ static inline void wil_icr_clear(u32 x, void __iomem *addr)
 
 static inline void wil_icr_clear(u32 x, void __iomem *addr)
 {
-       iowrite32(x, addr);
+       writel(x, addr);
 }
 #endif /* defined(CONFIG_WIL6210_ISR_COR) */
 
 static inline u32 wil_ioread32_and_clear(void __iomem *addr)
 {
-       u32 x = ioread32(addr);
+       u32 x = readl(addr);
 
        wil_icr_clear(x, addr);
 
@@ -76,54 +76,47 @@ static inline u32 wil_ioread32_and_clear(void __iomem *addr)
 
 static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
 {
-       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-                 HOSTADDR(RGF_DMA_EP_TX_ICR) +
-                 offsetof(struct RGF_ICR, IMS));
+       wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS),
+             WIL6210_IRQ_DISABLE);
 }
 
 static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
 {
-       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-                 HOSTADDR(RGF_DMA_EP_RX_ICR) +
-                 offsetof(struct RGF_ICR, IMS));
+       wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
+             WIL6210_IRQ_DISABLE);
 }
 
 static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
 {
-       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-                 HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-                 offsetof(struct RGF_ICR, IMS));
+       wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
+             WIL6210_IRQ_DISABLE);
 }
 
 static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
 {
        wil_dbg_irq(wil, "%s()\n", __func__);
 
-       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-                 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+       wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
 
        clear_bit(wil_status_irqen, wil->status);
 }
 
 void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
 {
-       iowrite32(WIL6210_IMC_TX, wil->csr +
-                 HOSTADDR(RGF_DMA_EP_TX_ICR) +
-                 offsetof(struct RGF_ICR, IMC));
+       wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC),
+             WIL6210_IMC_TX);
 }
 
 void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
 {
-       iowrite32(WIL6210_IMC_RX, wil->csr +
-                 HOSTADDR(RGF_DMA_EP_RX_ICR) +
-                 offsetof(struct RGF_ICR, IMC));
+       wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
+             WIL6210_IMC_RX);
 }
 
 static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
 {
-       iowrite32(WIL6210_IMC_MISC, wil->csr +
-                 HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-                 offsetof(struct RGF_ICR, IMC));
+       wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
+             WIL6210_IMC_MISC);
 }
 
 static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
@@ -132,8 +125,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
 
        set_bit(wil_status_irqen, wil->status);
 
-       iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
-                 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+       wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK);
 }
 
 void wil_mask_irq(struct wil6210_priv *wil)
@@ -150,12 +142,12 @@ void wil_unmask_irq(struct wil6210_priv *wil)
 {
        wil_dbg_irq(wil, "%s()\n", __func__);
 
-       iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
-                 offsetof(struct RGF_ICR, ICC));
-       iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
-                 offsetof(struct RGF_ICR, ICC));
-       iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-                 offsetof(struct RGF_ICR, ICC));
+       wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
+             WIL_ICR_ICC_VALUE);
+       wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
+             WIL_ICR_ICC_VALUE);
+       wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
+             WIL_ICR_ICC_VALUE);
 
        wil6210_unmask_irq_pseudo(wil);
        wil6210_unmask_irq_tx(wil);
@@ -163,9 +155,6 @@ void wil_unmask_irq(struct wil6210_priv *wil)
        wil6210_unmask_irq_misc(wil);
 }
 
-/* target write operation */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-
 void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
 {
        wil_dbg_irq(wil, "%s()\n", __func__);
@@ -177,44 +166,42 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
                return;
 
        /* Disable and clear tx counter before (re)configuration */
-       W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
-       W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
+       wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
+       wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
        wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
                 wil->tx_max_burst_duration);
        /* Configure TX max burst duration timer to use usec units */
-       W(RGF_DMA_ITR_TX_CNT_CTL,
-         BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
+       wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL,
+             BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
 
        /* Disable and clear tx idle counter before (re)configuration */
-       W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
-       W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
+       wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
+       wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
        wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
                 wil->tx_interframe_timeout);
        /* Configure TX max burst duration timer to use usec units */
-       W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
-                                     BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
+       wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
+             BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
 
        /* Disable and clear rx counter before (re)configuration */
-       W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
-       W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
+       wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
+       wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
        wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
                 wil->rx_max_burst_duration);
        /* Configure TX max burst duration timer to use usec units */
-       W(RGF_DMA_ITR_RX_CNT_CTL,
-         BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
+       wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL,
+             BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
 
        /* Disable and clear rx idle counter before (re)configuration */
-       W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
-       W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
+       wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
+       wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
        wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
                 wil->rx_interframe_timeout);
        /* Configure TX max burst duration timer to use usec units */
-       W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
-                                     BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
+       wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
+             BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
 }
 
-#undef W
-
 static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
 {
        struct wil6210_priv *wil = cookie;
@@ -452,27 +439,24 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
                u32 icr_rx = wil_ioread32_and_clear(wil->csr +
                                HOSTADDR(RGF_DMA_EP_RX_ICR) +
                                offsetof(struct RGF_ICR, ICR));
-               u32 imv_rx = ioread32(wil->csr +
-                               HOSTADDR(RGF_DMA_EP_RX_ICR) +
-                               offsetof(struct RGF_ICR, IMV));
+               u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
+                                  offsetof(struct RGF_ICR, IMV));
                u32 icm_tx = wil_ioread32_and_clear(wil->csr +
                                HOSTADDR(RGF_DMA_EP_TX_ICR) +
                                offsetof(struct RGF_ICR, ICM));
                u32 icr_tx = wil_ioread32_and_clear(wil->csr +
                                HOSTADDR(RGF_DMA_EP_TX_ICR) +
                                offsetof(struct RGF_ICR, ICR));
-               u32 imv_tx = ioread32(wil->csr +
-                               HOSTADDR(RGF_DMA_EP_TX_ICR) +
-                               offsetof(struct RGF_ICR, IMV));
+               u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+                                  offsetof(struct RGF_ICR, IMV));
                u32 icm_misc = wil_ioread32_and_clear(wil->csr +
                                HOSTADDR(RGF_DMA_EP_MISC_ICR) +
                                offsetof(struct RGF_ICR, ICM));
                u32 icr_misc = wil_ioread32_and_clear(wil->csr +
                                HOSTADDR(RGF_DMA_EP_MISC_ICR) +
                                offsetof(struct RGF_ICR, ICR));
-               u32 imv_misc = ioread32(wil->csr +
-                               HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-                               offsetof(struct RGF_ICR, IMV));
+               u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
+                                    offsetof(struct RGF_ICR, IMV));
                wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
                                "Rx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
                                "Tx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
@@ -492,7 +476,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
 {
        irqreturn_t rc = IRQ_HANDLED;
        struct wil6210_priv *wil = cookie;
-       u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+       u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE);
 
        /**
         * pseudo_cause is Clear-On-Read, no need to ACK
@@ -541,48 +525,12 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
        return rc;
 }
 
-static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
-{
-       int rc;
-       /*
-        * IRQ's are in the following order:
-        * - Tx
-        * - Rx
-        * - Misc
-        */
-
-       rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
-                        WIL_NAME"_tx", wil);
-       if (rc)
-               return rc;
-
-       rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
-                        WIL_NAME"_rx", wil);
-       if (rc)
-               goto free0;
-
-       rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
-                                 wil6210_irq_misc_thread,
-                                 IRQF_SHARED, WIL_NAME"_misc", wil);
-       if (rc)
-               goto free1;
-
-       return 0;
-       /* error branch */
-free1:
-       free_irq(irq + 1, wil);
-free0:
-       free_irq(irq, wil);
-
-       return rc;
-}
-
 /* can't use wil_ioread32_and_clear because ICC value is not set yet */
 static inline void wil_clear32(void __iomem *addr)
 {
-       u32 x = ioread32(addr);
+       u32 x = readl(addr);
 
-       iowrite32(x, addr);
+       writel(x, addr);
 }
 
 void wil6210_clear_irq(struct wil6210_priv *wil)
@@ -596,19 +544,16 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
        wmb(); /* make sure write completed */
 }
 
-int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
 {
        int rc;
 
-       wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi);
+       wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx");
 
-       if (wil->n_msi == 3)
-               rc = wil6210_request_3msi(wil, irq);
-       else
-               rc = request_threaded_irq(irq, wil6210_hardirq,
-                                         wil6210_thread_irq,
-                                         wil->n_msi ? 0 : IRQF_SHARED,
-                                         WIL_NAME, wil);
+       rc = request_threaded_irq(irq, wil6210_hardirq,
+                                 wil6210_thread_irq,
+                                 use_msi ? 0 : IRQF_SHARED,
+                                 WIL_NAME, wil);
        return rc;
 }
 
@@ -618,8 +563,4 @@ void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
 
        wil_mask_irq(wil);
        free_irq(irq, wil);
-       if (wil->n_msi == 3) {
-               free_irq(irq + 1, wil);
-               free_irq(irq + 2, wil);
-       }
 }
index e9c0673819c624613ce25e5612f7886b592424d8..f7f9486219516f3d8d713d0377fa574bb5eb1ec9 100644 (file)
@@ -76,11 +76,11 @@ static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data)
        /* operation */
        switch (io.op & wil_mmio_op_mask) {
        case wil_mmio_read:
-               io.val = ioread32(a);
+               io.val = readl(a);
                need_copy = true;
                break;
        case wil_mmio_write:
-               iowrite32(io.val, a);
+               writel(io.val, a);
                wmb(); /* make sure write propagated to HW */
                break;
        default:
index 6ca6193ab8a6100ac6257e24745d4575e17648a9..2fb04c51da53f2dd4fd58f7995eee85a1a93dcd9 100644 (file)
@@ -21,6 +21,7 @@
 #include "wil6210.h"
 #include "txrx.h"
 #include "wmi.h"
+#include "boot_loader.h"
 
 #define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
 #define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
@@ -270,8 +271,7 @@ static void wil_scan_timer_fn(ulong x)
 
        clear_bit(wil_status_fwready, wil->status);
        wil_err(wil, "Scan timeout detected, start fw error recovery\n");
-       wil->recovery_state = fw_recovery_pending;
-       schedule_work(&wil->fw_error_worker);
+       wil_fw_error_recovery(wil);
 }
 
 static int wil_wait_for_recovery(struct wil6210_priv *wil)
@@ -528,26 +528,16 @@ void wil_priv_deinit(struct wil6210_priv *wil)
        destroy_workqueue(wil->wmi_wq);
 }
 
-/* target operations */
-/* register read */
-#define R(a) ioread32(wil->csr + HOSTADDR(a))
-/* register write. wmb() to make sure it is completed */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-/* register set = read, OR, write */
-#define S(a, v) W(a, R(a) | v)
-/* register clear = read, AND with inverted, write */
-#define C(a, v) W(a, R(a) & ~v)
-
 static inline void wil_halt_cpu(struct wil6210_priv *wil)
 {
-       W(RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
-       W(RGF_USER_MAC_CPU_0,  BIT_USER_MAC_CPU_MAN_RST);
+       wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
+       wil_w(wil, RGF_USER_MAC_CPU_0,  BIT_USER_MAC_CPU_MAN_RST);
 }
 
 static inline void wil_release_cpu(struct wil6210_priv *wil)
 {
        /* Start CPU */
-       W(RGF_USER_USER_CPU_0, 1);
+       wil_w(wil, RGF_USER_USER_CPU_0, 1);
 }
 
 static int wil_target_reset(struct wil6210_priv *wil)
@@ -558,56 +548,60 @@ static int wil_target_reset(struct wil6210_priv *wil)
        wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
 
        /* Clear MAC link up */
-       S(RGF_HP_CTRL, BIT(15));
-       S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
-       S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
+       wil_s(wil, RGF_HP_CTRL, BIT(15));
+       wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
+       wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
 
        wil_halt_cpu(wil);
 
        /* clear all boot loader "ready" bits */
-       W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
+       wil_w(wil, RGF_USER_BL +
+             offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0);
        /* Clear Fw Download notification */
-       C(RGF_USER_USAGE_6, BIT(0));
+       wil_c(wil, RGF_USER_USAGE_6, BIT(0));
 
-       S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+       wil_s(wil, RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
        /* XTAL stabilization should take about 3ms */
        usleep_range(5000, 7000);
-       x = R(RGF_CAF_PLL_LOCK_STATUS);
+       x = wil_r(wil, RGF_CAF_PLL_LOCK_STATUS);
        if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
                wil_err(wil, "Xtal stabilization timeout\n"
                        "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
                return -ETIME;
        }
        /* switch 10k to XTAL*/
-       C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+       wil_c(wil, RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
        /* 40 MHz */
-       C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+       wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
 
-       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
-       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
+       wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
+       wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
 
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
 
-       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
-       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
+       wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
+       wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
 
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
+       /* reset A2 PCIE AHB */
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
 
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
        /* wait until device ready. typical time is 20..80 msec */
        do {
                msleep(RST_DELAY);
-               x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
+               x = wil_r(wil, RGF_USER_BL +
+                         offsetof(struct bl_dedicated_registers_v0,
+                                  boot_loader_ready));
                if (x1 != x) {
                        wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
                        x1 = x;
@@ -617,13 +611,13 @@ static int wil_target_reset(struct wil6210_priv *wil)
                                x);
                        return -ETIME;
                }
-       } while (x != BIT_BL_READY);
+       } while (x != BL_READY);
 
-       C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+       wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 
        /* enable fix for HW bug related to the SA/DA swap in AP Rx */
-       S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
-         BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+       wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
+             BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
 
        wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
        return 0;
@@ -641,29 +635,93 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
 static int wil_get_bl_info(struct wil6210_priv *wil)
 {
        struct net_device *ndev = wil_to_ndev(wil);
-       struct RGF_BL bl;
-
-       wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
-       le32_to_cpus(&bl.ready);
-       le32_to_cpus(&bl.version);
-       le32_to_cpus(&bl.rf_type);
-       le32_to_cpus(&bl.baseband_type);
+       union {
+               struct bl_dedicated_registers_v0 bl0;
+               struct bl_dedicated_registers_v1 bl1;
+       } bl;
+       u32 bl_ver;
+       u8 *mac;
+       u16 rf_status;
+
+       wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL),
+                            sizeof(bl));
+       bl_ver = le32_to_cpu(bl.bl0.boot_loader_struct_version);
+       mac = bl.bl0.mac_address;
+
+       if (bl_ver == 0) {
+               le32_to_cpus(&bl.bl0.rf_type);
+               le32_to_cpus(&bl.bl0.baseband_type);
+               rf_status = 0; /* actually, unknown */
+               wil_info(wil,
+                        "Boot Loader struct v%d: MAC = %pM RF = 0x%08x bband = 0x%08x\n",
+                        bl_ver, mac,
+                        bl.bl0.rf_type, bl.bl0.baseband_type);
+               wil_info(wil, "Boot Loader build unknown for struct v0\n");
+       } else {
+               le16_to_cpus(&bl.bl1.rf_type);
+               rf_status = le16_to_cpu(bl.bl1.rf_status);
+               le32_to_cpus(&bl.bl1.baseband_type);
+               le16_to_cpus(&bl.bl1.bl_version_subminor);
+               le16_to_cpus(&bl.bl1.bl_version_build);
+               wil_info(wil,
+                        "Boot Loader struct v%d: MAC = %pM RF = 0x%04x (status 0x%04x) bband = 0x%08x\n",
+                        bl_ver, mac,
+                        bl.bl1.rf_type, rf_status,
+                        bl.bl1.baseband_type);
+               wil_info(wil, "Boot Loader build %d.%d.%d.%d\n",
+                        bl.bl1.bl_version_major, bl.bl1.bl_version_minor,
+                        bl.bl1.bl_version_subminor, bl.bl1.bl_version_build);
+       }
 
-       if (!is_valid_ether_addr(bl.mac_address)) {
-               wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
+       if (!is_valid_ether_addr(mac)) {
+               wil_err(wil, "BL: Invalid MAC %pM\n", mac);
                return -EINVAL;
        }
 
-       ether_addr_copy(ndev->perm_addr, bl.mac_address);
+       ether_addr_copy(ndev->perm_addr, mac);
        if (!is_valid_ether_addr(ndev->dev_addr))
-               ether_addr_copy(ndev->dev_addr, bl.mac_address);
-       wil_info(wil,
-                "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
-                bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
+               ether_addr_copy(ndev->dev_addr, mac);
+
+       if (rf_status) {/* bad RF cable? */
+               wil_err(wil, "RF communication error 0x%04x",
+                       rf_status);
+               return -EAGAIN;
+       }
 
        return 0;
 }
 
+static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
+{
+       u32 bl_assert_code, bl_assert_blink, bl_magic_number;
+       u32 bl_ver = wil_r(wil, RGF_USER_BL +
+                          offsetof(struct bl_dedicated_registers_v0,
+                                   boot_loader_struct_version));
+
+       if (bl_ver < 2)
+               return;
+
+       bl_assert_code = wil_r(wil, RGF_USER_BL +
+                              offsetof(struct bl_dedicated_registers_v1,
+                                       bl_assert_code));
+       bl_assert_blink = wil_r(wil, RGF_USER_BL +
+                               offsetof(struct bl_dedicated_registers_v1,
+                                        bl_assert_blink));
+       bl_magic_number = wil_r(wil, RGF_USER_BL +
+                               offsetof(struct bl_dedicated_registers_v1,
+                                        bl_magic_number));
+
+       if (is_err) {
+               wil_err(wil,
+                       "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+                       bl_assert_code, bl_assert_blink, bl_magic_number);
+       } else {
+               wil_dbg_misc(wil,
+                            "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+                            bl_assert_code, bl_assert_blink, bl_magic_number);
+       }
+}
+
 static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
 {
        ulong to = msecs_to_jiffies(1000);
@@ -690,9 +748,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
-       if (wil->hw_version == HW_VER_UNKNOWN)
-               return -ENODEV;
-
        WARN_ON(!mutex_is_locked(&wil->mutex));
        WARN_ON(test_bit(wil_status_napi_en, wil->status));
 
@@ -707,6 +762,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                return 0;
        }
 
+       if (wil->hw_version == HW_VER_UNKNOWN)
+               return -ENODEV;
+
        cancel_work_sync(&wil->disconnect_worker);
        wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
        wil_bcast_fini(wil);
@@ -729,12 +787,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        flush_workqueue(wil->wq_service);
        flush_workqueue(wil->wmi_wq);
 
+       wil_bl_crash_info(wil, false);
        rc = wil_target_reset(wil);
        wil_rx_fini(wil);
-       if (rc)
+       if (rc) {
+               wil_bl_crash_info(wil, true);
                return rc;
+       }
 
        rc = wil_get_bl_info(wil);
+       if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
+               rc = 0;
        if (rc)
                return rc;
 
@@ -752,7 +815,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                        return rc;
 
                /* Mark FW as loaded from host */
-               S(RGF_USER_USAGE_6, 1);
+               wil_s(wil, RGF_USER_USAGE_6, 1);
 
                /* clear any interrupts which on-card-firmware
                 * may have set
@@ -760,8 +823,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                wil6210_clear_irq(wil);
                /* CAF_ICR - clear and mask */
                /* it is W1C, clear by writing back same value */
-               S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
-               W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+               wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
+               wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
 
                wil_release_cpu(wil);
        }
@@ -785,11 +848,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        return rc;
 }
 
-#undef R
-#undef W
-#undef S
-#undef C
-
 void wil_fw_error_recovery(struct wil6210_priv *wil)
 {
        wil_dbg_misc(wil, "starting fw error recovery\n");
index 8ef18ace110ffffde2e4a954c20c2a0e0632858e..e3b3c8fb4605502a8c7cf80bbf12b1473a69388c 100644 (file)
@@ -173,7 +173,10 @@ void *wil_if_alloc(struct device *dev)
        wil_set_ethtoolops(ndev);
        ndev->ieee80211_ptr = wdev;
        ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
-                           NETIF_F_SG | NETIF_F_GRO;
+                           NETIF_F_SG | NETIF_F_GRO |
+                           NETIF_F_TSO | NETIF_F_TSO6 |
+                           NETIF_F_RXHASH;
+
        ndev->features |= ndev->hw_features;
        SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
        wdev->netdev = ndev;
index aa3ecc607ca31abae2879bea058a2505006e1495..feff1ef10fb3d757fc0f00a0fde7a4e127016918 100644 (file)
 
 #include "wil6210.h"
 
-static int use_msi = 1;
-module_param(use_msi, int, S_IRUGO);
-MODULE_PARM_DESC(use_msi,
-                " Use MSI interrupt: "
-                "0 - don't, 1 - (default) - single, or 3");
+static bool use_msi = true;
+module_param(use_msi, bool, S_IRUGO);
+MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
 
 static
 void wil_set_capabilities(struct wil6210_priv *wil)
 {
-       u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID));
+       u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
 
        bitmap_zero(wil->hw_capabilities, hw_capability_last);
 
@@ -50,24 +48,12 @@ void wil_set_capabilities(struct wil6210_priv *wil)
 
 void wil_disable_irq(struct wil6210_priv *wil)
 {
-       int irq = wil->pdev->irq;
-
-       disable_irq(irq);
-       if (wil->n_msi == 3) {
-               disable_irq(irq + 1);
-               disable_irq(irq + 2);
-       }
+       disable_irq(wil->pdev->irq);
 }
 
 void wil_enable_irq(struct wil6210_priv *wil)
 {
-       int irq = wil->pdev->irq;
-
-       enable_irq(irq);
-       if (wil->n_msi == 3) {
-               enable_irq(irq + 1);
-               enable_irq(irq + 2);
-       }
+       enable_irq(wil->pdev->irq);
 }
 
 /* Bus ops */
@@ -80,6 +66,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
         * and only MSI should be used
         */
        int msi_only = pdev->msi_enabled;
+       bool _use_msi = use_msi;
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
@@ -87,41 +74,20 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
 
        pci_set_master(pdev);
 
-       /*
-        * how many MSI interrupts to request?
-        */
-       switch (use_msi) {
-       case 3:
-       case 1:
-               wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
-               break;
-       case 0:
-               wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
-               break;
-       default:
-               wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
-               use_msi = 1;
-       }
-
-       if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
-               wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
-               use_msi = 1;
-       }
+       wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
 
-       if (use_msi == 1 && pci_enable_msi(pdev)) {
+       if (use_msi && pci_enable_msi(pdev)) {
                wil_err(wil, "pci_enable_msi failed, use INTx\n");
-               use_msi = 0;
+               _use_msi = false;
        }
 
-       wil->n_msi = use_msi;
-
-       if ((wil->n_msi == 0) && msi_only) {
+       if (!_use_msi && msi_only) {
                wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
                rc = -ENODEV;
                goto stop_master;
        }
 
-       rc = wil6210_init_irq(wil, pdev->irq);
+       rc = wil6210_init_irq(wil, pdev->irq, _use_msi);
        if (rc)
                goto stop_master;
 
@@ -293,11 +259,80 @@ static const struct pci_device_id wil6210_pcie_ids[] = {
 };
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
 
+#ifdef CONFIG_PM
+
+static int wil6210_suspend(struct device *dev, bool is_runtime)
+{
+       int rc = 0;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+       wil_dbg_pm(wil, "%s(%s)\n", __func__,
+                  is_runtime ? "runtime" : "system");
+
+       rc = wil_can_suspend(wil, is_runtime);
+       if (rc)
+               goto out;
+
+       rc = wil_suspend(wil, is_runtime);
+       if (rc)
+               goto out;
+
+       /* TODO: how do I bring card in low power state? */
+
+       /* disable bus mastering */
+       pci_clear_master(pdev);
+       /* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
+
+out:
+       return rc;
+}
+
+static int wil6210_resume(struct device *dev, bool is_runtime)
+{
+       int rc = 0;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+       wil_dbg_pm(wil, "%s(%s)\n", __func__,
+                  is_runtime ? "runtime" : "system");
+
+       /* allow master */
+       pci_set_master(pdev);
+
+       rc = wil_resume(wil, is_runtime);
+       if (rc)
+               pci_clear_master(pdev);
+
+       return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int wil6210_pm_suspend(struct device *dev)
+{
+       return wil6210_suspend(dev, false);
+}
+
+static int wil6210_pm_resume(struct device *dev)
+{
+       return wil6210_resume(dev, false);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops wil6210_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+};
+
 static struct pci_driver wil6210_driver = {
        .probe          = wil_pcie_probe,
        .remove         = wil_pcie_remove,
        .id_table       = wil6210_pcie_ids,
        .name           = WIL_NAME,
+       .driver         = {
+               .pm = &wil6210_pm_ops,
+       },
 };
 
 static int __init wil6210_driver_init(void)
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
new file mode 100644 (file)
index 0000000..0b7ecbc
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+       int rc = 0;
+       struct wireless_dev *wdev = wil->wdev;
+
+       wil_dbg_pm(wil, "%s(%s)\n", __func__,
+                  is_runtime ? "runtime" : "system");
+
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_MONITOR:
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
+               break;
+       /* AP-like interface - can't suspend */
+       default:
+               wil_dbg_pm(wil, "AP-like interface\n");
+               rc = -EBUSY;
+               break;
+       }
+
+       wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
+                  is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
+
+       return rc;
+}
+
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+       int rc = 0;
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       wil_dbg_pm(wil, "%s(%s)\n", __func__,
+                  is_runtime ? "runtime" : "system");
+
+       /* if netif up, hardware is alive, shut it down */
+       if (ndev->flags & IFF_UP) {
+               rc = wil_down(wil);
+               if (rc) {
+                       wil_err(wil, "wil_down : %d\n", rc);
+                       goto out;
+               }
+       }
+
+       if (wil->platform_ops.suspend)
+               rc = wil->platform_ops.suspend(wil->platform_handle);
+
+out:
+       wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+                  is_runtime ? "runtime" : "system", rc);
+       return rc;
+}
+
+int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+{
+       int rc = 0;
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       wil_dbg_pm(wil, "%s(%s)\n", __func__,
+                  is_runtime ? "runtime" : "system");
+
+       if (wil->platform_ops.resume) {
+               rc = wil->platform_ops.resume(wil->platform_handle);
+               if (rc) {
+                       wil_err(wil, "platform_ops.resume : %d\n", rc);
+                       goto out;
+               }
+       }
+
+       /* if netif up, bring hardware up
+        * During open(), IFF_UP set after actual device method
+        * invocation. This prevent recursive call to wil_up()
+        */
+       if (ndev->flags & IFF_UP)
+               rc = wil_up(wil);
+
+out:
+       wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+                  is_runtime ? "runtime" : "system", rc);
+       return rc;
+}
index ca10dcf0986eaa39faeed4f61d8a90189a224376..9238c1ac23dd0311509b6d769e00ee70ce18ec81 100644 (file)
@@ -121,6 +121,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
                goto out;
        }
 
+       r->total++;
        hseq = r->head_seq_num;
 
        /** Due to the race between WMI events, where BACK establishment
@@ -153,6 +154,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
        /* frame with out of date sequence number */
        if (seq_less(seq, r->head_seq_num)) {
                r->ssn_last_drop = seq;
+               r->drop_old++;
+               wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
+                            seq, r->head_seq_num);
                dev_kfree_skb(skb);
                goto out;
        }
@@ -173,6 +177,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 
        /* check if we already stored this frame */
        if (r->reorder_buf[index]) {
+               r->drop_dup++;
+               wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
                dev_kfree_skb(skb);
                goto out;
        }
index aa20af86e1d61e790a4ba054c9db383ec2968b15..6229110d558a1a043566091859144b582c34c6c9 100644 (file)
@@ -509,7 +509,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
                        break;
                }
        }
-       iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+       wil_w(wil, v->hwtail, v->swtail);
 
        return rc;
 }
@@ -541,6 +541,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
                [GRO_DROP]              = "GRO_DROP",
        };
 
+       if (ndev->features & NETIF_F_RXHASH)
+               /* fake L4 to ensure it won't be re-calculated later
+                * set hash to any non-zero value to activate rps
+                * mechanism, core will be chosen according
+                * to user-level rps configuration.
+                */
+               skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
+
        skb_orphan(skb);
 
        if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
@@ -1058,14 +1066,52 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
 static inline
 void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
 {
-       d->mac.d[2] |= ((nr_frags + 1) <<
-                      MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+       d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
 }
 
-static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
-                                        struct vring_tx_desc *d,
-                                        struct sk_buff *skb)
+/**
+ * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+ * @skb is used to obtain the protocol and headers length.
+ * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
+ * 2 - middle, 3 - last descriptor.
+ */
+
+static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
+                                         struct sk_buff *skb,
+                                         int tso_desc_type, bool is_ipv4,
+                                         int tcp_hdr_len, int skb_net_hdr_len)
 {
+       d->dma.b11 = ETH_HLEN; /* MAC header length */
+       d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
+
+       d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
+       /* L4 header len: TCP header length */
+       d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+
+       /* Setup TSO: bit and desc type */
+       d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
+               (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
+       d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
+
+       d->dma.ip_length = skb_net_hdr_len;
+       /* Enable TCP/UDP checksum */
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
+       /* Calculate pseudo-header */
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
+}
+
+/**
+ * Sets the descriptor @d up for csum. The corresponding
+ * @skb is used to obtain the protocol and headers length.
+ * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
+ * Note, if d==NULL, the function only returns the protocol result.
+ *
+ * It is very similar to previous wil_tx_desc_offload_setup_tso. This
+ * is "if unrolling" to optimize the critical path.
+ */
+
+static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
+                                    struct sk_buff *skb){
        int protocol;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1110,6 +1156,305 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
        return 0;
 }
 
+static inline void wil_tx_last_desc(struct vring_tx_desc *d)
+{
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
+             BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
+             BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+}
+
+static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
+{
+       d->dma.d0 |= wil_tso_type_lst <<
+                 DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
+}
+
+static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
+                             struct sk_buff *skb)
+{
+       struct device *dev = wil_to_dev(wil);
+
+       /* point to descriptors in shared memory */
+       volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
+                                     *_first_desc = NULL;
+
+       /* pointers to shadow descriptors */
+       struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
+                            *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
+                            *first_desc = &first_desc_mem;
+
+       /* pointer to shadow descriptors' context */
+       struct wil_ctx *hdr_ctx, *first_ctx = NULL;
+
+       int descs_used = 0; /* total number of used descriptors */
+       int sg_desc_cnt = 0; /* number of descriptors for current mss*/
+
+       u32 swhead = vring->swhead;
+       int used, avail = wil_vring_avail_tx(vring);
+       int nr_frags = skb_shinfo(skb)->nr_frags;
+       int min_desc_required = nr_frags + 1;
+       int mss = skb_shinfo(skb)->gso_size;    /* payload size w/o headers */
+       int f, len, hdrlen, headlen;
+       int vring_index = vring - wil->vring_tx;
+       struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+       uint i = swhead;
+       dma_addr_t pa;
+       const skb_frag_t *frag = NULL;
+       int rem_data = mss;
+       int lenmss;
+       int hdr_compensation_need = true;
+       int desc_tso_type = wil_tso_type_first;
+       bool is_ipv4;
+       int tcp_hdr_len;
+       int skb_net_hdr_len;
+       int gso_type;
+
+       wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
+                    __func__, skb->len, vring_index);
+
+       if (unlikely(!txdata->enabled))
+               return -EINVAL;
+
+       /* A typical page 4K is 3-4 payloads, we assume each fragment
+        * is a full payload, that's how min_desc_required has been
+        * calculated. In real we might need more or less descriptors,
+        * this is the initial check only.
+        */
+       if (unlikely(avail < min_desc_required)) {
+               wil_err_ratelimited(wil,
+                                   "TSO: Tx ring[%2d] full. No space for %d fragments\n",
+                                   vring_index, min_desc_required);
+               return -ENOMEM;
+       }
+
+       /* Header Length = MAC header len + IP header len + TCP header len*/
+       hdrlen = ETH_HLEN +
+               (int)skb_network_header_len(skb) +
+               tcp_hdrlen(skb);
+
+       gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
+       switch (gso_type) {
+       case SKB_GSO_TCPV4:
+               /* TCP v4, zero out the IP length and IPv4 checksum fields
+                * as required by the offloading doc
+                */
+               ip_hdr(skb)->tot_len = 0;
+               ip_hdr(skb)->check = 0;
+               is_ipv4 = true;
+               break;
+       case SKB_GSO_TCPV6:
+               /* TCP v6, zero out the payload length */
+               ipv6_hdr(skb)->payload_len = 0;
+               is_ipv4 = false;
+               break;
+       default:
+               /* other than TCPv4 or TCPv6 types are not supported for TSO.
+                * It is also illegal for both to be set simultaneously
+                */
+               return -EINVAL;
+       }
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return -EINVAL;
+
+       /* tcp header length and skb network header length are fixed for all
+        * packet's descriptors - read them once here
+        */
+       tcp_hdr_len = tcp_hdrlen(skb);
+       skb_net_hdr_len = skb_network_header_len(skb);
+
+       _hdr_desc = &vring->va[i].tx;
+
+       pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, pa))) {
+               wil_err(wil, "TSO: Skb head DMA map error\n");
+               goto err_exit;
+       }
+
+       wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
+       wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
+                                     tcp_hdr_len, skb_net_hdr_len);
+       wil_tx_last_desc(hdr_desc);
+
+       vring->ctx[i].mapped_as = wil_mapped_as_single;
+       hdr_ctx = &vring->ctx[i];
+
+       descs_used++;
+       headlen = skb_headlen(skb) - hdrlen;
+
+       for (f = headlen ? -1 : 0; f < nr_frags; f++)  {
+               if (headlen) {
+                       len = headlen;
+                       wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
+                                    len);
+               } else {
+                       frag = &skb_shinfo(skb)->frags[f];
+                       len = frag->size;
+                       wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
+               }
+
+               while (len) {
+                       wil_dbg_txrx(wil,
+                                    "TSO: len %d, rem_data %d, descs_used %d\n",
+                                    len, rem_data, descs_used);
+
+                       if (descs_used == avail)  {
+                               wil_err(wil, "TSO: ring overflow\n");
+                               goto dma_error;
+                       }
+
+                       lenmss = min_t(int, rem_data, len);
+                       i = (swhead + descs_used) % vring->size;
+                       wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
+
+                       if (!headlen) {
+                               pa = skb_frag_dma_map(dev, frag,
+                                                     frag->size - len, lenmss,
+                                                     DMA_TO_DEVICE);
+                               vring->ctx[i].mapped_as = wil_mapped_as_page;
+                       } else {
+                               pa = dma_map_single(dev,
+                                                   skb->data +
+                                                   skb_headlen(skb) - headlen,
+                                                   lenmss,
+                                                   DMA_TO_DEVICE);
+                               vring->ctx[i].mapped_as = wil_mapped_as_single;
+                               headlen -= lenmss;
+                       }
+
+                       if (unlikely(dma_mapping_error(dev, pa)))
+                               goto dma_error;
+
+                       _desc = &vring->va[i].tx;
+
+                       if (!_first_desc) {
+                               _first_desc = _desc;
+                               first_ctx = &vring->ctx[i];
+                               d = first_desc;
+                       } else {
+                               d = &desc_mem;
+                       }
+
+                       wil_tx_desc_map(d, pa, lenmss, vring_index);
+                       wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
+                                                     is_ipv4, tcp_hdr_len,
+                                                     skb_net_hdr_len);
+
+                       /* use tso_type_first only once */
+                       desc_tso_type = wil_tso_type_mid;
+
+                       descs_used++;  /* desc used so far */
+                       sg_desc_cnt++; /* desc used for this segment */
+                       len -= lenmss;
+                       rem_data -= lenmss;
+
+                       wil_dbg_txrx(wil,
+                                    "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
+                                    len, rem_data, descs_used, sg_desc_cnt);
+
+                       /* Close the segment if reached mss size or last frag*/
+                       if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
+                               if (hdr_compensation_need) {
+                                       /* first segment include hdr desc for
+                                        * release
+                                        */
+                                       hdr_ctx->nr_frags = sg_desc_cnt;
+                                       wil_tx_desc_set_nr_frags(first_desc,
+                                                                sg_desc_cnt +
+                                                                1);
+                                       hdr_compensation_need = false;
+                               } else {
+                                       wil_tx_desc_set_nr_frags(first_desc,
+                                                                sg_desc_cnt);
+                               }
+                               first_ctx->nr_frags = sg_desc_cnt - 1;
+
+                               wil_tx_last_desc(d);
+
+                               /* first descriptor may also be the last
+                                * for this mss - make sure not to copy
+                                * it twice
+                                */
+                               if (first_desc != d)
+                                       *_first_desc = *first_desc;
+
+                               /* last descriptor will be copied at the end
+                                * of this TSO processing
+                                */
+                               if (f < nr_frags - 1 || len > 0)
+                                       *_desc = *d;
+
+                               rem_data = mss;
+                               _first_desc = NULL;
+                               sg_desc_cnt = 0;
+                       } else if (first_desc != d) /* update mid descriptor */
+                                       *_desc = *d;
+               }
+       }
+
+       /* first descriptor may also be the last.
+        * in this case d pointer is invalid
+        */
+       if (_first_desc == _desc)
+               d = first_desc;
+
+       /* Last data descriptor */
+       wil_set_tx_desc_last_tso(d);
+       *_desc = *d;
+
+       /* Fill the total number of descriptors in first desc (hdr)*/
+       wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
+       *_hdr_desc = *hdr_desc;
+
+       /* hold reference to skb
+        * to prevent skb release before accounting
+        * in case of immediate "tx done"
+        */
+       vring->ctx[i].skb = skb_get(skb);
+
+       /* performance monitoring */
+       used = wil_vring_used_tx(vring);
+       if (wil_val_in_range(vring_idle_trsh,
+                            used, used + descs_used)) {
+               txdata->idle += get_cycles() - txdata->last_idle;
+               wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
+                            vring_index, used, used + descs_used);
+       }
+
+       /* advance swhead */
+       wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
+       wil_vring_advance_head(vring, descs_used);
+
+       /* make sure all writes to descriptors (shared memory) are done before
+        * committing them to HW
+        */
+       wmb();
+
+       wil_w(wil, vring->hwtail, vring->swhead);
+       return 0;
+
+dma_error:
+       wil_err(wil, "TSO: DMA map page error\n");
+       while (descs_used > 0) {
+               struct wil_ctx *ctx;
+
+               i = (swhead + descs_used) % vring->size;
+               d = (struct vring_tx_desc *)&vring->va[i].tx;
+               _desc = &vring->va[i].tx;
+               *d = *_desc;
+               _desc->dma.status = TX_DMA_STATUS_DU;
+               ctx = &vring->ctx[i];
+               wil_txdesc_unmap(dev, d, ctx);
+               if (ctx->skb)
+                       dev_kfree_skb_any(ctx->skb);
+               memset(ctx, 0, sizeof(*ctx));
+               descs_used--;
+       }
+
+err_exit:
+       return -EINVAL;
+}
+
 static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                          struct sk_buff *skb)
 {
@@ -1128,7 +1473,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        bool mcast = (vring_index == wil->bcast_vring);
        uint len = skb_headlen(skb);
 
-       wil_dbg_txrx(wil, "%s()\n", __func__);
+       wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
+                    __func__, skb->len, vring_index);
 
        if (unlikely(!txdata->enabled))
                return -EINVAL;
@@ -1159,14 +1505,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                        d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
        }
        /* Process TCP/UDP checksum offloading */
-       if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
+       if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
                wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
                        vring_index);
                goto dma_error;
        }
 
        vring->ctx[i].nr_frags = nr_frags;
-       wil_tx_desc_set_nr_frags(d, nr_frags);
+       wil_tx_desc_set_nr_frags(d, nr_frags + 1);
 
        /* middle segments */
        for (; f < nr_frags; f++) {
@@ -1190,7 +1536,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                 * if it succeeded for 1-st descriptor,
                 * it will succeed here too
                 */
-               wil_tx_desc_offload_cksum_set(wil, d, skb);
+               wil_tx_desc_offload_setup(d, skb);
        }
        /* for the last seg only */
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
@@ -1221,7 +1567,13 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
                     vring->swhead);
        trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
-       iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+
+       /* make sure all writes to descriptors (shared memory) are done before
+        * committing them to HW
+        */
+       wmb();
+
+       wil_w(wil, vring->hwtail, vring->swhead);
 
        return 0;
  dma_error:
@@ -1254,8 +1606,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        int rc;
 
        spin_lock(&txdata->lock);
-       rc = __wil_tx_vring(wil, vring, skb);
+
+       rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
+            (wil, vring, skb);
+
        spin_unlock(&txdata->lock);
+
        return rc;
 }
 
@@ -1382,7 +1738,8 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                struct wil_ctx *ctx = &vring->ctx[vring->swtail];
                /**
                 * For the fragmented skb, HW will set DU bit only for the
-                * last fragment. look for it
+                * last fragment. look for it.
+                * In TSO the first DU will include hdr desc
                 */
                int lf = (vring->swtail + ctx->nr_frags) % vring->size;
                /* TODO: check we are not past head */
index 0c4638487c742c3c5cbace887a95562a8e618ea2..82a8f9a030e7e9179db31f6be1cec985a801e8a3 100644 (file)
@@ -291,6 +291,14 @@ struct vring_tx_dma {
        __le16 length;
 } __packed;
 
+/* TSO type used in dma descriptor d0 bits 11-12 */
+enum {
+       wil_tso_type_hdr = 0,
+       wil_tso_type_first = 1,
+       wil_tso_type_mid  = 2,
+       wil_tso_type_lst  = 3,
+};
+
 /* Rx descriptor - MAC part
  * [dword 0]
  * bit  0.. 3 : tid:4 The QoS (b3-0) TID Field
index c63e4a35eaa0fc95cc0cf9d68847c8a11b0b6749..dd4ea926b8e31eb8721a8c8e2f2a209fd7f0017b 100644 (file)
@@ -127,16 +127,6 @@ struct RGF_ICR {
        u32 IMC; /* Mask Clear, write 1 to clear */
 } __packed;
 
-struct RGF_BL {
-       u32 ready;              /* 0x880A3C bit [0] */
-#define BIT_BL_READY   BIT(0)
-       u32 version;            /* 0x880A40 version of the BL struct */
-       u32 rf_type;            /* 0x880A44 ID of the connected RF */
-       u32 baseband_type;      /* 0x880A48 ID of the baseband */
-       u8  mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
-       u8 pad[2];
-} __packed;
-
 /* registers - FW addresses */
 #define RGF_USER_USAGE_1               (0x880004)
 #define RGF_USER_USAGE_6               (0x880018)
@@ -262,9 +252,8 @@ enum {
 };
 
 /* popular locations */
-#define HOST_MBOX   HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
-#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
-       offsetof(struct RGF_ICR, ICS))
+#define RGF_MBOX   RGF_USER_USER_SCRATCH_PAD
+#define HOST_MBOX   HOSTADDR(RGF_MBOX)
 #define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
 
 /* ISR register bits */
@@ -434,12 +423,12 @@ struct pci_dev;
  * @ssn: Starting Sequence Number expected to be aggregated.
  * @buf_size: buffer size for incoming A-MPDUs
  * @timeout: reset timer value (in TUs).
+ * @ssn_last_drop: SSN of the last dropped frame
+ * @total: total number of processed incoming frames
+ * @drop_dup: duplicate frames dropped for this reorder buffer
+ * @drop_old: old frames dropped for this reorder buffer
  * @dialog_token: dialog token for aggregation session
- * @rcu_head: RCU head used for freeing this struct
- *
- * This structure's lifetime is managed by RCU, assignments to
- * the array holding it must hold the aggregation mutex.
- *
+ * @first_time: true when this buffer is used for the first time
  */
 struct wil_tid_ampdu_rx {
        struct sk_buff **reorder_buf;
@@ -453,6 +442,9 @@ struct wil_tid_ampdu_rx {
        u16 buf_size;
        u16 timeout;
        u16 ssn_last_drop;
+       unsigned long long total; /* frames processed */
+       unsigned long long drop_dup;
+       unsigned long long drop_old;
        u8 dialog_token;
        bool first_time; /* is it 1-st time this buffer used? */
 };
@@ -543,7 +535,6 @@ struct pmc_ctx {
 
 struct wil6210_priv {
        struct pci_dev *pdev;
-       int n_msi;
        struct wireless_dev *wdev;
        void __iomem *csr;
        DECLARE_BITMAP(status, wil_status_last);
@@ -656,6 +647,33 @@ void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
 #define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
 #define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
 #define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
+#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg)
+
+/* target operations */
+/* register read */
+static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
+{
+       return readl(wil->csr + HOSTADDR(reg));
+}
+
+/* register write. wmb() to make sure it is completed */
+static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+       writel(val, wil->csr + HOSTADDR(reg));
+       wmb(); /* wait for write to propagate to the HW */
+}
+
+/* register set = read, OR, write */
+static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+       wil_w(wil, reg, wil_r(wil, reg) | val);
+}
+
+/* register clear = read, AND with inverted, write */
+static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+       wil_w(wil, reg, wil_r(wil, reg) & ~val);
+}
 
 #if defined(CONFIG_DYNAMIC_DEBUG)
 #define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize,    \
@@ -746,7 +764,7 @@ void wil_back_tx_worker(struct work_struct *work);
 void wil_back_tx_flush(struct wil6210_priv *wil);
 
 void wil6210_clear_irq(struct wil6210_priv *wil);
-int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
 void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
 void wil_mask_irq(struct wil6210_priv *wil);
 void wil_unmask_irq(struct wil6210_priv *wil);
@@ -798,4 +816,8 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type);
 int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
 int wil_request_firmware(struct wil6210_priv *wil, const char *name);
 
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+
 #endif /* __WIL6210_H__ */
index de15f1422fe9faefe225dd618aecbe688c7a443d..2e831bf20117f5316ff6b1dd6fa22dddc8d95e04 100644 (file)
@@ -14,7 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "linux/device.h"
+#include <linux/device.h>
 #include "wil_platform.h"
 
 int __init wil_platform_modinit(void)
index c759759afbb2dfe63f8d2f7dd20f718d16d2591b..7a257360c4201a6c9dab3e21ab43a09f724ce8d3 100644 (file)
@@ -228,8 +228,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
        wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
        /* wait till FW finish with previous command */
        for (retry = 5; retry > 0; retry--) {
-               r->tail = ioread32(wil->csr + HOST_MBOX +
-                                  offsetof(struct wil6210_mbox_ctl, tx.tail));
+               r->tail = wil_r(wil, RGF_MBOX +
+                               offsetof(struct wil6210_mbox_ctl, tx.tail));
                if (next_head != r->tail)
                        break;
                msleep(20);
@@ -254,16 +254,16 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
        wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
        wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
        /* mark entry as full */
-       iowrite32(1, wil->csr + HOSTADDR(r->head) +
-                 offsetof(struct wil6210_mbox_ring_desc, sync));
+       wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1);
        /* advance next ptr */
-       iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
-                 offsetof(struct wil6210_mbox_ctl, tx.head));
+       wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head),
+             r->head = next_head);
 
        trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
 
        /* interrupt to FW */
-       iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+       wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
+             SW_INT_MBOX);
 
        return 0;
 }
@@ -312,22 +312,44 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
        struct wiphy *wiphy = wil_to_wiphy(wil);
        struct ieee80211_mgmt *rx_mgmt_frame =
                        (struct ieee80211_mgmt *)data->payload;
-       int ch_no = data->info.channel+1;
-       u32 freq = ieee80211_channel_to_frequency(ch_no,
-                       IEEE80211_BAND_60GHZ);
-       struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
-       s32 signal = data->info.sqi;
-       __le16 fc = rx_mgmt_frame->frame_control;
-       u32 d_len = le32_to_cpu(data->info.len);
-       u16 d_status = le16_to_cpu(data->info.status);
-
-       wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
+       int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload);
+       int ch_no;
+       u32 freq;
+       struct ieee80211_channel *channel;
+       s32 signal;
+       __le16 fc;
+       u32 d_len;
+       u16 d_status;
+
+       if (flen < 0) {
+               wil_err(wil, "MGMT Rx: short event, len %d\n", len);
+               return;
+       }
+
+       d_len = le32_to_cpu(data->info.len);
+       if (d_len != flen) {
+               wil_err(wil,
+                       "MGMT Rx: length mismatch, d_len %d should be %d\n",
+                       d_len, flen);
+               return;
+       }
+
+       ch_no = data->info.channel + 1;
+       freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ);
+       channel = ieee80211_get_channel(wiphy, freq);
+       signal = data->info.sqi;
+       d_status = le16_to_cpu(data->info.status);
+       fc = rx_mgmt_frame->frame_control;
+
+       wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n",
                    data->info.channel, data->info.mcs, data->info.snr,
                    data->info.sqi);
        wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
                    le16_to_cpu(fc));
        wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
                    data->info.qid, data->info.mid, data->info.cid);
+       wil_hex_dump_wmi("MGMT Rx ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+                        d_len, true);
 
        if (!channel) {
                wil_err(wil, "Frame on unsupported channel\n");
@@ -363,6 +385,17 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
        }
 }
 
+static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+       struct wmi_tx_mgmt_packet_event *data = d;
+       struct ieee80211_mgmt *mgmt_frame =
+                       (struct ieee80211_mgmt *)data->payload;
+       int flen = len - offsetof(struct wmi_tx_mgmt_packet_event, payload);
+
+       wil_hex_dump_wmi("MGMT Tx ", DUMP_PREFIX_OFFSET, 16, 1, mgmt_frame,
+                        flen, true);
+}
+
 static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
                                  void *d, int len)
 {
@@ -659,6 +692,7 @@ static const struct {
        {WMI_READY_EVENTID,             wmi_evt_ready},
        {WMI_FW_READY_EVENTID,          wmi_evt_fw_ready},
        {WMI_RX_MGMT_PACKET_EVENTID,    wmi_evt_rx_mgmt},
+       {WMI_TX_MGMT_PACKET_EVENTID,            wmi_evt_tx_mgmt},
        {WMI_SCAN_COMPLETE_EVENTID,     wmi_evt_scan_complete},
        {WMI_CONNECT_EVENTID,           wmi_evt_connect},
        {WMI_DISCONNECT_EVENTID,        wmi_evt_disconnect},
@@ -695,8 +729,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                u16 len;
                bool q;
 
-               r->head = ioread32(wil->csr + HOST_MBOX +
-                                  offsetof(struct wil6210_mbox_ctl, rx.head));
+               r->head = wil_r(wil, RGF_MBOX +
+                               offsetof(struct wil6210_mbox_ctl, rx.head));
                if (r->tail == r->head)
                        break;
 
@@ -734,8 +768,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                cmd = (void *)&evt->event.wmi;
                wil_memcpy_fromio_32(cmd, src, len);
                /* mark entry as empty */
-               iowrite32(0, wil->csr + HOSTADDR(r->tail) +
-                         offsetof(struct wil6210_mbox_ring_desc, sync));
+               wil_w(wil, r->tail +
+                     offsetof(struct wil6210_mbox_ring_desc, sync), 0);
                /* indicate */
                if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
                    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
@@ -754,8 +788,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                /* advance tail */
                r->tail = r->base + ((r->tail - r->base +
                          sizeof(struct wil6210_mbox_ring_desc)) % r->size);
-               iowrite32(r->tail, wil->csr + HOST_MBOX +
-                         offsetof(struct wil6210_mbox_ctl, rx.tail));
+               wil_w(wil, RGF_MBOX +
+                     offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
 
                /* add to the pending list */
                spin_lock_irqsave(&wil->wmi_ev_lock, flags);
@@ -988,12 +1022,21 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
 
 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
 {
+       static const char *const names[] = {
+               [WMI_FRAME_BEACON]      = "BEACON",
+               [WMI_FRAME_PROBE_REQ]   = "PROBE_REQ",
+               [WMI_FRAME_PROBE_RESP]  = "WMI_FRAME_PROBE_RESP",
+               [WMI_FRAME_ASSOC_REQ]   = "WMI_FRAME_ASSOC_REQ",
+               [WMI_FRAME_ASSOC_RESP]  = "WMI_FRAME_ASSOC_RESP",
+       };
        int rc;
        u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
        struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
 
-       if (!cmd)
-               return -ENOMEM;
+       if (!cmd) {
+               rc = -ENOMEM;
+               goto out;
+       }
        if (!ie)
                ie_len = 0;
 
@@ -1003,6 +1046,12 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
        memcpy(cmd->ie_info, ie, ie_len);
        rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
        kfree(cmd);
+out:
+       if (rc) {
+               const char *name = type < ARRAY_SIZE(names) ?
+                                  names[type] : "??";
+               wil_err(wil, "set_ie(%d %s) failed : %d\n", type, name, rc);
+       }
 
        return rc;
 }
@@ -1129,15 +1178,42 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
 
 int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
 {
+       int rc;
+       u16 reason_code;
        struct wmi_disconnect_sta_cmd cmd = {
                .disconnect_reason = cpu_to_le16(reason),
        };
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_disconnect_event evt;
+       } __packed reply;
 
        ether_addr_copy(cmd.dst_mac, mac);
 
        wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
 
-       return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
+       rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd),
+                     WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000);
+       /* failure to disconnect in reasonable time treated as FW error */
+       if (rc) {
+               wil_fw_error_recovery(wil);
+               return rc;
+       }
+
+       /* call event handler manually after processing wmi_call,
+        * to avoid deadlock - disconnect event handler acquires wil->mutex
+        * while it is already held here
+        */
+       reason_code = le16_to_cpu(reply.evt.protocol_reason_status);
+
+       wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
+                   reply.evt.bssid, reason_code,
+                   reply.evt.disconnect_reason);
+
+       wil->sinfo_gen++;
+       wil6210_disconnect(wil, reply.evt.bssid, reason_code, true);
+
+       return 0;
 }
 
 int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
@@ -1279,7 +1355,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
                /* search for handler */
                if (!wmi_evt_call_handler(wil, id, evt_data,
                                          len - sizeof(*wmi))) {
-                       wil_err(wil, "Unhandled event 0x%04x\n", id);
+                       wil_info(wil, "Unhandled event 0x%04x\n", id);
                }
        } else {
                wil_err(wil, "Unknown event type\n");
index d36f5f3d931b55f1df10da3b30d87fdd9b931611..f990e3d0e696e7f76b4ef501853eb6c5b7ff2831 100644 (file)
@@ -2564,15 +2564,6 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
        }
 }
 
-static void atomic_orr(int val, atomic_t *v)
-{
-       int old_val;
-
-       old_val = atomic_read(v);
-       while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
-               old_val = atomic_read(v);
-}
-
 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 {
        struct brcmf_core *buscore;
@@ -2595,7 +2586,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
        if (val) {
                brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
                bus->sdcnt.f1regdata++;
-               atomic_orr(val, &bus->intstatus);
+               atomic_or(val, &bus->intstatus);
        }
 
        return ret;
@@ -2712,7 +2703,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 
        /* Keep still-pending events for next scheduling */
        if (intstatus)
-               atomic_orr(intstatus, &bus->intstatus);
+               atomic_or(intstatus, &bus->intstatus);
 
        brcmf_sdio_clrintr(bus);
 
index 01de1a3bf94ef0965d03dfd9c6a23d5fd2f744b6..80d4228ba7543ee024bd95b72956595772509d27 100644 (file)
@@ -865,7 +865,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
 
        switch(type) {
        case HOSTAP_INTERFACE_AP:
-               dev->tx_queue_len = 0;  /* use main radio device queue */
+               dev->priv_flags |= IFF_NO_QUEUE;        /* use main radio device queue */
                dev->netdev_ops = &hostap_mgmt_netdev_ops;
                dev->type = ARPHRD_IEEE80211;
                dev->header_ops = &hostap_80211_ops;
@@ -874,7 +874,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
                dev->netdev_ops = &hostap_master_ops;
                break;
        default:
-               dev->tx_queue_len = 0;  /* use main radio device queue */
+               dev->priv_flags |= IFF_NO_QUEUE;        /* use main radio device queue */
                dev->netdev_ops = &hostap_netdev_ops;
        }
 
index c160dad03037bd01399a8747096a3653ee67cd3a..edc3dd42f8f86f9f1480a1e5a8011f0991e05962 100644 (file)
@@ -122,9 +122,8 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
 void iwl_down(struct iwl_priv *priv);
 void iwl_cancel_deferred_work(struct iwl_priv *priv);
 void iwlagn_prepare_restart(struct iwl_priv *priv);
-int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
-                                struct iwl_rx_cmd_buffer *rxb,
-                                struct iwl_device_cmd *cmd);
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+                    struct iwl_rx_cmd_buffer *rxb);
 
 bool iwl_check_for_ct_kill(struct iwl_priv *priv);
 
@@ -216,11 +215,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta, u16 tid);
 int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid);
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
-                                  struct iwl_rx_cmd_buffer *rxb,
-                                  struct iwl_device_cmd *cmd);
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd);
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+                                  struct iwl_rx_cmd_buffer *rxb);
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 
 static inline u32 iwl_tx_status_to_mac80211(u32 status)
 {
@@ -277,9 +274,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
 
 /* bt coex */
 void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd);
 void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
 void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
 void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
@@ -332,8 +326,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 
 int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                    struct iwl_link_quality_cmd *lq, u8 flags, bool init);
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd);
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                      struct ieee80211_sta *sta);
 
index 0ffb6ff1a255f8ac609ba3799c02489ab853eea1..b15e44f8d1bd7858939cd4379374dfd2aa37cd9c 100644 (file)
@@ -310,12 +310,8 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
        pos += scnprintf(buf + pos, buf_size - pos,
                         "NVM version: 0x%x\n", nvm_ver);
        for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
-               pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
-               hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
-                                  buf_size - pos, 0);
-               pos += strlen(buf + pos);
-               if (buf_size - pos > 0)
-                       buf[pos++] = '\n';
+               pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+                                ofs, ptr + ofs);
        }
 
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
index 3811878ab9cd2057ad44785c7f3dad5cb5edcf07..0ba3e56d6015b16feb34acc71c36a81d42453eb6 100644 (file)
@@ -669,6 +669,8 @@ struct iwl_priv {
        /* ieee device used by generic ieee processing code */
        struct ieee80211_hw *hw;
 
+       struct napi_struct *napi;
+
        struct list_head calib_results;
 
        struct workqueue_struct *workqueue;
@@ -678,9 +680,8 @@ struct iwl_priv {
        enum ieee80211_band band;
        u8 valid_contexts;
 
-       int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
-                                      struct iwl_rx_cmd_buffer *rxb,
-                                      struct iwl_device_cmd *cmd);
+       void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb);
 
        struct iwl_notif_wait_data notif_wait;
 
index 1d2223df5cb01fc84a06a55e431825175864ba34..ab45819c1fbbf6d0080813c26090bb095082672d 100644 (file)
@@ -659,9 +659,8 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
        return need_update;
 }
 
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd)
+static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
+                                        struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
@@ -669,7 +668,7 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
 
        if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
                /* bt coex disabled */
-               return 0;
+               return;
        }
 
        IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
@@ -714,7 +713,6 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
        /* FIXME: based on notification, adjust the prio_boost */
 
        priv->bt_ci_compliance = coex->bt_ci_compliance;
-       return 0;
 }
 
 void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
index 7acaa266b704699754a30bda7f5cb1547767e8ae..453f7c315ab525dcda15696676b16ef3b02f46f1 100644 (file)
@@ -250,12 +250,24 @@ static int __iwl_up(struct iwl_priv *priv)
                }
        }
 
+       ret = iwl_trans_start_hw(priv->trans);
+       if (ret) {
+               IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+               goto error;
+       }
+
        ret = iwl_run_init_ucode(priv);
        if (ret) {
                IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
                goto error;
        }
 
+       ret = iwl_trans_start_hw(priv->trans);
+       if (ret) {
+               IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+               goto error;
+       }
+
        ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
        if (ret) {
                IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
@@ -432,7 +444,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
                u32 error_id;
        } err_info;
        struct iwl_notification_wait status_wait;
-       static const u8 status_cmd[] = {
+       static const u16 status_cmd[] = {
                REPLY_WOWLAN_GET_STATUS,
        };
        struct iwlagn_wowlan_status status_data = {};
index 234e30f498b2dde18f3f355d5cfdcf13db6dc185..e7616f0ee6e88061589d84fb98357ea72c79eae1 100644 (file)
@@ -2029,17 +2029,6 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
        return false;
 }
 
-static void iwl_napi_add(struct iwl_op_mode *op_mode,
-                        struct napi_struct *napi,
-                        struct net_device *napi_dev,
-                        int (*poll)(struct napi_struct *, int),
-                        int weight)
-{
-       struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-
-       ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
-}
-
 static const struct iwl_op_mode_ops iwl_dvm_ops = {
        .start = iwl_op_mode_dvm_start,
        .stop = iwl_op_mode_dvm_stop,
@@ -2052,7 +2041,6 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
        .cmd_queue_full = iwl_cmd_queue_full,
        .nic_config = iwl_nic_config,
        .wimax_active = iwl_wimax_active,
-       .napi_add = iwl_napi_add,
 };
 
 /*****************************************************************************
index 3bd7c86e90d9fca5c43c6a95ac7795ec74ddeaf7..cef921c1a62325a9f9d8cd03273ec95e9eab2204 100644 (file)
@@ -1416,11 +1416,11 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
 /*
  * Try to switch to new modulation mode from legacy
  */
-static int rs_move_legacy_other(struct iwl_priv *priv,
-                               struct iwl_lq_sta *lq_sta,
-                               struct ieee80211_conf *conf,
-                               struct ieee80211_sta *sta,
-                               int index)
+static void rs_move_legacy_other(struct iwl_priv *priv,
+                                struct iwl_lq_sta *lq_sta,
+                                struct ieee80211_conf *conf,
+                                struct ieee80211_sta *sta,
+                                int index)
 {
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
        struct iwl_scale_tbl_info *search_tbl =
@@ -1575,7 +1575,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
 
        }
        search_tbl->lq_type = LQ_NONE;
-       return 0;
+       return;
 
 out:
        lq_sta->search_better_tbl = 1;
@@ -1584,17 +1584,15 @@ out:
                tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
-       return 0;
-
 }
 
 /*
  * Try to switch to new modulation mode from SISO
  */
-static int rs_move_siso_to_other(struct iwl_priv *priv,
-                                struct iwl_lq_sta *lq_sta,
-                                struct ieee80211_conf *conf,
-                                struct ieee80211_sta *sta, int index)
+static void rs_move_siso_to_other(struct iwl_priv *priv,
+                                 struct iwl_lq_sta *lq_sta,
+                                 struct ieee80211_conf *conf,
+                                 struct ieee80211_sta *sta, int index)
 {
        u8 is_green = lq_sta->is_green;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1747,7 +1745,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
                        break;
        }
        search_tbl->lq_type = LQ_NONE;
-       return 0;
+       return;
 
  out:
        lq_sta->search_better_tbl = 1;
@@ -1756,17 +1754,15 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
                tbl->action = IWL_SISO_SWITCH_ANTENNA1;
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
-
-       return 0;
 }
 
 /*
  * Try to switch to new modulation mode from MIMO2
  */
-static int rs_move_mimo2_to_other(struct iwl_priv *priv,
-                                struct iwl_lq_sta *lq_sta,
-                                struct ieee80211_conf *conf,
-                                struct ieee80211_sta *sta, int index)
+static void rs_move_mimo2_to_other(struct iwl_priv *priv,
+                                  struct iwl_lq_sta *lq_sta,
+                                  struct ieee80211_conf *conf,
+                                  struct ieee80211_sta *sta, int index)
 {
        s8 is_green = lq_sta->is_green;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1917,7 +1913,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
                        break;
        }
        search_tbl->lq_type = LQ_NONE;
-       return 0;
+       return;
  out:
        lq_sta->search_better_tbl = 1;
        tbl->action++;
@@ -1926,17 +1922,15 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
 
-       return 0;
-
 }
 
 /*
  * Try to switch to new modulation mode from MIMO3
  */
-static int rs_move_mimo3_to_other(struct iwl_priv *priv,
-                                struct iwl_lq_sta *lq_sta,
-                                struct ieee80211_conf *conf,
-                                struct ieee80211_sta *sta, int index)
+static void rs_move_mimo3_to_other(struct iwl_priv *priv,
+                                  struct iwl_lq_sta *lq_sta,
+                                  struct ieee80211_conf *conf,
+                                  struct ieee80211_sta *sta, int index)
 {
        s8 is_green = lq_sta->is_green;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -2093,7 +2087,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
                        break;
        }
        search_tbl->lq_type = LQ_NONE;
-       return 0;
+       return;
  out:
        lq_sta->search_better_tbl = 1;
        tbl->action++;
@@ -2101,9 +2095,6 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
                tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
-
-       return 0;
-
 }
 
 /*
index debec963c610d4693fb40ca22a27b9cd3b6310ac..4785203ae203dbd96287c0b9f584f02afa4c8b77 100644 (file)
@@ -123,9 +123,8 @@ const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
  *
  ******************************************************************************/
 
-static int iwlagn_rx_reply_error(struct iwl_priv *priv,
-                              struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_error(struct iwl_priv *priv,
+                                 struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_error_resp *err_resp = (void *)pkt->data;
@@ -136,11 +135,9 @@ static int iwlagn_rx_reply_error(struct iwl_priv *priv,
                err_resp->cmd_id,
                le16_to_cpu(err_resp->bad_cmd_seq_num),
                le32_to_cpu(err_resp->error_info));
-       return 0;
 }
 
-static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_csa_notification *csa = (void *)pkt->data;
@@ -152,7 +149,7 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
        struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
 
        if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
-               return 0;
+               return;
 
        if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
                rxon->channel = csa->channel;
@@ -165,13 +162,11 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                        le16_to_cpu(csa->channel));
                iwl_chswitch_done(priv, false);
        }
-       return 0;
 }
 
 
-static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
-                                         struct iwl_rx_cmd_buffer *rxb,
-                                         struct iwl_device_cmd *cmd)
+static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
+                                            struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_spectrum_notification *report = (void *)pkt->data;
@@ -179,17 +174,15 @@ static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
        if (!report->state) {
                IWL_DEBUG_11H(priv,
                        "Spectrum Measure Notification: Start\n");
-               return 0;
+               return;
        }
 
        memcpy(&priv->measure_report, report, sizeof(*report));
        priv->measurement_status |= MEASUREMENT_READY;
-       return 0;
 }
 
-static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
+                                    struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -197,24 +190,20 @@ static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
        IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
                     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
 #endif
-       return 0;
 }
 
-static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+                                               struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u32 __maybe_unused len = iwl_rx_packet_len(pkt);
        IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
                        "notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
        iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
-       return 0;
 }
 
-static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+                                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
@@ -232,8 +221,6 @@ static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
 #endif
 
        priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-       return 0;
 }
 
 /**
@@ -448,9 +435,8 @@ iwlagn_accumulative_statistics(struct iwl_priv *priv,
 }
 #endif
 
-static int iwlagn_rx_statistics(struct iwl_priv *priv,
-                             struct iwl_rx_cmd_buffer *rxb,
-                             struct iwl_device_cmd *cmd)
+static void iwlagn_rx_statistics(struct iwl_priv *priv,
+                                struct iwl_rx_cmd_buffer *rxb)
 {
        unsigned long stamp = jiffies;
        const int reg_recalib_period = 60;
@@ -505,7 +491,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
                          len, sizeof(struct iwl_bt_notif_statistics),
                          sizeof(struct iwl_notif_statistics));
                spin_unlock(&priv->statistics.lock);
-               return 0;
+               return;
        }
 
        change = common->temperature != priv->statistics.common.temperature ||
@@ -550,13 +536,10 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
                priv->lib->temperature(priv);
 
        spin_unlock(&priv->statistics.lock);
-
-       return 0;
 }
 
-static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_statistics(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_notif_statistics *stats = (void *)pkt->data;
@@ -572,15 +555,14 @@ static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
 #endif
                IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
        }
-       iwlagn_rx_statistics(priv, rxb, cmd);
-       return 0;
+
+       iwlagn_rx_statistics(priv, rxb);
 }
 
 /* Handle notification from uCode that card's power state is changing
  * due to software, hardware, or critical temperature RFKILL */
-static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+static void iwlagn_rx_card_state_notif(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@@ -627,12 +609,10 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
             test_bit(STATUS_RF_KILL_HW, &priv->status)))
                wiphy_rfkill_set_hw_state(priv->hw->wiphy,
                        test_bit(STATUS_RF_KILL_HW, &priv->status));
-       return 0;
 }
 
-static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
-                                      struct iwl_rx_cmd_buffer *rxb,
-                                      struct iwl_device_cmd *cmd)
+static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
+                                         struct iwl_rx_cmd_buffer *rxb)
 
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -649,14 +629,12 @@ static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
                if (!test_bit(STATUS_SCANNING, &priv->status))
                        iwl_init_sensitivity(priv);
        }
-       return 0;
 }
 
 /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
  * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+                                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
@@ -664,7 +642,6 @@ static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
        priv->ampdu_ref++;
        memcpy(&priv->last_phy_res, pkt->data,
               sizeof(struct iwl_rx_phy_res));
-       return 0;
 }
 
 /*
@@ -786,7 +763,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
 
        memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
-       ieee80211_rx(priv->hw, skb);
+       ieee80211_rx_napi(priv->hw, skb, priv->napi);
 }
 
 static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
@@ -890,9 +867,8 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
 }
 
 /* Called for REPLY_RX_MPDU_CMD */
-static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
-                           struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+                              struct iwl_rx_cmd_buffer *rxb)
 {
        struct ieee80211_hdr *header;
        struct ieee80211_rx_status rx_status = {};
@@ -906,7 +882,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
 
        if (!priv->last_phy_res_valid) {
                IWL_ERR(priv, "MPDU frame without cached PHY data\n");
-               return 0;
+               return;
        }
        phy_res = &priv->last_phy_res;
        amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
@@ -919,14 +895,14 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
        if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
                IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
                                phy_res->cfg_phy_cnt);
-               return 0;
+               return;
        }
 
        if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
            !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
                IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
                                le32_to_cpu(rx_pkt_status));
-               return 0;
+               return;
        }
 
        /* This will be used in several places later */
@@ -998,12 +974,10 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
 
        iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
                                    rxb, &rx_status);
-       return 0;
 }
 
-static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
-                                     struct iwl_rx_cmd_buffer *rxb,
-                                     struct iwl_device_cmd *cmd)
+static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_wipan_noa_data *new_data, *old_data;
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -1041,8 +1015,6 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
 
        if (old_data)
                kfree_rcu(old_data, rcu_head);
-
-       return 0;
 }
 
 /**
@@ -1053,8 +1025,7 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
  */
 void iwl_setup_rx_handlers(struct iwl_priv *priv)
 {
-       int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd);
+       void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 
        handlers = priv->rx_handlers;
 
@@ -1102,12 +1073,11 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
                iwlagn_bt_rx_handler_setup(priv);
 }
 
-int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
-                   struct iwl_device_cmd *cmd)
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+                    struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-       int err = 0;
 
        /*
         * Do the notification wait before RX handlers so
@@ -1121,12 +1091,11 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
         *   rx_handlers table.  See iwl_setup_rx_handlers() */
        if (priv->rx_handlers[pkt->hdr.cmd]) {
                priv->rx_handlers_stats[pkt->hdr.cmd]++;
-               err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
+               priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
        } else {
                /* No handling needed */
                IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
                             iwl_dvm_get_cmd_string(pkt->hdr.cmd),
                             pkt->hdr.cmd);
        }
-       return err;
 }
index ed50de6362ed1d5dcd56b45243ff0b140dcbafe5..85ceceb34fcca76907cadbae633b49b107acdd91 100644 (file)
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -123,7 +124,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
        __le32 old_filter = send->filter_flags;
        u8 old_dev_type = send->dev_type;
        int ret;
-       static const u8 deactivate_cmd[] = {
+       static const u16 deactivate_cmd[] = {
                REPLY_WIPAN_DEACTIVATION_COMPLETE
        };
 
index 43bef901e8f9a80a7c3a56f63a7d2da93fda7076..648159495bbcb7df94c54dd5858449f360c47074 100644 (file)
@@ -247,9 +247,8 @@ void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
 }
 
 /* Service response to REPLY_SCAN_CMD (0x80) */
-static int iwl_rx_reply_scan(struct iwl_priv *priv,
-                             struct iwl_rx_cmd_buffer *rxb,
-                             struct iwl_device_cmd *cmd)
+static void iwl_rx_reply_scan(struct iwl_priv *priv,
+                             struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -257,13 +256,11 @@ static int iwl_rx_reply_scan(struct iwl_priv *priv,
 
        IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
 #endif
-       return 0;
 }
 
 /* Service SCAN_START_NOTIFICATION (0x82) */
-static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
+                                   struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_scanstart_notification *notif = (void *)pkt->data;
@@ -277,14 +274,11 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
                       le32_to_cpu(notif->tsf_high),
                       le32_to_cpu(notif->tsf_low),
                       notif->status, notif->beacon_timer);
-
-       return 0;
 }
 
 /* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
-                                     struct iwl_rx_cmd_buffer *rxb,
-                                     struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
+                                     struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -303,13 +297,11 @@ static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
                       le32_to_cpu(notif->statistics[0]),
                       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
 #endif
-       return 0;
 }
 
 /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
-                                      struct iwl_rx_cmd_buffer *rxb,
-                                      struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;
@@ -356,7 +348,6 @@ static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
                queue_work(priv->workqueue,
                           &priv->bt_traffic_change_work);
        }
-       return 0;
 }
 
 void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
index 6ec86adbe4a1fcc9df9aaf0d8b73ca4b48ac2e9b..0fa67d3b72356737256bae9c4f7d838b38edfb68 100644 (file)
@@ -60,41 +60,28 @@ static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
        return 0;
 }
 
-static int iwl_process_add_sta_resp(struct iwl_priv *priv,
-                                   struct iwl_addsta_cmd *addsta,
-                                   struct iwl_rx_packet *pkt)
+static void iwl_process_add_sta_resp(struct iwl_priv *priv,
+                                    struct iwl_rx_packet *pkt)
 {
        struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
-       u8 sta_id = addsta->sta.sta_id;
-       int ret = -EIO;
 
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
-                       pkt->hdr.flags);
-               return ret;
-       }
-
-       IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
-                      sta_id);
+       IWL_DEBUG_INFO(priv, "Processing response for adding station\n");
 
        spin_lock_bh(&priv->sta_lock);
 
        switch (add_sta_resp->status) {
        case ADD_STA_SUCCESS_MSK:
                IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
-               ret = iwl_sta_ucode_activate(priv, sta_id);
                break;
        case ADD_STA_NO_ROOM_IN_TABLE:
-               IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
-                       sta_id);
+               IWL_ERR(priv, "Adding station failed, no room in table.\n");
                break;
        case ADD_STA_NO_BLOCK_ACK_RESOURCE:
-               IWL_ERR(priv, "Adding station %d failed, no block ack "
-                       "resource.\n", sta_id);
+               IWL_ERR(priv,
+                       "Adding station failed, no block ack resource.\n");
                break;
        case ADD_STA_MODIFY_NON_EXIST_STA:
-               IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
-                       sta_id);
+               IWL_ERR(priv, "Attempting to modify non-existing station\n");
                break;
        default:
                IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
@@ -102,37 +89,14 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
                break;
        }
 
-       IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
-                      priv->stations[sta_id].sta.mode ==
-                      STA_CONTROL_MODIFY_MSK ?  "Modified" : "Added",
-                      sta_id, priv->stations[sta_id].sta.sta.addr);
-
-       /*
-        * XXX: The MAC address in the command buffer is often changed from
-        * the original sent to the device. That is, the MAC address
-        * written to the command buffer often is not the same MAC address
-        * read from the command buffer when the command returns. This
-        * issue has not yet been resolved and this debugging is left to
-        * observe the problem.
-        */
-       IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
-                      priv->stations[sta_id].sta.mode ==
-                      STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
-                      addsta->sta.addr);
        spin_unlock_bh(&priv->sta_lock);
-
-       return ret;
 }
 
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
-       if (!cmd)
-               return 0;
-
-       return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
+       iwl_process_add_sta_resp(priv, pkt);
 }
 
 int iwl_send_add_sta(struct iwl_priv *priv,
@@ -146,6 +110,8 @@ int iwl_send_add_sta(struct iwl_priv *priv,
                .len = { sizeof(*sta), },
        };
        u8 sta_id __maybe_unused = sta->sta.sta_id;
+       struct iwl_rx_packet *pkt;
+       struct iwl_add_sta_resp *add_sta_resp;
 
        IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
                       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
@@ -159,16 +125,22 @@ int iwl_send_add_sta(struct iwl_priv *priv,
 
        if (ret || (flags & CMD_ASYNC))
                return ret;
-       /*else the command was successfully sent in SYNC mode, need to free
-        * the reply page */
 
-       iwl_free_resp(&cmd);
+       pkt = cmd.resp_pkt;
+       add_sta_resp = (void *)pkt->data;
 
-       if (cmd.handler_status)
-               IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
-                       cmd.handler_status);
+       /* debug messages are printed in the handler */
+       if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) {
+               spin_lock_bh(&priv->sta_lock);
+               ret = iwl_sta_ucode_activate(priv, sta_id);
+               spin_unlock_bh(&priv->sta_lock);
+       } else {
+               ret = -EIO;
+       }
 
-       return cmd.handler_status;
+       iwl_free_resp(&cmd);
+
+       return ret;
 }
 
 bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
@@ -452,6 +424,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
        struct iwl_rx_packet *pkt;
        int ret;
        struct iwl_rem_sta_cmd rm_sta_cmd;
+       struct iwl_rem_sta_resp *rem_sta_resp;
 
        struct iwl_host_cmd cmd = {
                .id = REPLY_REMOVE_STA,
@@ -471,29 +444,23 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
                return ret;
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
-                         pkt->hdr.flags);
-               ret = -EIO;
-       }
+       rem_sta_resp = (void *)pkt->data;
 
-       if (!ret) {
-               struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data;
-               switch (rem_sta_resp->status) {
-               case REM_STA_SUCCESS_MSK:
-                       if (!temporary) {
-                               spin_lock_bh(&priv->sta_lock);
-                               iwl_sta_ucode_deactivate(priv, sta_id);
-                               spin_unlock_bh(&priv->sta_lock);
-                       }
-                       IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
-                       break;
-               default:
-                       ret = -EIO;
-                       IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
-                       break;
+       switch (rem_sta_resp->status) {
+       case REM_STA_SUCCESS_MSK:
+               if (!temporary) {
+                       spin_lock_bh(&priv->sta_lock);
+                       iwl_sta_ucode_deactivate(priv, sta_id);
+                       spin_unlock_bh(&priv->sta_lock);
                }
+               IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+               break;
+       default:
+               ret = -EIO;
+               IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+               break;
        }
+
        iwl_free_resp(&cmd);
 
        return ret;
index 275df12a6045044cdee1ddeba1c8840caf491923..bddd19769035130dde04a589aa3d5d1cba0379d5 100644 (file)
@@ -1128,8 +1128,7 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
        }
 }
 
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1273,8 +1272,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                skb = __skb_dequeue(&skbs);
                ieee80211_tx_status(priv->hw, skb);
        }
-
-       return 0;
 }
 
 /**
@@ -1283,9 +1280,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
  * Handles block-acknowledge notification from device, which reports success
  * of frames sent via aggregation.
  */
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
-                                  struct iwl_rx_cmd_buffer *rxb,
-                                  struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+                                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
@@ -1306,7 +1302,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
        if (scd_flow >= priv->cfg->base_params->num_of_queues) {
                IWL_ERR(priv,
                        "BUG_ON scd_flow is bigger than number of queues\n");
-               return 0;
+               return;
        }
 
        sta_id = ba_resp->sta_id;
@@ -1319,7 +1315,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                if (unlikely(ba_resp->bitmap))
                        IWL_ERR(priv, "Received BA when not expected\n");
                spin_unlock_bh(&priv->sta_lock);
-               return 0;
+               return;
        }
 
        if (unlikely(scd_flow != agg->txq_id)) {
@@ -1333,7 +1329,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                                    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
                                    scd_flow, sta_id, tid, agg->txq_id);
                spin_unlock_bh(&priv->sta_lock);
-               return 0;
+               return;
        }
 
        __skb_queue_head_init(&reclaimed_skbs);
@@ -1413,6 +1409,4 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                skb = __skb_dequeue(&reclaimed_skbs);
                ieee80211_tx_status(priv->hw, skb);
        }
-
-       return 0;
 }
index 5244e43bfafbc4617720097660ec693f5d30742f..931a8e4269ef16220a5c57db37a9175913ba3bc3 100644 (file)
@@ -3,6 +3,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -327,7 +328,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
        const struct fw_img *fw;
        int ret;
        enum iwl_ucode_type old_type;
-       static const u8 alive_cmd[] = { REPLY_ALIVE };
+       static const u16 alive_cmd[] = { REPLY_ALIVE };
 
        fw = iwl_get_ucode_image(priv, ucode_type);
        if (WARN_ON(!fw))
@@ -406,7 +407,7 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
 int iwl_run_init_ucode(struct iwl_priv *priv)
 {
        struct iwl_notification_wait calib_wait;
-       static const u8 calib_complete[] = {
+       static const u16 calib_complete[] = {
                CALIBRATION_RES_NOTIFICATION,
                CALIBRATION_COMPLETE_NOTIFICATION
        };
index cc35f796d406156286164410aa7d1adcd087c015..413b63e09717ad9445bbe04b693ce2782d7ec2d4 100644 (file)
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX  15
+#define IWL7260_UCODE_API_MAX  16
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   12
 #define IWL3165_UCODE_API_OK   13
 
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN  10
+#define IWL7260_UCODE_API_MIN  12
 #define IWL3165_UCODE_API_MIN  13
 
 /* NVM versions */
index 72040cd0b9794e6790daaa7f887089692ccb1d5e..8324bc8987a944c913125b8c47b35db7c300c8f7 100644 (file)
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  15
+#define IWL8000_UCODE_API_MAX  16
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK   12
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN  10
+#define IWL8000_UCODE_API_MIN  12
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION            0x0a1d
@@ -97,8 +97,9 @@
 #define DEFAULT_NVM_FILE_FAMILY_8000B          "nvmData-8000B"
 #define DEFAULT_NVM_FILE_FAMILY_8000C          "nvmData-8000C"
 
-/* Max SDIO RX aggregation size of the ADDBA request/response */
-#define MAX_RX_AGG_SIZE_8260_SDIO      28
+/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
+#define MAX_RX_AGG_SIZE_8260_SDIO      21
+#define MAX_TX_AGG_SIZE_8260_SDIO      40
 
 /* Max A-MPDU exponent for HT and VHT */
 #define MAX_HT_AMPDU_EXPONENT_8260_SDIO        IEEE80211_HT_MAX_AMPDU_32K
@@ -154,6 +155,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
        .led_mode = IWL_LED_RF_STATE,                                   \
        .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,           \
        .d0i3 = true,                                                   \
+       .features = NETIF_F_RXCSUM,                                     \
        .non_shared_ant = ANT_A,                                        \
        .dccm_offset = IWL8260_DCCM_OFFSET,                             \
        .dccm_len = IWL8260_DCCM_LEN,                                   \
@@ -203,6 +205,7 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
        .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+       .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
        .disable_dummy_notification = true,
        .max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
        .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
@@ -216,6 +219,7 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
        .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+       .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
        .bt_shared_single_ant = true,
        .disable_dummy_notification = true,
        .max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
index 08c14afeb1480aca04d61bd083795eb2aec401c2..939fa229c038a1dc6beb4288a4cb7364e1238a40 100644 (file)
@@ -297,6 +297,7 @@ struct iwl_pwr_tx_backoff {
  *     mode set
  * @d0i3: device uses d0i3 instead of d3
  * @nvm_hw_section_num: the ID of the HW NVM section
+ * @features: hw features, any combination of feature_whitelist
  * @pwr_tx_backoffs: translation table between power limits and backoffs
  * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
  * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
@@ -348,6 +349,7 @@ struct iwl_cfg {
        bool no_power_up_nic_in_init;
        const char *default_nvm_file_B_step;
        const char *default_nvm_file_C_step;
+       netdev_features_t features;
        unsigned int max_rx_agg_size;
        bool disable_dummy_notification;
        unsigned int max_tx_agg_size;
index faa17f2e352adf72b740ffc37dfb03e747b003e1..543abeaffcf0017cd1ede2d6fb0ba3d03b3b392b 100644 (file)
 #define CSR_INT_BIT_FH_TX        (1 << 27) /* Tx DMA FH_INT[1:0] */
 #define CSR_INT_BIT_SCD          (1 << 26) /* TXQ pointer advanced */
 #define CSR_INT_BIT_SW_ERR       (1 << 25) /* uCode error */
+#define CSR_INT_BIT_PAGING       (1 << 24) /* SDIO PAGING */
 #define CSR_INT_BIT_RF_KILL      (1 << 7)  /* HW RFKILL switch GP_CNTRL[27] toggled */
 #define CSR_INT_BIT_CT_KILL      (1 << 6)  /* Critical temp (chip too hot) rfkill */
 #define CSR_INT_BIT_SW_RX        (1 << 3)  /* Rx, command responses */
                                 CSR_INT_BIT_HW_ERR  | \
                                 CSR_INT_BIT_FH_TX   | \
                                 CSR_INT_BIT_SW_ERR  | \
+                                CSR_INT_BIT_PAGING  | \
                                 CSR_INT_BIT_RF_KILL | \
                                 CSR_INT_BIT_SW_RX   | \
                                 CSR_INT_BIT_WAKEUP  | \
@@ -422,6 +424,7 @@ enum {
 
 /* DRAM INT TABLE */
 #define CSR_DRAM_INT_TBL_ENABLE                (1 << 31)
+#define CSR_DRAM_INIT_TBL_WRITE_POINTER        (1 << 28)
 #define CSR_DRAM_INIT_TBL_WRAP_CHECK   (1 << 27)
 
 /*
index 04e6649340b8c2b34d44f196aa20c531cbb83197..71a78cede9b079d61a6d557b0fa362a5d3fe7bc0 100644 (file)
@@ -35,8 +35,8 @@
 TRACE_EVENT(iwlwifi_dev_tx_data,
        TP_PROTO(const struct device *dev,
                 struct sk_buff *skb,
-                void *data, size_t data_len),
-       TP_ARGS(dev, skb, data, data_len),
+                u8 hdr_len, size_t data_len),
+       TP_ARGS(dev, skb, hdr_len, data_len),
        TP_STRUCT__entry(
                DEV_ENTRY
 
@@ -45,7 +45,8 @@ TRACE_EVENT(iwlwifi_dev_tx_data,
        TP_fast_assign(
                DEV_ASSIGN;
                if (iwl_trace_data(skb))
-                       memcpy(__get_dynamic_array(data), data, data_len);
+                       skb_copy_bits(skb, hdr_len,
+                                     __get_dynamic_array(data), data_len);
        ),
        TP_printk("[%s] TX frame data", __get_str(dev))
 );
index 948ce0802fa7ceae995d0257a6a3f8dace6ccf14..eb4b99a1c8cd432e3cd3a3febf0b4b58f8af26ef 100644 (file)
@@ -36,7 +36,7 @@
 TRACE_EVENT(iwlwifi_dev_hcmd,
        TP_PROTO(const struct device *dev,
                 struct iwl_host_cmd *cmd, u16 total_size,
-                struct iwl_cmd_header *hdr),
+                struct iwl_cmd_header_wide *hdr),
        TP_ARGS(dev, cmd, total_size, hdr),
        TP_STRUCT__entry(
                DEV_ENTRY
@@ -44,11 +44,14 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
                __field(u32, flags)
        ),
        TP_fast_assign(
-               int i, offset = sizeof(*hdr);
+               int i, offset = sizeof(struct iwl_cmd_header);
+
+               if (hdr->group_id)
+                       offset = sizeof(struct iwl_cmd_header_wide);
 
                DEV_ASSIGN;
                __entry->flags = cmd->flags;
-               memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
+               memcpy(__get_dynamic_array(hcmd), hdr, offset);
 
                for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                        if (!cmd->len[i])
@@ -58,8 +61,9 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
                        offset += cmd->len[i];
                }
        ),
-       TP_printk("[%s] hcmd %#.2x (%ssync)",
-                 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
+       TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)",
+                 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1],
+                 ((u8 *)__get_dynamic_array(hcmd))[0],
                  __entry->flags & CMD_ASYNC ? "a" : "")
 );
 
index 6685259927f81bd979c20b90d0f9970358c98d46..a86aa5bcee7dd082105b18f56c01009aa910c53f 100644 (file)
@@ -372,6 +372,30 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
        return 0;
 }
 
+static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
+                               const u32 len)
+{
+       struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
+       struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
+
+       if (len < sizeof(*fw_capa))
+               return -EINVAL;
+
+       capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
+       capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
+       capa->max_ap_cache_per_scan =
+               le32_to_cpu(fw_capa->max_ap_cache_per_scan);
+       capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
+       capa->max_scan_reporting_threshold =
+               le32_to_cpu(fw_capa->max_scan_reporting_threshold);
+       capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
+       capa->max_significant_change_aps =
+               le32_to_cpu(fw_capa->max_significant_change_aps);
+       capa->max_bssid_history_entries =
+               le32_to_cpu(fw_capa->max_bssid_history_entries);
+       return 0;
+}
+
 /*
  * Gets uCode section from tlv.
  */
@@ -573,13 +597,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
        size_t len = ucode_raw->size;
        const u8 *data;
        u32 tlv_len;
+       u32 usniffer_img;
        enum iwl_ucode_tlv_type tlv_type;
        const u8 *tlv_data;
        char buildstr[25];
-       u32 build;
+       u32 build, paging_mem_size;
        int num_of_cpus;
        bool usniffer_images = false;
        bool usniffer_req = false;
+       bool gscan_capa = false;
 
        if (len < sizeof(*ucode)) {
                IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -955,12 +981,46 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                            IWL_UCODE_REGULAR_USNIFFER,
                                            tlv_len);
                        break;
+               case IWL_UCODE_TLV_PAGING:
+                       if (tlv_len != sizeof(u32))
+                               goto invalid_tlv_len;
+                       paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+
+                       IWL_DEBUG_FW(drv,
+                                    "Paging: paging enabled (size = %u bytes)\n",
+                                    paging_mem_size);
+
+                       if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
+                               IWL_ERR(drv,
+                                       "Paging: driver supports up to %lu bytes for paging image\n",
+                                       MAX_PAGING_IMAGE_SIZE);
+                               return -EINVAL;
+                       }
+
+                       if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
+                               IWL_ERR(drv,
+                                       "Paging: image isn't multiple %lu\n",
+                                       FW_PAGING_SIZE);
+                               return -EINVAL;
+                       }
+
+                       drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
+                               paging_mem_size;
+                       usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
+                       drv->fw.img[usniffer_img].paging_mem_size =
+                               paging_mem_size;
+                       break;
                case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        drv->fw.sdio_adma_addr =
                                le32_to_cpup((__le32 *)tlv_data);
                        break;
+               case IWL_UCODE_TLV_FW_GSCAN_CAPA:
+                       if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len))
+                               goto invalid_tlv_len;
+                       gscan_capa = true;
+                       break;
                default:
                        IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
                        break;
@@ -979,6 +1039,16 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                return -EINVAL;
        }
 
+       /*
+        * If ucode advertises that it supports GSCAN but GSCAN
+        * capabilities TLV is not present, warn and continue without GSCAN.
+        */
+       if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+           WARN(!gscan_capa,
+                "GSCAN is supported but capabilities TLV is unavailable\n"))
+               __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
+                           capa->_capa);
+
        return 0;
 
  invalid_tlv_len:
index 21302b6f2bfd79a8e8617a345e3771f6608c0145..acc3d186c5c101b0834fb4e424fe2203a66bb2d0 100644 (file)
@@ -713,12 +713,12 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
        struct ieee80211_channel *chan = &data->channels[0];
        int n = 0, idx = 0;
 
-       while (chan->band != band && idx < n_channels)
+       while (idx < n_channels && chan->band != band)
                chan = &data->channels[++idx];
 
        sband->channels = &data->channels[idx];
 
-       while (chan->band == band && idx < n_channels) {
+       while (idx < n_channels && chan->band == band) {
                chan = &data->channels[++idx];
                n++;
        }
index d45dc021cda2c0715b8d7e740ff90b46589ae141..d56064861a9c353dfb9fcf1720e1abde6c3fcf9d 100644 (file)
@@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 /**
  * struct iwl_rb_status - reserve buffer status
  *     host memory mapped FH registers
index e57dbd0ef2e1f10f8d16bd26537f2ee829b5eade..af5b3201492cb7a690ec993d784866b1630d91a5 100644 (file)
@@ -84,6 +84,8 @@
  * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
  * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
  *     Structured as &struct iwl_fw_error_dump_trigger_desc.
+ * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
+ *     &struct iwl_fw_error_dump_rb
  */
 enum iwl_fw_error_dump_type {
        /* 0 is deprecated */
@@ -97,6 +99,7 @@ enum iwl_fw_error_dump_type {
        IWL_FW_ERROR_DUMP_FH_REGS = 8,
        IWL_FW_ERROR_DUMP_MEM = 9,
        IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
+       IWL_FW_ERROR_DUMP_RB = 11,
 
        IWL_FW_ERROR_DUMP_MAX,
 };
@@ -222,6 +225,20 @@ struct iwl_fw_error_dump_mem {
        u8 data[];
 };
 
+/**
+ * struct iwl_fw_error_dump_rb - content of an Receive Buffer
+ * @index: the index of the Receive Buffer in the Rx queue
+ * @rxq: the RB's Rx queue
+ * @reserved:
+ * @data: the content of the Receive Buffer
+ */
+struct iwl_fw_error_dump_rb {
+       __le32 index;
+       __le32 rxq;
+       __le32 reserved;
+       u8 data[];
+};
+
 /**
  * iwl_fw_error_next_data - advance fw error dump data pointer
  * @data: previous data block
index a9b5ae4ebec021277efc454730d03b562add3210..75809abee75988c25d9591263b7c4e55a2bb0ce3 100644 (file)
@@ -132,12 +132,14 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_API_CHANGES_SET   = 29,
        IWL_UCODE_TLV_ENABLED_CAPABILITIES      = 30,
        IWL_UCODE_TLV_N_SCAN_CHANNELS           = 31,
+       IWL_UCODE_TLV_PAGING            = 32,
        IWL_UCODE_TLV_SEC_RT_USNIFFER   = 34,
        IWL_UCODE_TLV_SDIO_ADMA_ADDR    = 35,
        IWL_UCODE_TLV_FW_VERSION        = 36,
        IWL_UCODE_TLV_FW_DBG_DEST       = 38,
        IWL_UCODE_TLV_FW_DBG_CONF       = 39,
        IWL_UCODE_TLV_FW_DBG_TRIGGER    = 40,
+       IWL_UCODE_TLV_FW_GSCAN_CAPA     = 50,
 };
 
 struct iwl_ucode_tlv {
@@ -247,9 +249,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
  * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
  * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
  * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
- * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
- *     regardless of the band or the number of the probes. FW will calculate
- *     the actual dwell time.
+ * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
  * @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
  *     through the dedicated host command.
  * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
@@ -266,7 +266,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WIFI_MCC_UPDATE       = (__force iwl_ucode_tlv_api_t)9,
        IWL_UCODE_TLV_API_HDC_PHASE_0           = (__force iwl_ucode_tlv_api_t)10,
        IWL_UCODE_TLV_API_TX_POWER_DEV          = (__force iwl_ucode_tlv_api_t)11,
-       IWL_UCODE_TLV_API_BASIC_DWELL           = (__force iwl_ucode_tlv_api_t)13,
+       IWL_UCODE_TLV_API_WIDE_CMD_HDR          = (__force iwl_ucode_tlv_api_t)14,
        IWL_UCODE_TLV_API_SCD_CFG               = (__force iwl_ucode_tlv_api_t)15,
        IWL_UCODE_TLV_API_SINGLE_SCAN_EBS       = (__force iwl_ucode_tlv_api_t)16,
        IWL_UCODE_TLV_API_ASYNC_DTM             = (__force iwl_ucode_tlv_api_t)17,
@@ -284,6 +284,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
  * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
  * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
  * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
  * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
  *     tx power value into TPC Report action frame and Link Measurement Report
@@ -298,6 +299,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
  * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
+ * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
  * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
  * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
  * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@@ -305,12 +307,14 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  *     IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
  *     is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
  */
 enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)0,
        IWL_UCODE_TLV_CAPA_LAR_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)1,
        IWL_UCODE_TLV_CAPA_UMAC_SCAN                    = (__force iwl_ucode_tlv_capa_t)2,
        IWL_UCODE_TLV_CAPA_BEAMFORMER                   = (__force iwl_ucode_tlv_capa_t)3,
+       IWL_UCODE_TLV_CAPA_TOF_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)5,
        IWL_UCODE_TLV_CAPA_TDLS_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)6,
        IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT    = (__force iwl_ucode_tlv_capa_t)8,
        IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT      = (__force iwl_ucode_tlv_capa_t)9,
@@ -320,10 +324,12 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH          = (__force iwl_ucode_tlv_capa_t)13,
        IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = (__force iwl_ucode_tlv_capa_t)18,
        IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT         = (__force iwl_ucode_tlv_capa_t)19,
+       IWL_UCODE_TLV_CAPA_CSUM_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)21,
        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = (__force iwl_ucode_tlv_capa_t)22,
        IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = (__force iwl_ucode_tlv_capa_t)28,
        IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC                = (__force iwl_ucode_tlv_capa_t)29,
        IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = (__force iwl_ucode_tlv_capa_t)30,
+       IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT                = (__force iwl_ucode_tlv_capa_t)31,
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -341,8 +347,9 @@ enum iwl_ucode_tlv_capa {
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
-#define IWL_UCODE_SECTION_MAX 12
+#define IWL_UCODE_SECTION_MAX 16
 #define CPU1_CPU2_SEPARATOR_SECTION    0xFFFFCCCC
+#define PAGING_SEPARATOR_SECTION       0xAAAABBBB
 
 /* uCode version contains 4 values: Major/Minor/API/Serial */
 #define IWL_UCODE_MAJOR(ver)   (((ver) & 0xFF000000) >> 24)
@@ -412,6 +419,12 @@ enum iwl_fw_dbg_reg_operator {
        PRPH_ASSIGN,
        PRPH_SETBIT,
        PRPH_CLEARBIT,
+
+       INDIRECT_ASSIGN,
+       INDIRECT_SETBIT,
+       INDIRECT_CLEARBIT,
+
+       PRPH_BLOCKBIT,
 };
 
 /**
@@ -485,10 +498,13 @@ struct iwl_fw_dbg_conf_hcmd {
  *
  * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
  * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
+ * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to
+ *     collect only monitor data
  */
 enum iwl_fw_dbg_trigger_mode {
        IWL_FW_DBG_TRIGGER_START = BIT(0),
        IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+       IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
 };
 
 /**
@@ -718,4 +734,28 @@ struct iwl_fw_dbg_conf_tlv {
        struct iwl_fw_dbg_conf_hcmd hcmd;
 } __packed;
 
+/**
+ * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold. in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ *     change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ *     hold.
+ */
+struct iwl_fw_gscan_capabilities {
+       __le32 max_scan_cache_size;
+       __le32 max_scan_buckets;
+       __le32 max_ap_cache_per_scan;
+       __le32 max_rssi_sample_size;
+       __le32 max_scan_reporting_threshold;
+       __le32 max_hotlist_aps;
+       __le32 max_significant_change_aps;
+       __le32 max_bssid_history_entries;
+} __packed;
+
 #endif  /* __iwl_fw_file_h__ */
index 3e3c9d8b3c37dd93b7ab59529bf5c7b3cd9d7ae0..45e732150d28ab9de90d322528fc8429c2a26d97 100644 (file)
@@ -133,6 +133,7 @@ struct fw_desc {
 struct fw_img {
        struct fw_desc sec[IWL_UCODE_SECTION_MAX];
        bool is_dual_cpus;
+       u32 paging_mem_size;
 };
 
 struct iwl_sf_region {
@@ -140,6 +141,48 @@ struct iwl_sf_region {
        u32 size;
 };
 
+/*
+ * Block paging calculations
+ */
+#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
+#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */
+#define PAGE_PER_GROUP_2_EXP_SIZE 3
+/* 8 pages per group */
+#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE)
+/* don't change, support only 32KB size */
+#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
+/* 32K == 2^15 */
+#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
+
+/*
+ * Image paging calculations
+ */
+#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
+/* 2^5 == 32 blocks per image */
+#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE)
+/* maximum image size 1024KB */
+#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE)
+
+/* Virtual address signature */
+#define PAGING_ADDR_SIG 0xAA000000
+
+#define PAGING_CMD_IS_SECURED BIT(9)
+#define PAGING_CMD_IS_ENABLED BIT(8)
+#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS        0
+#define PAGING_TLV_SECURE_MASK 1
+
+/**
+ * struct iwl_fw_paging
+ * @fw_paging_phys: page phy pointer
+ * @fw_paging_block: pointer to the allocated block
+ * @fw_paging_size: page size
+ */
+struct iwl_fw_paging {
+       dma_addr_t fw_paging_phys;
+       struct page *fw_paging_block;
+       u32 fw_paging_size;
+};
+
 /**
  * struct iwl_fw_cscheme_list - a cipher scheme list
  * @size: a number of entries
@@ -150,6 +193,30 @@ struct iwl_fw_cscheme_list {
        struct iwl_fw_cipher_scheme cs[];
 } __packed;
 
+/**
+ * struct iwl_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold. in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ *     change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ *     hold.
+ */
+struct iwl_gscan_capabilities {
+       u32 max_scan_cache_size;
+       u32 max_scan_buckets;
+       u32 max_ap_cache_per_scan;
+       u32 max_rssi_sample_size;
+       u32 max_scan_reporting_threshold;
+       u32 max_hotlist_aps;
+       u32 max_significant_change_aps;
+       u32 max_bssid_history_entries;
+};
+
 /**
  * struct iwl_fw - variables associated with the firmware
  *
@@ -208,6 +275,7 @@ struct iwl_fw {
        struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
        size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
        u8 dbg_dest_reg_num;
+       struct iwl_gscan_capabilities gscan_capa;
 };
 
 static inline const char *get_fw_dbg_mode_string(int mode)
index b5bc959b1dfe0bed54cf9423c6f01c23e332062e..6caf2affbbb52d5d0fc356db0f4b3d1360f4be8d 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -98,7 +99,8 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
                                continue;
 
                        for (i = 0; i < w->n_cmds; i++) {
-                               if (w->cmds[i] == pkt->hdr.cmd) {
+                               if (w->cmds[i] ==
+                                   WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
                                        found = true;
                                        break;
                                }
@@ -136,7 +138,7 @@ IWL_EXPORT_SYMBOL(iwl_abort_notification_waits);
 void
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
                           struct iwl_notification_wait *wait_entry,
-                          const u8 *cmds, int n_cmds,
+                          const u16 *cmds, int n_cmds,
                           bool (*fn)(struct iwl_notif_wait_data *notif_wait,
                                      struct iwl_rx_packet *pkt, void *data),
                           void *fn_data)
@@ -147,7 +149,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
        wait_entry->fn = fn;
        wait_entry->fn_data = fn_data;
        wait_entry->n_cmds = n_cmds;
-       memcpy(wait_entry->cmds, cmds, n_cmds);
+       memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16));
        wait_entry->triggered = false;
        wait_entry->aborted = false;
 
index 95af97a6c2cfa3886b6aa4fa5f1bb1213854cdf7..dbe8234521defb540ca545d3729b30e952bb4453 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -105,7 +106,7 @@ struct iwl_notification_wait {
                   struct iwl_rx_packet *pkt, void *data);
        void *fn_data;
 
-       u8 cmds[MAX_NOTIF_CMDS];
+       u16 cmds[MAX_NOTIF_CMDS];
        u8 n_cmds;
        bool triggered, aborted;
 };
@@ -121,7 +122,7 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
 void __acquires(wait_entry)
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
                           struct iwl_notification_wait *wait_entry,
-                          const u8 *cmds, int n_cmds,
+                          const u16 *cmds, int n_cmds,
                           bool (*fn)(struct iwl_notif_wait_data *notif_data,
                                      struct iwl_rx_packet *pkt, void *data),
                           void *fn_data);
index ce1cdd7604e8a156cf7356466b9232bdc176d9db..b47fe9d6b97abacdc2c72256f261c8ee520da22e 100644 (file)
@@ -116,10 +116,6 @@ struct iwl_cfg;
  *     May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
  *     HCMD this Rx responds to. Can't sleep.
- * @napi_add: NAPI initialization. The transport is fully responsible for NAPI,
- *     but the higher layers need to know about it (in particular mac80211 to
- *     to able to call the right NAPI RX functions); this function is needed
- *     to eventually call netif_napi_add() with higher layer involvement.
  * @queue_full: notifies that a HW queue is full.
  *     Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
@@ -148,13 +144,8 @@ struct iwl_op_mode_ops {
                                     const struct iwl_fw *fw,
                                     struct dentry *dbgfs_dir);
        void (*stop)(struct iwl_op_mode *op_mode);
-       int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
-                 struct iwl_device_cmd *cmd);
-       void (*napi_add)(struct iwl_op_mode *op_mode,
-                        struct napi_struct *napi,
-                        struct net_device *napi_dev,
-                        int (*poll)(struct napi_struct *, int),
-                        int weight);
+       void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+                  struct iwl_rx_cmd_buffer *rxb);
        void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
        void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
        bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -188,11 +179,11 @@ static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
        op_mode->ops->stop(op_mode);
 }
 
-static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd)
+static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
+                                 struct napi_struct *napi,
+                                 struct iwl_rx_cmd_buffer *rxb)
 {
-       return op_mode->ops->rx(op_mode, rxb, cmd);
+       return op_mode->ops->rx(op_mode, napi, rxb);
 }
 
 static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
@@ -260,15 +251,4 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
        return op_mode->ops->exit_d0i3(op_mode);
 }
 
-static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
-                                       struct napi_struct *napi,
-                                       struct net_device *napi_dev,
-                                       int (*poll)(struct napi_struct *, int),
-                                       int weight)
-{
-       if (!op_mode->ops->napi_add)
-               return;
-       op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
-}
-
 #endif /* __iwl_op_mode_h__ */
index 5af1c776d2d4381fdd01e0044c4eeb5108236a18..3ab777f79e4f60bcf77b00f8331a7b1df69475b2 100644 (file)
 #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS     (16)
 #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK     (0x007F0000)
 #define SCD_GP_CTRL_ENABLE_31_QUEUES           BIT(0)
+#define SCD_GP_CTRL_AUTO_ACTIVE_MODE           BIT(18)
 
 /* Context Data */
 #define SCD_CONTEXT_MEM_LOWER_BOUND    (SCD_MEM_LOWER_BOUND + 0x600)
 
 /*********************** END TX SCHEDULER *************************************/
 
+/* tcp checksum offload */
+#define RX_EN_CSUM             (0x00a00d88)
+
 /* Oscillator clock */
 #define OSC_CLK                                (0xa04068)
 #define OSC_CLK_FORCE_CONTROL          (0x8)
@@ -379,6 +383,8 @@ enum aux_misc_master1_en {
 #define AUX_MISC_MASTER1_SMPHR_STATUS  0xA20800
 #define RSA_ENABLE                     0xA24B08
 #define PREG_AUX_BUS_WPROT_0           0xA04CC0
+#define SB_CPU_1_STATUS                        0xA01E30
+#define SB_CPU_2_STATUS                        0xA01E34
 
 /* FW chicken bits */
 #define LMPM_CHICK                     0xA01FF8
@@ -386,4 +392,10 @@ enum {
        LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
 };
 
+/* FW chicken bits */
+#define LMPM_PAGE_PASS_NOTIF                   0xA03824
+enum {
+       LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
+};
+
 #endif                         /* __iwl_prph_h__ */
index 87a230a7f4b605b8b6db80b69b3db7bc95b59e2f..c829c505e1419cf3ce84356f9a07c4b85b8ae447 100644 (file)
 #define INDEX_TO_SEQ(i)        ((i) & 0xff)
 #define SEQ_RX_FRAME   cpu_to_le16(0x8000)
 
+/*
+ * those functions retrieve specific information from
+ * the id field in the iwl_host_cmd struct which contains
+ * the command id, the group id and the version of the command
+ * and vice versa
+*/
+static inline u8 iwl_cmd_opcode(u32 cmdid)
+{
+       return cmdid & 0xFF;
+}
+
+static inline u8 iwl_cmd_groupid(u32 cmdid)
+{
+       return ((cmdid & 0xFF00) >> 8);
+}
+
+static inline u8 iwl_cmd_version(u32 cmdid)
+{
+       return ((cmdid & 0xFF0000) >> 16);
+}
+
+static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
+{
+       return opcode + (groupid << 8) + (version << 16);
+}
+
+/* make u16 wide id out of u8 group and opcode */
+#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
+
+/* due to the conversion, this group is special; new groups
+ * should be defined in the appropriate fw-api header files
+ */
+#define IWL_ALWAYS_LONG_GROUP  1
+
 /**
  * struct iwl_cmd_header
  *
  */
 struct iwl_cmd_header {
        u8 cmd;         /* Command ID:  REPLY_RXON, etc. */
-       u8 flags;       /* 0:5 reserved, 6 abort, 7 internal */
+       u8 group_id;
        /*
         * The driver sets up the sequence number to values of its choosing.
         * uCode does not use this value, but passes it back to the driver
@@ -154,9 +188,22 @@ struct iwl_cmd_header {
        __le16 sequence;
 } __packed;
 
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
-
+/**
+ * struct iwl_cmd_header_wide
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ * this is the wide version that contains more information about the command
+ * like length, version and command type
+ */
+struct iwl_cmd_header_wide {
+       u8 cmd;
+       u8 group_id;
+       __le16 sequence;
+       __le16 length;
+       u8 reserved;
+       u8 version;
+} __packed;
 
 #define FH_RSCSR_FRAME_SIZE_MSK                0x00003FFF      /* bits 0-13 */
 #define FH_RSCSR_FRAME_INVALID         0x55550000
@@ -201,6 +248,8 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
  * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
  * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
  *     (i.e. mark it as non-idle).
+ * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
+ *     check that we leave enough room for the TBs bitmap which needs 20 bits.
  */
 enum CMD_MODE {
        CMD_ASYNC               = BIT(0),
@@ -210,6 +259,8 @@ enum CMD_MODE {
        CMD_SEND_IN_IDLE        = BIT(4),
        CMD_MAKE_TRANS_IDLE     = BIT(5),
        CMD_WAKE_UP_TRANS       = BIT(6),
+
+       CMD_TB_BITMAP_POS       = 11,
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -222,8 +273,18 @@ enum CMD_MODE {
  * aren't fully copied and use other TFD space.
  */
 struct iwl_device_cmd {
-       struct iwl_cmd_header hdr;      /* uCode API */
-       u8 payload[DEF_CMD_PAYLOAD_SIZE];
+       union {
+               struct {
+                       struct iwl_cmd_header hdr;      /* uCode API */
+                       u8 payload[DEF_CMD_PAYLOAD_SIZE];
+               };
+               struct {
+                       struct iwl_cmd_header_wide hdr_wide;
+                       u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
+                                       sizeof(struct iwl_cmd_header_wide) +
+                                       sizeof(struct iwl_cmd_header)];
+               };
+       };
 } __packed;
 
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
@@ -261,24 +322,22 @@ enum iwl_hcmd_dataflag {
  * @resp_pkt: response packet, if %CMD_WANT_SKB was set
  * @_rx_page_order: (internally used to free response packet)
  * @_rx_page_addr: (internally used to free response packet)
- * @handler_status: return value of the handler of the command
- *     (put in setup_rx_handlers) - valid for SYNC mode only
  * @flags: can be CMD_*
  * @len: array of the lengths of the chunks in data
  * @dataflags: IWL_HCMD_DFL_*
- * @id: id of the host command
+ * @id: command id of the host command, for wide commands encoding the
+ *     version and group as well
  */
 struct iwl_host_cmd {
        const void *data[IWL_MAX_CMD_TBS_PER_TFD];
        struct iwl_rx_packet *resp_pkt;
        unsigned long _rx_page_addr;
        u32 _rx_page_order;
-       int handler_status;
 
        u32 flags;
+       u32 id;
        u16 len[IWL_MAX_CMD_TBS_PER_TFD];
        u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
-       u8 id;
 };
 
 static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
@@ -379,6 +438,7 @@ enum iwl_trans_status {
  * @bc_table_dword: set to true if the BC table expects the byte count to be
  *     in DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: firmware supports wide host command header
  * @command_names: array of command names, must be 256 entries
  *     (one for each command); for debugging only
  * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
@@ -396,6 +456,7 @@ struct iwl_trans_config {
        bool rx_buf_size_8k;
        bool bc_table_dword;
        bool scd_set_active;
+       bool wide_cmd_header;
        const char *const *command_names;
 
        u32 sdio_adma_addr;
@@ -544,10 +605,12 @@ struct iwl_trans_ops {
                              u32 value);
        void (*ref)(struct iwl_trans *trans);
        void (*unref)(struct iwl_trans *trans);
-       void (*suspend)(struct iwl_trans *trans);
+       int  (*suspend)(struct iwl_trans *trans);
        void (*resume)(struct iwl_trans *trans);
 
-       struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
+       struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
+                                                struct iwl_fw_dbg_trigger_tlv
+                                                *trigger);
 };
 
 /**
@@ -584,6 +647,8 @@ enum iwl_d0i3_mode {
  * @cfg - pointer to the configuration
  * @status: a bit-mask of transport status flags
  * @dev - pointer to struct device * that represents the device
+ * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
+ *     0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
  * @hw_id: a u32 with the ID of the device / sub-device.
  *     Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
@@ -603,6 +668,12 @@ enum iwl_d0i3_mode {
  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
  * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
  * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @paging_req_addr: The location were the FW will upload / download the pages
+ *     from. The address is set by the opmode
+ * @paging_db: Pointer to the opmode paging data base, the pointer is set by
+ *     the opmode.
+ * @paging_download_buf: Buffer used for copying all of the pages before
+ *     downloading them to the FW. The buffer is allocated in the opmode
  */
 struct iwl_trans {
        const struct iwl_trans_ops *ops;
@@ -612,6 +683,7 @@ struct iwl_trans {
        unsigned long status;
 
        struct device *dev;
+       u32 max_skb_frags;
        u32 hw_rev;
        u32 hw_id;
        char hw_id_str[52];
@@ -639,6 +711,14 @@ struct iwl_trans {
        struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
        u8 dbg_dest_reg_num;
 
+       /*
+        * Paging parameters - All of the parameters should be set by the
+        * opmode when paging is enabled
+        */
+       u32 paging_req_addr;
+       struct iwl_fw_paging *paging_db;
+       void *paging_download_buf;
+
        enum iwl_d0i3_mode d0i3_mode;
 
        bool wowlan_d0i3;
@@ -730,7 +810,8 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
 static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
 {
        might_sleep();
-       trans->ops->d3_suspend(trans, test);
+       if (trans->ops->d3_suspend)
+               trans->ops->d3_suspend(trans, test);
 }
 
 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
@@ -738,6 +819,9 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
                                      bool test)
 {
        might_sleep();
+       if (!trans->ops->d3_resume)
+               return 0;
+
        return trans->ops->d3_resume(trans, status, test);
 }
 
@@ -753,10 +837,12 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
                trans->ops->unref(trans);
 }
 
-static inline void iwl_trans_suspend(struct iwl_trans *trans)
+static inline int iwl_trans_suspend(struct iwl_trans *trans)
 {
-       if (trans->ops->suspend)
-               trans->ops->suspend(trans);
+       if (!trans->ops->suspend)
+               return 0;
+
+       return trans->ops->suspend(trans);
 }
 
 static inline void iwl_trans_resume(struct iwl_trans *trans)
@@ -766,11 +852,12 @@ static inline void iwl_trans_resume(struct iwl_trans *trans)
 }
 
 static inline struct iwl_trans_dump_data *
-iwl_trans_dump_data(struct iwl_trans *trans)
+iwl_trans_dump_data(struct iwl_trans *trans,
+                   struct iwl_fw_dbg_trigger_tlv *trigger)
 {
        if (!trans->ops->dump_data)
                return NULL;
-       return trans->ops->dump_data(trans);
+       return trans->ops->dump_data(trans, trigger);
 }
 
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
index 2d7c3ea3c4f8ba4db22fde31245ac43051f52a53..8c2c3d13b09233a3e944ab2ee3e502dde0415312 100644 (file)
@@ -6,6 +6,7 @@ iwlmvm-y += power.o coex.o coex_legacy.o
 iwlmvm-y += tt.o offloading.o tdls.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
 iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwlmvm-y += tof.o
 iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
 
 ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
index b4737e296c927582063409917951a210e8ffa74e..e290ac67d97564bd0e85898d175fa4c12f11e6ed 100644 (file)
@@ -725,15 +725,17 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
        }
 }
 
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
-                            struct iwl_rx_cmd_buffer *rxb,
-                            struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+               iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
+               return;
+       }
 
        IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
        IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
@@ -748,12 +750,6 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
        memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
 
        iwl_mvm_bt_coex_notif_handle(mvm);
-
-       /*
-        * This is an async handler for a notification, returning anything other
-        * than 0 doesn't make sense even if HCMD failed.
-        */
-       return 0;
 }
 
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -947,9 +943,8 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+                                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u32 ant_isolation = le32_to_cpup((void *)pkt->data);
@@ -957,20 +952,23 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        u8 __maybe_unused lower_bound, upper_bound;
        u8 lut;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+               iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
+               return;
+       }
 
        if (!iwl_mvm_bt_is_plcr_supported(mvm))
-               return 0;
+               return;
 
        lockdep_assert_held(&mvm->mutex);
 
        /* Ignore updates if we are in force mode */
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return 0;
+               return;
 
        if (ant_isolation ==  mvm->last_ant_isol)
-               return 0;
+               return;
 
        for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
                if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@@ -989,7 +987,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        mvm->last_ant_isol = ant_isolation;
 
        if (mvm->last_corun_lut == lut)
-               return 0;
+               return;
 
        mvm->last_corun_lut = lut;
 
@@ -1000,6 +998,8 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20,
               sizeof(cmd.corun_lut40));
 
-       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
-                                   sizeof(cmd), &cmd);
+       if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
+                                sizeof(cmd), &cmd))
+               IWL_ERR(mvm,
+                       "failed to send BT_COEX_UPDATE_CORUN_LUT command\n");
 }
index 6ac6de2af9779982231d1efb4c6186fad4442f5d..61c07b05fcaa67cb302f959c4f3f2ec6c919a379 100644 (file)
@@ -1058,9 +1058,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
                IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-                                struct iwl_rx_cmd_buffer *rxb,
-                                struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+                                 struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
@@ -1083,12 +1082,6 @@ int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
        memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
 
        iwl_mvm_bt_coex_notif_handle(mvm);
-
-       /*
-        * This is an async handler for a notification, returning anything other
-        * than 0 doesn't make sense even if HCMD failed.
-        */
-       return 0;
 }
 
 static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
@@ -1250,14 +1243,12 @@ void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-                                     struct iwl_rx_cmd_buffer *rxb,
-                                     struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u32 ant_isolation = le32_to_cpup((void *)pkt->data);
        u8 __maybe_unused lower_bound, upper_bound;
-       int ret;
        u8 lut;
 
        struct iwl_bt_coex_cmd_old *bt_cmd;
@@ -1268,16 +1259,16 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
        };
 
        if (!iwl_mvm_bt_is_plcr_supported(mvm))
-               return 0;
+               return;
 
        lockdep_assert_held(&mvm->mutex);
 
        /* Ignore updates if we are in force mode */
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return 0;
+               return;
 
        if (ant_isolation ==  mvm->last_ant_isol)
-               return 0;
+               return;
 
        for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
                if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@@ -1296,13 +1287,13 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
        mvm->last_ant_isol = ant_isolation;
 
        if (mvm->last_corun_lut == lut)
-               return 0;
+               return;
 
        mvm->last_corun_lut = lut;
 
        bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
        if (!bt_cmd)
-               return 0;
+               return;
        cmd.data[0] = bt_cmd;
 
        bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
@@ -1317,8 +1308,8 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
        memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
               sizeof(bt_cmd->bt4_corun_lut40));
 
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (iwl_mvm_send_cmd(mvm, &cmd))
+               IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
 
        kfree(bt_cmd);
-       return ret;
 }
index beba375489f1cc53b2e4a97397c4159732083536..b8ee3121fbd23be4336a4a7fafffc72f37c46b06 100644 (file)
 #define IWL_MVM_QUOTA_THRESHOLD                        4
 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE         0
 #define IWL_MVM_RS_DISABLE_P2P_MIMO            0
+#define IWL_MVM_TOF_IS_RESPONDER               0
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW   1
index 4165d104e4c379dde727f01d585904dec6030deb..04264e417c1c644e2b362e9bf29760489cfcae4f 100644 (file)
@@ -1145,7 +1145,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
 {
        struct iwl_notification_wait wait_d3;
-       static const u8 d3_notif[] = { D3_CONFIG_CMD };
+       static const u16 d3_notif[] = { D3_CONFIG_CMD };
        int ret;
 
        iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
@@ -1168,13 +1168,17 @@ remove_notif:
 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
+
+       ret = iwl_trans_suspend(mvm->trans);
+       if (ret)
+               return ret;
 
-       iwl_trans_suspend(mvm->trans);
        mvm->trans->wowlan_d0i3 = wowlan->any;
        if (mvm->trans->wowlan_d0i3) {
                /* 'any' trigger means d0i3 usage */
                if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
-                       int ret = iwl_mvm_enter_d0i3_sync(mvm);
+                       ret = iwl_mvm_enter_d0i3_sync(mvm);
 
                        if (ret)
                                return ret;
@@ -1183,6 +1187,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                mutex_lock(&mvm->d0i3_suspend_mutex);
                __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
                mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+               iwl_trans_d3_suspend(mvm->trans, false);
+
                return 0;
        }
 
@@ -1935,28 +1942,59 @@ out:
        return 1;
 }
 
-int iwl_mvm_resume(struct ieee80211_hw *hw)
+static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
 {
-       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       iwl_trans_resume(mvm->trans);
+
+       return __iwl_mvm_resume(mvm, false);
+}
+
+static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
+{
+       bool exit_now;
+       enum iwl_d3_status d3_status;
+
+       iwl_trans_d3_resume(mvm->trans, &d3_status, false);
+
+       /*
+        * make sure to clear D0I3_DEFER_WAKEUP before
+        * calling iwl_trans_resume(), which might wait
+        * for d0i3 exit completion.
+        */
+       mutex_lock(&mvm->d0i3_suspend_mutex);
+       __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+       exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+                                       &mvm->d0i3_suspend_flags);
+       mutex_unlock(&mvm->d0i3_suspend_mutex);
+       if (exit_now) {
+               IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
+               _iwl_mvm_exit_d0i3(mvm);
+       }
 
        iwl_trans_resume(mvm->trans);
 
-       if (mvm->hw->wiphy->wowlan_config->any) {
-               /* 'any' trigger means d0i3 usage */
-               if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
-                       int ret = iwl_mvm_exit_d0i3(hw->priv);
+       if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
+               int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
 
-                       if (ret)
-                               return ret;
-                       /*
-                        * d0i3 exit will be deferred until reconfig_complete.
-                        * make sure there we are out of d0i3.
-                        */
-               }
-               return 0;
+               if (ret)
+                       return ret;
+               /*
+                * d0i3 exit will be deferred until reconfig_complete.
+                * make sure there we are out of d0i3.
+                */
        }
+       return 0;
+}
 
-       return __iwl_mvm_resume(mvm, false);
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       /* 'any' trigger means d0i3 was used */
+       if (hw->wiphy->wowlan_config->any)
+               return iwl_mvm_resume_d0i3(mvm);
+       else
+               return iwl_mvm_resume_d3(mvm);
 }
 
 void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
index 5c8a65de0e775a2327d392f617c4433bdd096f6d..383a3162046c143129308f906269dbeba95bac16 100644 (file)
@@ -63,6 +63,7 @@
  *
  *****************************************************************************/
 #include "mvm.h"
+#include "fw-api-tof.h"
 #include "debugfs.h"
 
 static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -497,6 +498,731 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
+static inline char *iwl_dbgfs_is_match(char *name, char *buf)
+{
+       int len = strlen(name);
+
+       return !strncmp(name, buf, len) ? buf + len : NULL;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
+                                         char *buf,
+                                         size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = -EINVAL;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("tof_disabled=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.tof_cfg.tof_disabled = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.tof_cfg.one_sided_disabled = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("is_debug_mode=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.tof_cfg.is_debug_mode = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("is_buf=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.tof_cfg.is_buf_required = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       ret = iwl_mvm_tof_config_cmd(mvm);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_tof_config_cmd *cmd;
+
+       cmd = &mvm->tof_data.tof_cfg;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
+                        cmd->tof_disabled);
+       pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
+                        cmd->one_sided_disabled);
+       pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
+                        cmd->is_debug_mode);
+       pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
+                        cmd->is_buf_required);
+
+       mutex_unlock(&mvm->mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
+                                                   char *buf,
+                                                   size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = 0;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("burst_period=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (!ret)
+                       mvm->tof_data.responder_cfg.burst_period =
+                                                       cpu_to_le16(value);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.min_delta_ftm = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("burst_duration=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.burst_duration = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.num_of_burst_exp = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("abort_responder=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.abort_responder = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("get_ch_est=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.get_ch_est = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.recv_sta_req_params = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("channel_num=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.channel_num = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("bandwidth=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.bandwidth = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("rate=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.rate = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("bssid=", buf);
+       if (data) {
+               u8 *mac = mvm->tof_data.responder_cfg.bssid;
+
+               if (!mac_pton(data, mac)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
+                                                       cpu_to_le16(value);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("toa_offset=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.toa_offset =
+                                                       cpu_to_le16(value);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ctrl_ch_position=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.ctrl_ch_position = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.ftm_per_burst = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("asap_mode=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.asap_mode = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       ret = iwl_mvm_tof_responder_cmd(mvm, vif);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
+                                                  char __user *user_buf,
+                                                  size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_tof_responder_config_cmd *cmd;
+
+       cmd = &mvm->tof_data.responder_cfg;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
+                        le16_to_cpu(cmd->burst_period));
+       pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
+                        cmd->burst_duration);
+       pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
+                        cmd->bandwidth);
+       pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
+                        cmd->channel_num);
+       pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
+                        cmd->ctrl_ch_position);
+       pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
+                        cmd->bssid);
+       pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
+                        cmd->min_delta_ftm);
+       pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
+                        cmd->num_of_burst_exp);
+       pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
+       pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
+                        cmd->abort_responder);
+       pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
+                        cmd->get_ch_est);
+       pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
+                        cmd->recv_sta_req_params);
+       pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
+                        cmd->ftm_per_burst);
+       pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
+                        cmd->ftm_resp_ts_avail);
+       pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
+                        cmd->asap_mode);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "tsf_timer_offset_msecs = %d\n",
+                        le16_to_cpu(cmd->tsf_timer_offset_msecs));
+       pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
+                        le16_to_cpu(cmd->toa_offset));
+
+       mutex_unlock(&mvm->mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
+                                                char *buf, size_t count,
+                                                loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = 0;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("request_id=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.request_id = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("initiator=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.initiator = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.one_sided_los_disable = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("req_timeout=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.req_timeout = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("report_policy=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.report_policy = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("macaddr_random=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.macaddr_random = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("num_of_ap=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.num_of_ap = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("macaddr_template=", buf);
+       if (data) {
+               u8 mac[ETH_ALEN];
+
+               if (!mac_pton(data, mac)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+       }
+
+       data = iwl_dbgfs_is_match("macaddr_mask=", buf);
+       if (data) {
+               u8 mac[ETH_ALEN];
+
+               if (!mac_pton(data, mac)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+       }
+
+       data = iwl_dbgfs_is_match("ap=", buf);
+       if (data) {
+               struct iwl_tof_range_req_ap_entry ap;
+               int size = sizeof(struct iwl_tof_range_req_ap_entry);
+               u16 burst_period;
+               u8 *mac = ap.bssid;
+               unsigned int i;
+
+               if (sscanf(data, "%u %hhd %hhx %hhx"
+                          "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
+                          "%hhx %hhx %hx"
+                          "%hhx %hhx %x"
+                          "%hhx %hhx %hhx %hhx",
+                          &i, &ap.channel_num, &ap.bandwidth,
+                          &ap.ctrl_ch_position,
+                          mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
+                          &ap.measure_type, &ap.num_of_bursts,
+                          &burst_period,
+                          &ap.samples_per_burst, &ap.retries_per_sample,
+                          &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
+                          &ap.enable_dyn_ack, &ap.rssi) != 20) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               if (i >= IWL_MVM_TOF_MAX_APS) {
+                       IWL_ERR(mvm, "Invalid AP index %d\n", i);
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               ap.burst_period = cpu_to_le16(burst_period);
+
+               memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_range_request=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[512];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_tof_range_req_cmd *cmd;
+       int i;
+
+       cmd = &mvm->tof_data.range_req;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
+                        cmd->request_id);
+       pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
+                        cmd->initiator);
+       pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
+                        cmd->one_sided_los_disable);
+       pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
+                        cmd->req_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
+                        cmd->report_policy);
+       pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
+                        cmd->macaddr_random);
+       pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
+                        cmd->macaddr_template);
+       pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
+                        cmd->macaddr_mask);
+       pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
+                        cmd->num_of_ap);
+       for (i = 0; i < cmd->num_of_ap; i++) {
+               struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
+
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "ap %.2d: channel_num=%hhx bw=%hhx"
+                               " control=%hhx bssid=%pM type=%hhx"
+                               " num_of_bursts=%hhx burst_period=%hx ftm=%hhx"
+                               " retries=%hhx tsf_delta=%x location_req=%hhx "
+                               " asap=%hhx enable=%hhx rssi=%hhx\n",
+                               i, ap->channel_num, ap->bandwidth,
+                               ap->ctrl_ch_position, ap->bssid,
+                               ap->measure_type, ap->num_of_bursts,
+                               ap->burst_period, ap->samples_per_burst,
+                               ap->retries_per_sample, ap->tsf_delta,
+                               ap->location_req, ap->asap_mode,
+                               ap->enable_dyn_ack, ap->rssi);
+       }
+
+       mutex_unlock(&mvm->mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
+                                                char *buf,
+                                                size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = 0;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
+                                                       cpu_to_le16(value);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.min_delta_ftm = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
+                                                                       value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
+                                                                       value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
+                                                                       value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_tof_range_req_ext_cmd *cmd;
+
+       cmd = &mvm->tof_data.range_req_ext;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "tsf_timer_offset_msec = %hx\n",
+                        cmd->tsf_timer_offset_msec);
+       pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n",
+                        cmd->min_delta_ftm);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "ftm_format_and_bw20M = %hhx\n",
+                        cmd->ftm_format_and_bw20M);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "ftm_format_and_bw40M = %hhx\n",
+                        cmd->ftm_format_and_bw40M);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "ftm_format_and_bw80M = %hhx\n",
+                        cmd->ftm_format_and_bw80M);
+
+       mutex_unlock(&mvm->mutex);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
+                                              char *buf,
+                                              size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = 0;
+       int abort_id;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("abort_id=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.last_abort_id = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_range_abort=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       abort_id = mvm->tof_data.last_abort_id;
+                       ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
+                                             char __user *user_buf,
+                                             size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[32];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       int last_abort_id;
+
+       mutex_lock(&mvm->mutex);
+       last_abort_id = mvm->tof_data.last_abort_id;
+       mutex_unlock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
+                        last_abort_id);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
+                                                char __user *user_buf,
+                                                size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char *buf;
+       int pos = 0;
+       const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
+       struct iwl_tof_range_rsp_ntfy *cmd;
+       int i, ret;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&mvm->mutex);
+       cmd = &mvm->tof_data.range_resp;
+
+       pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
+                        cmd->request_id);
+       pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
+                        cmd->request_status);
+       pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
+                        cmd->last_in_batch);
+       pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
+                        cmd->num_of_aps);
+       for (i = 0; i < cmd->num_of_aps; i++) {
+               struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
+
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "ap %.2d: bssid=%pM status=%hhx bw=%hhx"
+                               " rtt=%x rtt_var=%x rtt_spread=%x"
+                               " rssi=%hhx  rssi_spread=%hhx"
+                               " range=%x range_var=%x"
+                               " time_stamp=%x\n",
+                               i, ap->bssid, ap->measure_status,
+                               ap->measure_bw,
+                               ap->rtt, ap->rtt_variance, ap->rtt_spread,
+                               ap->rssi, ap->rssi_spread, ap->range,
+                               ap->range_variance, ap->timestamp);
+       }
+       mutex_unlock(&mvm->mutex);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
 static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
                                           size_t count, loff_t *ppos)
 {
@@ -628,6 +1354,12 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
+MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -671,6 +1403,25 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
                                         S_IRUSR | S_IWUSR);
 
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
+           !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
+               if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
+                       MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
+                                                mvmvif->dbgfs_dir,
+                                                S_IRUSR | S_IWUSR);
+
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
+                                        S_IRUSR);
+       }
+
        /*
         * Create symlink for convenience pointing to interface specific
         * debugfs entries for the driver. For example, under
index ffb4b5cef27570bbe4af0670683a41891b673330..ca4a1f8f82a5d6456a5729f3f960d1c838dc75f2 100644 (file)
@@ -974,7 +974,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
        if (ret)
                return ret;
 
-       iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
+       iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL);
 
        iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
 
@@ -1200,12 +1200,7 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
        if (ptr) {
                for (ofs = 0; ofs < len; ofs += 16) {
                        pos += scnprintf(buf + pos, bufsz - pos,
-                                        "0x%.4x ", ofs);
-                       hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
-                                          bufsz - pos, false);
-                       pos += strlen(buf + pos);
-                       if (bufsz - pos > 0)
-                               buf[pos++] = '\n';
+                                        "0x%.4x %16ph\n", ofs, ptr + ofs);
                }
        } else {
                pos += scnprintf(buf + pos, bufsz - pos,
index b1baa33cc19b3228a8534af71ab69c7dede40520..b86b1697d56f6807011faa40188094f47140a99a 100644 (file)
@@ -413,7 +413,7 @@ struct iwl_beacon_filter_cmd {
 #define IWL_BF_TEMP_FAST_FILTER_MIN 0
 
 #define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
-#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20
 #define IWL_BF_TEMP_SLOW_FILTER_MAX 255
 #define IWL_BF_TEMP_SLOW_FILTER_MIN 0
 
index 737774a01c74a500b3934928449531889701d7f5..660cc1c93e192654345b96b5b4a6a9c9589dc746 100644 (file)
@@ -87,41 +87,6 @@ struct iwl_ssid_ie {
        u8 ssid[IEEE80211_MAX_SSID_LEN];
 } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
 
-/* How many statistics are gathered for each channel */
-#define SCAN_RESULTS_STATISTICS 1
-
-/**
- * enum iwl_scan_complete_status - status codes for scan complete notifications
- * @SCAN_COMP_STATUS_OK:  scan completed successfully
- * @SCAN_COMP_STATUS_ABORT: scan was aborted by user
- * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
- * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
- * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
- * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
- * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
- * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
- * @SCAN_COMP_STATUS_ERR_COEX: medium was lost ot WiMax
- * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
- *     (not an error!)
- * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
- *     asked for
- * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
-*/
-enum iwl_scan_complete_status {
-       SCAN_COMP_STATUS_OK = 0x1,
-       SCAN_COMP_STATUS_ABORT = 0x2,
-       SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
-       SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
-       SCAN_COMP_STATUS_ERR_PROBE = 0x5,
-       SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
-       SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
-       SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
-       SCAN_COMP_STATUS_ERR_COEX = 0x9,
-       SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
-       SCAN_COMP_STATUS_ITERATION_END = 0x0B,
-       SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
-};
-
 /* scan offload */
 #define IWL_SCAN_MAX_BLACKLIST_LEN     64
 #define IWL_SCAN_SHORT_BLACKLIST_LEN   16
@@ -143,71 +108,6 @@ enum scan_framework_client {
        SCAN_CLIENT_ASSET_TRACKING      = BIT(2),
 };
 
-/**
- * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
- * @scan_flags:                see enum iwl_scan_flags
- * @channel_count:     channels in channel list
- * @quiet_time:                dwell time, in milliseconds, on quiet channel
- * @quiet_plcp_th:     quiet channel num of packets threshold
- * @good_CRC_th:       passive to active promotion threshold
- * @rx_chain:          RXON rx chain.
- * @max_out_time:      max TUs to be out of associated channel
- * @suspend_time:      pause scan this TUs when returning to service channel
- * @flags:             RXON flags
- * @filter_flags:      RXONfilter
- * @tx_cmd:            tx command for active scan; for 2GHz and for 5GHz.
- * @direct_scan:       list of SSIDs for directed active scan
- * @scan_type:         see enum iwl_scan_type.
- * @rep_count:         repetition count for each scheduled scan iteration.
- */
-struct iwl_scan_offload_cmd {
-       __le16 len;
-       u8 scan_flags;
-       u8 channel_count;
-       __le16 quiet_time;
-       __le16 quiet_plcp_th;
-       __le16 good_CRC_th;
-       __le16 rx_chain;
-       __le32 max_out_time;
-       __le32 suspend_time;
-       /* RX_ON_FLAGS_API_S_VER_1 */
-       __le32 flags;
-       __le32 filter_flags;
-       struct iwl_tx_cmd tx_cmd[2];
-       /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
-       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
-       __le32 scan_type;
-       __le32 rep_count;
-} __packed;
-
-enum iwl_scan_offload_channel_flags {
-       IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE         = BIT(0),
-       IWL_SCAN_OFFLOAD_CHANNEL_NARROW         = BIT(22),
-       IWL_SCAN_OFFLOAD_CHANNEL_FULL           = BIT(24),
-       IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL        = BIT(25),
-};
-
-/* channel configuration for struct iwl_scan_offload_cfg. Each channels needs:
- * __le32 type:        bitmap; bits 1-20 are for directed scan to i'th ssid and
- *     see enum iwl_scan_offload_channel_flags.
- * __le16 channel_number: channel number 1-13 etc.
- * __le16 iter_count: repetition count for the channel.
- * __le32 iter_interval: interval between two iterations on one channel.
- * u8 active_dwell.
- * u8 passive_dwell.
- */
-#define IWL_SCAN_CHAN_SIZE 14
-
-/**
- * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
- * @scan_cmd:          scan command fixed part
- * @data:              scan channel configuration and probe request frames
- */
-struct iwl_scan_offload_cfg {
-       struct iwl_scan_offload_cmd scan_cmd;
-       u8 data[0];
-} __packed;
-
 /**
  * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
  * @ssid:              MAC address to filter out
@@ -297,35 +197,6 @@ enum iwl_scan_ebs_status {
        IWL_SCAN_EBS_INACTIVE,
 };
 
-/**
- * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
- * @last_schedule_line:                last schedule line executed (fast or regular)
- * @last_schedule_iteration:   last scan iteration executed before scan abort
- * @status:                    enum iwl_scan_offload_compleate_status
- * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
- */
-struct iwl_scan_offload_complete {
-       u8 last_schedule_line;
-       u8 last_schedule_iteration;
-       u8 status;
-       u8 ebs_status;
-} __packed;
-
-/**
- * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
- * @ssid_bitmap:       SSIDs indexes found in this iteration
- * @client_bitmap:     clients that are active and wait for this notification
- */
-struct iwl_sched_scan_results {
-       __le16 ssid_bitmap;
-       u8 client_bitmap;
-       u8 reserved;
-};
-
-/* Unified LMAC scan API */
-
-#define IWL_MVM_BASIC_PASSIVE_DWELL 110
-
 /**
  * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
  * @tx_flags: combination of TX_CMD_FLG_*
@@ -550,18 +421,6 @@ struct iwl_periodic_scan_complete {
 
 /* UMAC Scan API */
 
-/**
- * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands
- * @size:      size of the command (not including header)
- * @reserved0: for future use and alignment
- * @ver:       API version number
- */
-struct iwl_mvm_umac_cmd_hdr {
-       __le16 size;
-       u8 reserved0;
-       u8 ver;
-} __packed;
-
 /* The maximum of either of these cannot exceed 8, because we use an
  * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
  */
@@ -621,7 +480,6 @@ enum iwl_channel_flags {
 
 /**
  * struct iwl_scan_config
- * @hdr: umac command header
  * @flags:                     enum scan_config_flags
  * @tx_chains:                 valid_tx antenna - ANT_* definitions
  * @rx_chains:                 valid_rx antenna - ANT_* definitions
@@ -639,7 +497,6 @@ enum iwl_channel_flags {
  * @channel_array:             default supported channels
  */
 struct iwl_scan_config {
-       struct iwl_mvm_umac_cmd_hdr hdr;
        __le32 flags;
        __le32 tx_chains;
        __le32 rx_chains;
@@ -735,7 +592,6 @@ struct iwl_scan_req_umac_tail {
 
 /**
  * struct iwl_scan_req_umac
- * @hdr: umac command header
  * @flags: &enum iwl_umac_scan_flags
  * @uid: scan id, &enum iwl_umac_scan_uid_offsets
  * @ooc_priority: out of channel priority - &enum iwl_scan_priority
@@ -754,7 +610,6 @@ struct iwl_scan_req_umac_tail {
  *     &struct iwl_scan_req_umac_tail
  */
 struct iwl_scan_req_umac {
-       struct iwl_mvm_umac_cmd_hdr hdr;
        __le32 flags;
        __le32 uid;
        __le32 ooc_priority;
@@ -776,12 +631,10 @@ struct iwl_scan_req_umac {
 
 /**
  * struct iwl_umac_scan_abort
- * @hdr: umac command header
  * @uid: scan id, &enum iwl_umac_scan_uid_offsets
  * @flags: reserved
  */
 struct iwl_umac_scan_abort {
-       struct iwl_mvm_umac_cmd_hdr hdr;
        __le32 uid;
        __le32 flags;
 } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
index 21dd5b771660f0f689798b650fbb4acd1eed3d21..493a8bdfbc9e4bb89180bc75bdd462d88b67255a 100644 (file)
@@ -366,8 +366,8 @@ struct iwl_mvm_rm_sta_cmd {
  * ( MGMT_MCAST_KEY = 0x1f )
  * @ctrl_flags: %iwl_sta_key_flag
  * @IGTK:
- * @K1: IGTK master key
- * @K2: IGTK sub key
+ * @K1: unused
+ * @K2: unused
  * @sta_id: station ID that support IGTK
  * @key_id:
  * @receive_seq_cnt: initial RSC/PN needed for replay check
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h
new file mode 100644 (file)
index 0000000..eed6271
--- /dev/null
@@ -0,0 +1,386 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __fw_api_tof_h__
+#define __fw_api_tof_h__
+
+#include "fw-api.h"
+
+/* ToF sub-group command IDs */
+enum iwl_mvm_tof_sub_grp_ids {
+       TOF_RANGE_REQ_CMD = 0x1,
+       TOF_CONFIG_CMD = 0x2,
+       TOF_RANGE_ABORT_CMD = 0x3,
+       TOF_RANGE_REQ_EXT_CMD = 0x4,
+       TOF_RESPONDER_CONFIG_CMD = 0x5,
+       TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
+       TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
+       TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
+       TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
+       TOF_RANGE_RESPONSE_NOTIF = 0xFE,
+       TOF_MCSI_DEBUG_NOTIF = 0xFB,
+};
+
+/**
+ * struct iwl_tof_config_cmd - ToF configuration
+ * @tof_disabled: 0 - enabled, 1 - disabled
+ * @one_sided_disabled: 0 - enabled, 1 - disabled
+ * @is_debug_mode: 1 - debug mode, 0 - otherwise
+ * @is_buf_required: 1 - channel estimation buffer required, 0 - otherwise
+ */
+struct iwl_tof_config_cmd {
+       __le32 sub_grp_cmd_id;
+       u8 tof_disabled;
+       u8 one_sided_disabled;
+       u8 is_debug_mode;
+       u8 is_buf_required;
+} __packed;
+
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @burst_period: future use: (currently hard coded in the LMAC)
+ *               The interval between two sequential bursts.
+ * @min_delta_ftm: future use: (currently hard coded in the LMAC)
+ *                The minimum delay between two sequential FTM Responses
+ *                in the same burst.
+ * @burst_duration: future use: (currently hard coded in the LMAC)
+ *                The total time for all FTMs handshake in the same burst.
+ *                Affect the time events duration in the LMAC.
+ * @num_of_burst_exp: future use: (currently hard coded in the LMAC)
+ *                The number of bursts for the current ToF request. Affect
+ *                the number of events allocations in the current iteration.
+ * @get_ch_est: for xVT only, NA for driver
+ * @abort_responder: when set to '1' - Responder will terminate its activity
+ *                  (all other fields in the command are ignored)
+ * @recv_sta_req_params: 1 - Responder will ignore the other Responder's
+ *                      params and use the recommended Initiator params.
+ *                      0 - otherwise
+ * @channel_num: current AP Channel
+ * @bandwidth: current AP Bandwidth: 0  20MHz, 1  40MHz, 2  80MHz
+ * @rate: current AP rate
+ * @ctrl_ch_position: coding of the control channel position relative to
+ *          the center frequency.
+ *          40MHz  0 below center, 1 above center
+ *          80MHz  bits [0..1]: 0  the near 20MHz to the center,
+ *                              1  the far  20MHz to the center
+ *                 bit[2]  as above 40MHz
+ * @ftm_per_burst: FTMs per Burst
+ * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
+ *               '1' - we measure over the Initial FTM Response
+ * @asap_mode: ASAP / Non ASAP mode for the current WLS station
+ * @sta_id: index of the AP STA when in AP mode
+ * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
+ * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
+ *             purposes, simulating station movement by adding various values
+ *             to this field
+ * @bssid: Current AP BSSID
+ */
+struct iwl_tof_responder_config_cmd {
+       __le32 sub_grp_cmd_id;
+       __le16 burst_period;
+       u8 min_delta_ftm;
+       u8 burst_duration;
+       u8 num_of_burst_exp;
+       u8 get_ch_est;
+       u8 abort_responder;
+       u8 recv_sta_req_params;
+       u8 channel_num;
+       u8 bandwidth;
+       u8 rate;
+       u8 ctrl_ch_position;
+       u8 ftm_per_burst;
+       u8 ftm_resp_ts_avail;
+       u8 asap_mode;
+       u8 sta_id;
+       __le16 tsf_timer_offset_msecs;
+       __le16 toa_offset;
+       u8 bssid[ETH_ALEN];
+} __packed;
+
+/**
+ * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
+ * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
+ * @min_delta_ftm: Minimal time between two consecutive measurements,
+ *                in units of 100us. 0 means no preference by station
+ * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
+ *                     value be sent to the AP
+ * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
+ *                     value to be sent to the AP
+ * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
+ *                     value to be sent to the AP
+ */
+struct iwl_tof_range_req_ext_cmd {
+       __le32 sub_grp_cmd_id;
+       __le16 tsf_timer_offset_msec;
+       __le16 reserved;
+       u8 min_delta_ftm;
+       u8 ftm_format_and_bw20M;
+       u8 ftm_format_and_bw40M;
+       u8 ftm_format_and_bw80M;
+} __packed;
+
+#define IWL_MVM_TOF_MAX_APS 21
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @channel_num: Current AP Channel
+ * @bandwidth: Current AP Bandwidth: 0  20MHz, 1  40MHz, 2  80MHz
+ * @tsf_delta_direction: TSF relatively to the subject AP
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ *          center frequency.
+ *          40MHz  0 below center, 1 above center
+ *          80MHz  bits [0..1]: 0  the near 20MHz to the center,
+ *                              1  the far  20MHz to the center
+ *                 bit[2]  as above 40MHz
+ * @bssid: AP's bss id
+ * @measure_type: Measurement type: 0 - two sided, 1 - One sided
+ * @num_of_bursts: Recommended value to be sent to the AP.  2s Exponent of the
+ *                number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ *               periodicity In units of 100ms. ignored if num_of_bursts = 0
+ * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31)
+ *                    1-sided: how many rts/cts pairs should be used per burst.
+ * @retries_per_sample: Max number of retries that the LMAC should send
+ *                     in case of no replies by the AP.
+ * @tsf_delta: TSF Delta in units of microseconds.
+ *            The difference between the AP TSF and the device local clock.
+ * @location_req: Location Request Bit[0] LCI should be sent in the FTMR
+ *                           Bit[1] Civic should be sent in the FTMR
+ * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
+ * @enable_dyn_ack: Enable Dynamic ACK BW.
+ *         0  Initiator interact with regular AP
+ *         1  Initiator interact with Responder machine: need to send the
+ *         Initiator Acks with HT 40MHz / 80MHz, since the Responder should
+ *         use it for its ch est measurement (this flag will be set when we
+ *         configure the opposite machine to be Responder).
+ * @rssi: Last received value
+ *       legal values: -128-0 (0x7f); values above 0x0 indicate an invalid value.
+ */
+struct iwl_tof_range_req_ap_entry {
+       u8 channel_num;
+       u8 bandwidth;
+       u8 tsf_delta_direction;
+       u8 ctrl_ch_position;
+       u8 bssid[ETH_ALEN];
+       u8 measure_type;
+       u8 num_of_bursts;
+       __le16 burst_period;
+       u8 samples_per_burst;
+       u8 retries_per_sample;
+       __le32 tsf_delta;
+       u8 location_req;
+       u8 asap_mode;
+       u8 enable_dyn_ack;
+       s8 rssi;
+} __packed;
+
+/**
+ * enum iwl_tof_response_mode
+ * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
+ *                           possible (not supported for this release)
+ * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
+ *                              timeout expiration
+ * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
+ *                               earlier of: measurements completion / timeout
+ *                               expiration.
+ */
+enum iwl_tof_response_mode {
+       IWL_MVM_TOF_RESPOSE_ASAP = 1,
+       IWL_MVM_TOF_RESPOSE_TIMEOUT,
+       IWL_MVM_TOF_RESPOSE_COMPLETE,
+};
+
+/**
+ * struct iwl_tof_range_req_cmd - start measurement cmd
+ * @request_id: A Token incremented per request. The same Token will be
+ *             sent back in the range response
+ * @initiator: 0- NW initiated,  1 - Client Initiated
+ * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
+ *                        '1' - run ML-Algo for ToF only
+ * @req_timeout: Requested timeout of the response in units of 100ms.
+ *          This is equivalent to the session time configured to the
+ *          LMAC in Initiator Request
+ * @report_policy: Supported partially for this release: For current release -
+ *                the range report will be uploaded as a batch when ready or
+ *                when the session is done (successfully / partially).
+ *                one of iwl_tof_response_mode.
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
+ *                 '1' Use MAC Address randomization according to the below
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ *               Bits set to 1 shall be randomized by the UMAC
+ */
+struct iwl_tof_range_req_cmd {
+       __le32 sub_grp_cmd_id;
+       u8 request_id;
+       u8 initiator;
+       u8 one_sided_los_disable;
+       u8 req_timeout;
+       u8 report_policy;
+       u8 los_det_disable;
+       u8 num_of_ap;
+       u8 macaddr_random;
+       u8 macaddr_template[ETH_ALEN];
+       u8 macaddr_mask[ETH_ALEN];
+       struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+/**
+ * struct iwl_tof_gen_resp_cmd - generic ToF response
+ */
+struct iwl_tof_gen_resp_cmd {
+       __le32 sub_grp_cmd_id;
+       u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @measure_status: current APs measurement status
+ * @measure_bw: Current AP Bandwidth: 0  20MHz, 1  40MHz, 2  80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ *      current AP [nSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ *            values measured for current AP in the current session [nsec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ *             measured for current AP in the current session
+ * @range: Measured range [cm]
+ * @range_variance: Measured range variance [cm]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ *            uploaded by the LMAC
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+       u8 bssid[ETH_ALEN];
+       u8 measure_status;
+       u8 measure_bw;
+       __le32 rtt;
+       __le32 rtt_variance;
+       __le32 rtt_spread;
+       s8 rssi;
+       u8 rssi_spread;
+       __le16 reserved;
+       __le32 range;
+       __le32 range_variance;
+       __le32 timestamp;
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ntfy -
+ * @request_id: A Token ID of the corresponding Range request
+ * @request_status: status of current measurement session
+ * @last_in_batch: report policy (when not all responses are uploaded at once)
+ * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ */
+struct iwl_tof_range_rsp_ntfy {
+       u8 request_id;
+       u8 request_status;
+       u8 last_in_batch;
+       u8 num_of_aps;
+       struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+#define IWL_MVM_TOF_MCSI_BUF_SIZE  (245)
+/**
+ * struct iwl_tof_mcsi_notif - used for debug
+ * @token: token ID for the current session
+ * @role: '0' - initiator, '1' - responder
+ * @initiator_bssid: initiator machine
+ * @responder_bssid: responder machine
+ * @mcsi_buffer: debug data
+ */
+struct iwl_tof_mcsi_notif {
+       u8 token;
+       u8 role;
+       __le16 reserved;
+       u8 initiator_bssid[ETH_ALEN];
+       u8 responder_bssid[ETH_ALEN];
+       u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+} __packed;
+
+/**
+ * struct iwl_tof_neighbor_report
+ * @bssid: BSSID of the AP which sent the report
+ * @request_token: same token as the corresponding request
+ * @status:
+ * @report_ie_len: the length of the response frame starting from the Element ID
+ * @data: the IEs
+ */
+struct iwl_tof_neighbor_report {
+       u8 bssid[ETH_ALEN];
+       u8 request_token;
+       u8 status;
+       __le16 report_ie_len;
+       u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_abort_cmd
+ * @request_id: corresponds to a range request
+ */
+struct iwl_tof_range_abort_cmd {
+       __le32 sub_grp_cmd_id;
+       u8 request_id;
+       u8 reserved[3];
+} __packed;
+
+#endif
index 81c4ea3c6958440b105c239f755b85a6a36fe73c..853698ab8b05cc958105755073953c8d999ffaad 100644 (file)
@@ -124,6 +124,18 @@ enum iwl_tx_flags {
        TX_CMD_FLG_HCCA_CHUNK           = BIT(31)
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
 
+/**
+ * enum iwl_tx_pm_timeouts - pm timeout values in TX command
+ * @PM_FRAME_NONE: no need to suspend sleep mode
+ * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU
+ * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec
+ */
+enum iwl_tx_pm_timeouts {
+       PM_FRAME_NONE           = 0,
+       PM_FRAME_MGMT           = 2,
+       PM_FRAME_ASSOC          = 3,
+};
+
 /*
  * TX command security control
  */
index 16e9ef49397f4d055b788f970f1b74bf2712e5e7..4af7513adda22e3a08a59361313659ba0ff8d450 100644 (file)
@@ -75,6 +75,7 @@
 #include "fw-api-coex.h"
 #include "fw-api-scan.h"
 #include "fw-api-stats.h"
+#include "fw-api-tof.h"
 
 /* Tx queue numbers */
 enum {
@@ -119,6 +120,9 @@ enum {
        ADD_STA = 0x18,
        REMOVE_STA = 0x19,
 
+       /* paging get item */
+       FW_GET_ITEM_CMD = 0x1a,
+
        /* TX */
        TX_CMD = 0x1c,
        TXPATH_FLUSH = 0x1e,
@@ -148,6 +152,9 @@ enum {
 
        LQ_CMD = 0x4e,
 
+       /* paging block to FW cpu2 */
+       FW_PAGING_BLOCK_CMD = 0x4f,
+
        /* Scan offload */
        SCAN_OFFLOAD_REQUEST_CMD = 0x51,
        SCAN_OFFLOAD_ABORT_CMD = 0x52,
@@ -163,6 +170,10 @@ enum {
        CALIB_RES_NOTIF_PHY_DB = 0x6b,
        /* PHY_DB_CMD = 0x6c, */
 
+       /* ToF - 802.11mc FTM */
+       TOF_CMD = 0x10,
+       TOF_NOTIFICATION = 0x11,
+
        /* Power - legacy power table command */
        POWER_TABLE_CMD = 0x77,
        PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
@@ -365,6 +376,50 @@ struct iwl_nvm_access_cmd {
        u8 data[];
 } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
 
+#define NUM_OF_FW_PAGING_BLOCKS        33 /* 32 for data and 1 block for CSS */
+
+/*
+ * struct iwl_fw_paging_cmd - paging layout
+ *
+ * (FW_PAGING_BLOCK_CMD = 0x4f)
+ *
+ * Send to FW the paging layout in the driver.
+ *
+ * @flags: various flags for the command
+ * @block_size: the block size in powers of 2
+ * @block_num: number of blocks specified in the command.
+ * @device_phy_addr: virtual addresses from device side
+*/
+struct iwl_fw_paging_cmd {
+       __le32 flags;
+       __le32 block_size;
+       __le32 block_num;
+       __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+
+/*
+ * Fw items ID's
+ *
+ * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload /
+ *     download
+ */
+enum iwl_fw_item_id {
+       IWL_FW_ITEM_ID_PAGING = 3,
+};
+
+/*
+ * struct iwl_fw_get_item_cmd - get an item from the fw
+ */
+struct iwl_fw_get_item_cmd {
+       __le32 item_id;
+} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
+
+struct iwl_fw_get_item_resp {
+       __le32 item_id;
+       __le32 item_byte_cnt;
+       __le32 item_val;
+} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
+
 /**
  * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
  * @offset: offset in bytes into the section
@@ -1080,10 +1135,33 @@ struct iwl_rx_phy_info {
        __le16 frame_time;
 } __packed;
 
+/*
+ * TCP offload Rx assist info
+ *
+ * bits 0:3 - reserved
+ * bits 4:7 - MIC CRC length
+ * bits 8:12 - MAC header length
+ * bit 13 - Padding indication
+ * bit 14 - A-AMSDU indication
+ * bit 15 - Offload enabled
+ */
+enum iwl_csum_rx_assist_info {
+       CSUM_RXA_RESERVED_MASK  = 0x000f,
+       CSUM_RXA_MICSIZE_MASK   = 0x00f0,
+       CSUM_RXA_HEADERLEN_MASK = 0x1f00,
+       CSUM_RXA_PADD           = BIT(13),
+       CSUM_RXA_AMSDU          = BIT(14),
+       CSUM_RXA_ENA            = BIT(15)
+};
+
+/**
+ * struct iwl_rx_mpdu_res_start - phy info
+ * @assist: see CSUM_RXA_* above
+ */
 struct iwl_rx_mpdu_res_start {
        __le16 byte_count;
-       __le16 reserved;
-} __packed;
+       __le16 assist;
+} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
 
 /**
  * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
@@ -1136,6 +1214,8 @@ enum iwl_rx_phy_flags {
  * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
  * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
  * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
+ * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
  * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
  * @RX_MPDU_RES_STATUS_STA_ID_MSK:
  * @RX_MPDU_RES_STATUS_RRF_KILL:
@@ -1165,6 +1245,8 @@ enum iwl_mvm_rx_status {
        RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP               = BIT(13),
        RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT               = BIT(14),
        RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME             = BIT(15),
+       RX_MPDU_RES_STATUS_CSUM_DONE                    = BIT(16),
+       RX_MPDU_RES_STATUS_CSUM_OK                      = BIT(17),
        RX_MPDU_RES_STATUS_HASH_INDEX_MSK               = (0x3F0000),
        RX_MPDU_RES_STATUS_STA_ID_MSK                   = (0x1f000000),
        RX_MPDU_RES_STATUS_RRF_KILL                     = BIT(29),
index eb10c5ee4a1407c5b02babe009a899e9f747d6c9..4a0ce83315bdd212d1714956af8900ea271f62b6 100644 (file)
@@ -106,6 +106,306 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
                                    sizeof(tx_ant_cmd), &tx_ant_cmd);
 }
 
+static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+{
+       int i;
+
+       if (!mvm->fw_paging_db[0].fw_paging_block)
+               return;
+
+       for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
+               if (!mvm->fw_paging_db[i].fw_paging_block) {
+                       IWL_DEBUG_FW(mvm,
+                                    "Paging: block %d already freed, continue to next page\n",
+                                    i);
+
+                       continue;
+               }
+
+               __free_pages(mvm->fw_paging_db[i].fw_paging_block,
+                            get_order(mvm->fw_paging_db[i].fw_paging_size));
+       }
+       kfree(mvm->trans->paging_download_buf);
+       memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
+}
+
+static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
+{
+       int sec_idx, idx;
+       u32 offset = 0;
+
+       /*
+        * find where the paging image starts:
+        * if CPU2 exists and it's in paging format, then the image looks like:
+        * CPU1 sections (2 or more)
+        * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
+        * CPU2 sections (not paged)
+        * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
+        * non paged to CPU2 paging sec
+        * CPU2 paging CSS
+        * CPU2 paging image (including instruction and data)
+        */
+       for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+               if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
+                       sec_idx++;
+                       break;
+               }
+       }
+
+       if (sec_idx >= IWL_UCODE_SECTION_MAX) {
+               IWL_ERR(mvm, "driver didn't find paging image\n");
+               iwl_free_fw_paging(mvm);
+               return -EINVAL;
+       }
+
+       /* copy the CSS block to the dram */
+       IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
+                    sec_idx);
+
+       memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
+              image->sec[sec_idx].data,
+              mvm->fw_paging_db[0].fw_paging_size);
+
+       IWL_DEBUG_FW(mvm,
+                    "Paging: copied %d CSS bytes to first block\n",
+                    mvm->fw_paging_db[0].fw_paging_size);
+
+       sec_idx++;
+
+       /*
+        * copy the paging blocks to the dram
+        * the loop index starts from 1 since the CSS block was already copied
+        * to the dram and the CSS index is 0.
+        * the loop stops at num_of_paging_blk since the last block is not full.
+        */
+       for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
+               memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+                      image->sec[sec_idx].data + offset,
+                      mvm->fw_paging_db[idx].fw_paging_size);
+
+               IWL_DEBUG_FW(mvm,
+                            "Paging: copied %d paging bytes to block %d\n",
+                            mvm->fw_paging_db[idx].fw_paging_size,
+                            idx);
+
+               offset += mvm->fw_paging_db[idx].fw_paging_size;
+       }
+
+       /* copy the last paging block */
+       if (mvm->num_of_pages_in_last_blk > 0) {
+               memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+                      image->sec[sec_idx].data + offset,
+                      FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
+
+               IWL_DEBUG_FW(mvm,
+                            "Paging: copied %d pages in the last block %d\n",
+                            mvm->num_of_pages_in_last_blk, idx);
+       }
+
+       return 0;
+}
+
+static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
+                                  const struct fw_img *image)
+{
+       struct page *block;
+       dma_addr_t phys = 0;
+       int blk_idx = 0;
+       int order, num_of_pages;
+       int dma_enabled;
+
+       if (mvm->fw_paging_db[0].fw_paging_block)
+               return 0;
+
+       dma_enabled = is_device_dma_capable(mvm->trans->dev);
+
+       /* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
+       BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
+
+       num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
+       mvm->num_of_paging_blk = ((num_of_pages - 1) /
+                                   NUM_OF_PAGE_PER_GROUP) + 1;
+
+       mvm->num_of_pages_in_last_blk =
+               num_of_pages -
+               NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
+
+       IWL_DEBUG_FW(mvm,
+                    "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
+                    mvm->num_of_paging_blk,
+                    mvm->num_of_pages_in_last_blk);
+
+       /* allocate block of 4Kbytes for paging CSS */
+       order = get_order(FW_PAGING_SIZE);
+       block = alloc_pages(GFP_KERNEL, order);
+       if (!block) {
+               /* free all the previous pages since we failed */
+               iwl_free_fw_paging(mvm);
+               return -ENOMEM;
+       }
+
+       mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+       mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
+
+       if (dma_enabled) {
+               phys = dma_map_page(mvm->trans->dev, block, 0,
+                                   PAGE_SIZE << order, DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(mvm->trans->dev, phys)) {
+                       /*
+                        * free the previous pages and the current one since
+                        * we failed to map_page.
+                        */
+                       iwl_free_fw_paging(mvm);
+                       return -ENOMEM;
+               }
+               mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+       } else {
+               mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
+                       blk_idx << BLOCK_2_EXP_SIZE;
+       }
+
+       IWL_DEBUG_FW(mvm,
+                    "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+                    order);
+
+       /*
+        * allocate blocks in dram.
+        * since the CSS is allocated in fw_paging_db[0], start the loop at 1
+        */
+       for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+               /* allocate block of PAGING_BLOCK_SIZE (32K) */
+               order = get_order(PAGING_BLOCK_SIZE);
+               block = alloc_pages(GFP_KERNEL, order);
+               if (!block) {
+                       /* free all the previous pages since we failed */
+                       iwl_free_fw_paging(mvm);
+                       return -ENOMEM;
+               }
+
+               mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+               mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+
+               if (dma_enabled) {
+                       phys = dma_map_page(mvm->trans->dev, block, 0,
+                                           PAGE_SIZE << order,
+                                           DMA_BIDIRECTIONAL);
+                       if (dma_mapping_error(mvm->trans->dev, phys)) {
+                               /*
+                                * free the previous pages and the current one
+                                * since we failed to map_page.
+                                */
+                               iwl_free_fw_paging(mvm);
+                               return -ENOMEM;
+                       }
+                       mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+               } else {
+                       mvm->fw_paging_db[blk_idx].fw_paging_phys =
+                               PAGING_ADDR_SIG |
+                               blk_idx << BLOCK_2_EXP_SIZE;
+               }
+
+               IWL_DEBUG_FW(mvm,
+                            "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+                            order);
+       }
+
+       return 0;
+}
+
+static int iwl_save_fw_paging(struct iwl_mvm *mvm,
+                             const struct fw_img *fw)
+{
+       int ret;
+
+       ret = iwl_alloc_fw_paging_mem(mvm, fw);
+       if (ret)
+               return ret;
+
+       return iwl_fill_paging_mem(mvm, fw);
+}
+
+/* send paging cmd to FW in case CPU2 has paging image */
+static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
+{
+       int blk_idx;
+       __le32 dev_phy_addr;
+       struct iwl_fw_paging_cmd fw_paging_cmd = {
+               .flags =
+                       cpu_to_le32(PAGING_CMD_IS_SECURED |
+                                   PAGING_CMD_IS_ENABLED |
+                                   (mvm->num_of_pages_in_last_blk <<
+                                   PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
+               .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
+               .block_num = cpu_to_le32(mvm->num_of_paging_blk),
+       };
+
+       /* loop over all paging blocks + the CSS block */
+       for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+               dev_phy_addr =
+                       cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
+                                   PAGE_2_EXP_SIZE);
+               fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+       }
+
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(fw_paging_cmd), &fw_paging_cmd);
+}
+
+/*
+ * Retrieve the FW paging item (FW_GET_ITEM_CMD) in case CPU2 has paging image
+ */
+static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
+{
+       int ret;
+       struct iwl_fw_get_item_cmd fw_get_item_cmd = {
+               .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
+       };
+
+       struct iwl_fw_get_item_resp *item_resp;
+       struct iwl_host_cmd cmd = {
+               .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+               .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+               .data = { &fw_get_item_cmd, },
+       };
+
+       cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (ret) {
+               IWL_ERR(mvm,
+                       "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
+                       ret);
+               return ret;
+       }
+
+       item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
+       if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
+               IWL_ERR(mvm,
+                       "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
+                       le32_to_cpu(item_resp->item_id));
+               ret = -EIO;
+               goto exit;
+       }
+
+       mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+                                                 GFP_KERNEL);
+       if (!mvm->trans->paging_download_buf) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+       mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
+       mvm->trans->paging_db = mvm->fw_paging_db;
+       IWL_DEBUG_FW(mvm,
+                    "Paging: got paging request address (paging_req_addr 0x%08x)\n",
+                    mvm->trans->paging_req_addr);
+
+exit:
+       iwl_free_resp(&cmd);
+
+       return ret;
+}
+
 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                         struct iwl_rx_packet *pkt, void *data)
 {
@@ -213,7 +513,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        const struct fw_img *fw;
        int ret, i;
        enum iwl_ucode_type old_type = mvm->cur_ucode;
-       static const u8 alive_cmd[] = { MVM_ALIVE };
+       static const u16 alive_cmd[] = { MVM_ALIVE };
        struct iwl_sf_region st_fwrd_space;
 
        if (ucode_type == IWL_UCODE_REGULAR &&
@@ -244,6 +544,11 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
                                    MVM_UCODE_ALIVE_TIMEOUT);
        if (ret) {
+               if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+                       IWL_ERR(mvm,
+                               "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+                               iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
+                               iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
                mvm->cur_ucode = old_type;
                return ret;
        }
@@ -268,6 +573,40 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 
        iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 
+       /*
+        * configure and operate fw paging mechanism.
+        * driver configures the paging flow only once, CPU2 paging image
+        * included in the IWL_UCODE_INIT image.
+        */
+       if (fw->paging_mem_size) {
+               /*
+                * When dma is not enabled, the driver needs to copy / write
+                * the downloaded / uploaded page to / from the smem.
+                * This gets the location of the place where the pages are
+                * stored.
+                */
+               if (!is_device_dma_capable(mvm->trans->dev)) {
+                       ret = iwl_trans_get_paging_item(mvm);
+                       if (ret) {
+                               IWL_ERR(mvm, "failed to get FW paging item\n");
+                               return ret;
+                       }
+               }
+
+               ret = iwl_save_fw_paging(mvm, fw);
+               if (ret) {
+                       IWL_ERR(mvm, "failed to save the FW paging image\n");
+                       return ret;
+               }
+
+               ret = iwl_send_paging_cmd(mvm, fw);
+               if (ret) {
+                       IWL_ERR(mvm, "failed to send the paging cmd\n");
+                       iwl_free_fw_paging(mvm);
+                       return ret;
+               }
+       }
+
        /*
         * Note: all the queues are enabled as part of the interface
         * initialization, but in firmware restart scenarios they
@@ -314,7 +653,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 {
        struct iwl_notification_wait calib_wait;
-       static const u8 init_complete[] = {
+       static const u16 init_complete[] = {
                INIT_COMPLETE_NOTIF,
                CALIB_RES_NOTIF_PHY_DB
        };
@@ -444,12 +783,6 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
                return;
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n",
-                       pkt->hdr.flags);
-               goto exit;
-       }
-
        mem_cfg = (void *)pkt->data;
 
        mvm->shared_mem_cfg.shared_mem_addr =
@@ -473,14 +806,18 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
                le32_to_cpu(mem_cfg->page_buff_size);
        IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
 
-exit:
        iwl_free_resp(&cmd);
 }
 
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
                                struct iwl_mvm_dump_desc *desc,
-                               unsigned int delay)
+                               struct iwl_fw_dbg_trigger_tlv *trigger)
 {
+       unsigned int delay = 0;
+
+       if (trigger)
+               delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+
        if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
                return -EBUSY;
 
@@ -491,6 +828,7 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
                 le32_to_cpu(desc->trig_desc.type));
 
        mvm->fw_dump_desc = desc;
+       mvm->fw_dump_trig = trigger;
 
        queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
 
@@ -498,7 +836,8 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 }
 
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
-                          const char *str, size_t len, unsigned int delay)
+                          const char *str, size_t len,
+                          struct iwl_fw_dbg_trigger_tlv *trigger)
 {
        struct iwl_mvm_dump_desc *desc;
 
@@ -510,14 +849,13 @@ int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
        desc->trig_desc.type = cpu_to_le32(trig);
        memcpy(desc->trig_desc.data, str, len);
 
-       return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
+       return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
 }
 
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
                                struct iwl_fw_dbg_trigger_tlv *trigger,
                                const char *fmt, ...)
 {
-       unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
        u16 occurrences = le16_to_cpu(trigger->occurrences);
        int ret, len = 0;
        char buf[64];
@@ -541,8 +879,9 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
                len = strlen(buf) + 1;
        }
 
-       ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf,
-                                    len, delay);
+       ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
+                                    trigger);
+
        if (ret)
                return ret;
 
@@ -676,8 +1015,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                goto error;
        }
 
-       if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10)
-               iwl_mvm_get_shared_mem_conf(mvm);
+       iwl_mvm_get_shared_mem_conf(mvm);
 
        ret = iwl_mvm_sf_update(mvm, NULL, false);
        if (ret)
@@ -760,6 +1098,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                        goto error;
        }
 
+       if (iwl_mvm_is_csum_supported(mvm) &&
+           mvm->cfg->features & NETIF_F_RXCSUM)
+               iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
+
        /* allow FW/transport low power modes if not during restart */
        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -815,9 +1157,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
        return ret;
 }
 
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@@ -828,13 +1169,10 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
                          (flags & SW_CARD_DISABLED) ? "Kill" : "On",
                          (flags & CT_KILL_CARD_DISABLED) ?
                          "Reached" : "Not reached");
-
-       return 0;
 }
 
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
-                           struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
@@ -845,5 +1183,4 @@ int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
                       le32_to_cpu(mfuart_notif->external_ver),
                       le32_to_cpu(mfuart_notif->status),
                       le32_to_cpu(mfuart_notif->duration));
-       return 0;
 }
index 1812dd018af27628bdc87bfae780e3de3735bea3..3424315dd876de13f9a64da6b7703f51103ce87e 100644 (file)
@@ -1312,9 +1312,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
        }
 }
 
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
-                           struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
@@ -1365,8 +1364,6 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
                        RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
                }
        }
-
-       return 0;
 }
 
 static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
@@ -1415,9 +1412,8 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
                iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
 }
 
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+                                    struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
@@ -1434,5 +1430,4 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
                                                   IEEE80211_IFACE_ITER_NORMAL,
                                                   iwl_mvm_beacon_loss_iterator,
                                                   mb);
-       return 0;
 }
index dfdab38e2d4ad5d84d8a4b30b51cfb846e44c179..537a15719d45f1035b5cec2c98ff06939f82c2fe 100644 (file)
@@ -641,6 +641,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                        IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
                IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
                hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+               ieee80211_hw_set(hw, TDLS_WIDER_BW);
        }
 
        if (fw_has_capa(&mvm->fw->ucode_capa,
@@ -649,6 +650,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
        }
 
+       hw->netdev_features |= mvm->cfg->features;
+       if (!iwl_mvm_is_csum_supported(mvm))
+               hw->netdev_features &= ~NETIF_F_RXCSUM;
+
        ret = ieee80211_register_hw(mvm->hw);
        if (ret)
                iwl_mvm_leds_exit(mvm);
@@ -1120,9 +1125,14 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        u32 file_len, fifo_data_len = 0;
        u32 smem_len = mvm->cfg->smem_len;
        u32 sram2_len = mvm->cfg->dccm2_len;
+       bool monitor_dump_only = false;
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (mvm->fw_dump_trig &&
+           mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
+               monitor_dump_only = true;
+
        fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
        if (!fw_error_dump)
                return;
@@ -1174,6 +1184,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                   fifo_data_len +
                   sizeof(*dump_info);
 
+       /* Make room for the SMEM, if it exists */
+       if (smem_len)
+               file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
+
+       /* Make room for the secondary SRAM, if it exists */
+       if (sram2_len)
+               file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
+
+       /* If we only want a monitor dump, reset the file length */
+       if (monitor_dump_only) {
+               file_len = sizeof(*dump_file) + sizeof(*dump_data) +
+                          sizeof(*dump_info);
+       }
+
        /*
         * In 8000 HW family B-step include the ICCM (which resides separately)
         */
@@ -1186,14 +1210,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
                            mvm->fw_dump_desc->len;
 
-       /* Make room for the SMEM, if it exists */
-       if (smem_len)
-               file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
-       /* Make room for the secondary SRAM, if it exists */
-       if (sram2_len)
-               file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
        dump_file = vzalloc(file_len);
        if (!dump_file) {
                kfree(fw_error_dump);
@@ -1239,6 +1255,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                dump_data = iwl_fw_error_next_data(dump_data);
        }
 
+       /* In case we only want a monitor dump, skip to dumping transport data */
+       if (monitor_dump_only)
+               goto dump_trans_data;
+
        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
        dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
        dump_mem = (void *)dump_data->data;
@@ -1282,7 +1302,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                                         dump_mem->data, IWL8260_ICCM_LEN);
        }
 
-       fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
+dump_trans_data:
+       fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
+                                                      mvm->fw_dump_trig);
        fw_error_dump->op_mode_len = file_len;
        if (fw_error_dump->trans_ptr)
                file_len += fw_error_dump->trans_ptr->len;
@@ -1291,6 +1313,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
                      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
 
+       mvm->fw_dump_trig = NULL;
        clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
 }
 
@@ -1433,22 +1456,9 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
 
 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
 {
-       bool exit_now;
-
        if (!iwl_mvm_is_d0i3_supported(mvm))
                return;
 
-       mutex_lock(&mvm->d0i3_suspend_mutex);
-       __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
-       exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
-                                       &mvm->d0i3_suspend_flags);
-       mutex_unlock(&mvm->d0i3_suspend_mutex);
-
-       if (exit_now) {
-               IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
-               _iwl_mvm_exit_d0i3(mvm);
-       }
-
        if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
                if (!wait_event_timeout(mvm->d0i3_exit_waitq,
                                        !test_bit(IWL_MVM_STATUS_IN_D0I3,
@@ -1664,6 +1674,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
                goto out_unlock;
        }
 
+       mvmvif->features |= hw->netdev_features;
+
        ret = iwl_mvm_mac_ctxt_add(mvm, vif);
        if (ret)
                goto out_release;
@@ -2880,10 +2892,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
                key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-               /* fall-through */
-       case WLAN_CIPHER_SUITE_CCMP:
                key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
                break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+               break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
                WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
                break;
@@ -3025,7 +3038,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
        int res, time_reg = DEVICE_SYSTEM_TIME_REG;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
-       static const u8 time_event_response[] = { HOT_SPOT_CMD };
+       static const u16 time_event_response[] = { HOT_SPOT_CMD };
        struct iwl_notification_wait wait_time_event;
        struct iwl_hs20_roc_req aux_roc_req = {
                .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
index 2d4bad5fe825fb4802fc1439167323dcf4b23e7b..b95a07ec9e362bf031f960dee35dc52c97221ed0 100644 (file)
@@ -80,6 +80,7 @@
 #include "sta.h"
 #include "fw-api.h"
 #include "constants.h"
+#include "tof.h"
 
 #define IWL_INVALID_MAC80211_QUEUE     0xff
 #define IWL_MVM_MAX_ADDRESSES          5
@@ -122,8 +123,7 @@ extern const struct ieee80211_ops iwl_mvm_hw_ops;
  *     be up'ed after the INIT fw asserted. This is useful to be able to use
  *     proprietary tools over testmode to debug the INIT fw.
  * @tfd_q_hang_detect: enabled the detection of hung transmit queues
- * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
- *     Save)-2(default), LP(Low Power)-3
+ * @power_scheme: one of enum iwl_power_scheme
  */
 struct iwl_mvm_mod_params {
        bool init_dbg;
@@ -357,6 +357,7 @@ struct iwl_mvm_vif_bf_data {
  *     # of received beacons accumulated over FW restart, and the current
  *     average signal of beacons retrieved from the firmware
  * @csa_failed: CSA failed to schedule time event, report an error later
+ * @features: hw features active for this vif
  */
 struct iwl_mvm_vif {
        struct iwl_mvm *mvm;
@@ -437,6 +438,9 @@ struct iwl_mvm_vif {
        /* Indicates that CSA countdown may be started */
        bool csa_countdown;
        bool csa_failed;
+
+       /* TCP Checksum Offload */
+       netdev_features_t features;
 };
 
 static inline struct iwl_mvm_vif *
@@ -606,6 +610,11 @@ struct iwl_mvm {
        /* NVM sections */
        struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
 
+       /* Paging section */
+       struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
+       u16 num_of_paging_blk;
+       u16 num_of_pages_in_last_blk;
+
        /* EEPROM MAC addresses */
        struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
 
@@ -686,6 +695,7 @@ struct iwl_mvm {
         * can hold 16 keys at most. Reflect this fact.
         */
        unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+       u8 fw_key_deleted[STA_KEY_MAX_NUM];
 
        /* references taken by the driver and spinlock protecting them */
        spinlock_t refs_lock;
@@ -698,6 +708,7 @@ struct iwl_mvm {
        u8 fw_dbg_conf;
        struct delayed_work fw_dump_wk;
        struct iwl_mvm_dump_desc *fw_dump_desc;
+       struct iwl_fw_dbg_trigger_tlv *fw_dump_trig;
 
 #ifdef CONFIG_IWLWIFI_LEDS
        struct led_classdev led;
@@ -822,6 +833,7 @@ struct iwl_mvm {
        struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
 
        u32 ciphers[6];
+       struct iwl_mvm_tof_data tof_data;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -941,6 +953,12 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
                IWL_MVM_BT_COEX_RRC;
 }
 
+static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
+{
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -974,12 +992,12 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
 /* Tx / Host Commands */
 int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
                                  struct iwl_host_cmd *cmd);
-int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
                                      u32 flags, u16 len, const void *data);
 int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
                                         struct iwl_host_cmd *cmd,
                                         u32 *status);
-int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
                                             u16 len, const void *data,
                                             u32 *status);
 int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
@@ -988,10 +1006,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        struct iwl_tx_cmd *tx_cmd,
                        struct ieee80211_tx_info *info, u8 sta_id);
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
-                              struct ieee80211_tx_info *info,
-                              struct iwl_tx_cmd *tx_cmd,
-                              struct sk_buff *skb_frag);
 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
                            struct ieee80211_tx_info *info,
                            struct ieee80211_sta *sta, __le16 fc);
@@ -1003,6 +1017,17 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
 
+static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
+                                          struct iwl_tx_cmd *tx_cmd)
+{
+       struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+       tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+       memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+       if (info->flags & IEEE80211_TX_CTL_AMPDU)
+               tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+}
+
 static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
 {
        flush_work(&mvm->async_handlers_wk);
@@ -1011,9 +1036,8 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
 /* Statistics */
 void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
                                  struct iwl_rx_packet *pkt);
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+                          struct iwl_rx_cmd_buffer *rxb);
 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
 
@@ -1059,27 +1083,20 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
  * FW notifications / CMD responses handlers
  * Convention: iwl_mvm_rx_<NAME OF THE CMD>
  */
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                      struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                     struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                       struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+                       struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+                                  struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
+                                    struct iwl_rx_cmd_buffer *rxb);
 
 /* MVM PHY */
 int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
@@ -1106,12 +1123,10 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif);
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
-                           struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+                                    struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@@ -1135,29 +1150,24 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
 
 /* Scheduled scan */
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb);
 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                             struct ieee80211_vif *vif,
                             struct cfg80211_sched_scan_request *req,
                             struct ieee80211_scan_ies *ies,
                             int type);
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb);
 
 /* UMAC scan */
 int iwl_mvm_config_scan(struct iwl_mvm *mvm);
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb);
 
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1196,9 +1206,8 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                 char *buf, int bufsz);
 
 void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd);
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb);
 
 #ifdef CONFIG_IWLWIFI_LEDS
 int iwl_mvm_leds_init(struct iwl_mvm *mvm);
@@ -1254,9 +1263,8 @@ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
 
 /* BT Coex */
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
-                            struct iwl_rx_cmd_buffer *rxb,
-                            struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           enum ieee80211_rssi_event_data);
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
@@ -1274,9 +1282,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
 void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
 int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-                                struct iwl_rx_cmd_buffer *rxb,
-                                struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+                                 struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                               enum ieee80211_rssi_event_data);
 u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
@@ -1285,9 +1292,8 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
                                         struct ieee80211_sta *sta);
 bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
                                        enum ieee80211_band band);
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-                                     struct iwl_rx_cmd_buffer *rxb,
-                                     struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+                                      struct iwl_rx_cmd_buffer *rxb);
 
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1376,9 +1382,8 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
-                      struct iwl_rx_cmd_buffer *rxb,
-                      struct iwl_device_cmd *cmd);
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+                       struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
 void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
 void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
@@ -1390,9 +1395,8 @@ struct iwl_mcc_update_resp *
 iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                   enum iwl_mcc_source src_id);
 int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
-                              struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb);
 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
                                                  const char *alpha2,
                                                  enum iwl_mcc_source src_id,
@@ -1431,8 +1435,7 @@ void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
 void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif,
                                        struct ieee80211_sta *sta);
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
 
 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
@@ -1442,10 +1445,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
 
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
-                          const char *str, size_t len, unsigned int delay);
+                          const char *str, size_t len,
+                          struct iwl_fw_dbg_trigger_tlv *trigger);
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
                                struct iwl_mvm_dump_desc *desc,
-                               unsigned int delay);
+                               struct iwl_fw_dbg_trigger_tlv *trigger);
 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
                                struct iwl_fw_dbg_trigger_tlv *trigger,
index 2a6be350704a9442245f6e8bd2391503f67f0c6d..328187da7541dff2233c1f020c559522e7c4f8a7 100644 (file)
@@ -139,12 +139,6 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
                return ret;
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n",
-                       pkt->hdr.flags);
-               ret = -EIO;
-               goto exit;
-       }
 
        /* Extract NVM response */
        nvm_resp = (void *)pkt->data;
@@ -652,12 +646,6 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                return ERR_PTR(ret);
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
-                       pkt->hdr.flags);
-               ret = -EIO;
-               goto exit;
-       }
 
        /* Extract MCC response */
        mcc_resp = (void *)pkt->data;
@@ -839,9 +827,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
        return retval;
 }
 
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
-                              struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
@@ -852,7 +839,7 @@ int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
        lockdep_assert_held(&mvm->mutex);
 
        if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
-               return 0;
+               return;
 
        mcc[0] = notif->mcc >> 8;
        mcc[1] = notif->mcc & 0xff;
@@ -864,10 +851,8 @@ int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
                      mcc, src);
        regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
        if (IS_ERR_OR_NULL(regd))
-               return 0;
+               return;
 
        regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
        kfree(regd);
-
-       return 0;
 }
index e4fa50075ffdc3b51c66f4b6587595134da9ade3..07e68929b005e06ae6c74f298f94e610240eb46d 100644 (file)
@@ -201,14 +201,15 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
 }
 
 struct iwl_rx_handlers {
-       u8 cmd_id;
+       u16 cmd_id;
        bool async;
-       int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                 struct iwl_device_cmd *cmd);
+       void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
 #define RX_HANDLER(_cmd_id, _fn, _async)       \
        { .cmd_id = _cmd_id , .fn = _fn , .async = _async }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async)        \
+       { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }
 
 /*
  * Handlers for fw notifications
@@ -221,7 +222,6 @@ struct iwl_rx_handlers {
  * called from a worker with mvm->mutex held.
  */
 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
-       RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false),
        RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
        RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
        RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
@@ -261,9 +261,11 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
                   true),
        RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
+       RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
 
 };
 #undef RX_HANDLER
+#undef RX_HANDLER_GRP
 #define CMD(x) [x] = #x
 
 static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
@@ -286,8 +288,10 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(PHY_CONFIGURATION_CMD),
        CMD(CALIB_RES_NOTIF_PHY_DB),
        CMD(SET_CALIB_DEFAULT_CMD),
+       CMD(FW_PAGING_BLOCK_CMD),
        CMD(ADD_STA_KEY),
        CMD(ADD_STA),
+       CMD(FW_GET_ITEM_CMD),
        CMD(REMOVE_STA),
        CMD(LQ_CMD),
        CMD(SCAN_OFFLOAD_CONFIG_CMD),
@@ -470,6 +474,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
        trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
        trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
+       trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
+                                              IWL_UCODE_TLV_API_WIDE_CMD_HDR);
 
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
                trans_cfg.bc_table_dword = true;
@@ -576,6 +582,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        /* rpm starts with a taken ref. only set the appropriate bit here. */
        mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1;
 
+       iwl_mvm_tof_init(mvm);
+
        return op_mode;
 
  out_unregister:
@@ -623,14 +631,15 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
                kfree(mvm->nvm_sections[i].data);
 
+       iwl_mvm_tof_clean(mvm);
+
        ieee80211_free_hw(mvm->hw);
 }
 
 struct iwl_async_handler_entry {
        struct list_head list;
        struct iwl_rx_cmd_buffer rxb;
-       int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                 struct iwl_device_cmd *cmd);
+       void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
@@ -667,9 +676,7 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
        spin_unlock_bh(&mvm->async_handlers_lock);
 
        list_for_each_entry_safe(entry, tmp, &local_list, list) {
-               if (entry->fn(mvm, &entry->rxb, NULL))
-                       IWL_WARN(mvm,
-                                "returned value from ASYNC handlers are ignored\n");
+               entry->fn(mvm, &entry->rxb);
                iwl_free_rxb(&entry->rxb);
                list_del(&entry->list);
                kfree(entry);
@@ -698,24 +705,30 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
                if (!cmds_trig->cmds[i].cmd_id)
                        break;
 
-               if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd)
+               if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
+                   cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
                        continue;
 
                iwl_mvm_fw_dbg_collect_trig(mvm, trig,
-                                           "CMD 0x%02x received",
-                                           pkt->hdr.cmd);
+                                           "CMD 0x%02x.%02x received",
+                                           pkt->hdr.group_id, pkt->hdr.cmd);
                break;
        }
 }
 
-static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
-                              struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+                               struct napi_struct *napi,
+                               struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        u8 i;
 
+       if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
+               iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+               return;
+       }
+
        iwl_mvm_rx_check_trigger(mvm, pkt);
 
        /*
@@ -729,16 +742,18 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
                const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
                struct iwl_async_handler_entry *entry;
 
-               if (rx_h->cmd_id != pkt->hdr.cmd)
+               if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
                        continue;
 
-               if (!rx_h->async)
-                       return rx_h->fn(mvm, rxb, cmd);
+               if (!rx_h->async) {
+                       rx_h->fn(mvm, rxb);
+                       return;
+               }
 
                entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
                /* we can't do much... */
                if (!entry)
-                       return 0;
+                       return;
 
                entry->rxb._page = rxb_steal_page(rxb);
                entry->rxb._offset = rxb->_offset;
@@ -750,8 +765,6 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
                schedule_work(&mvm->async_handlers_wk);
                break;
        }
-
-       return 0;
 }
 
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
@@ -903,7 +916,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
         * can't recover this since we're already half suspended.
         */
        if (!mvm->restart_fw && fw_error) {
-               iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0);
+               iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
+                                           NULL);
        } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
                                    &mvm->status)) {
                struct iwl_mvm_reprobe *reprobe;
@@ -1100,9 +1114,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
 
        IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
 
-       /* make sure we have no running tx while configuring the qos */
        set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
-       synchronize_net();
 
        /*
         * iwl_mvm_ref_sync takes a reference before checking the flag.
@@ -1130,6 +1142,9 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
                mvm->d0i3_offloading = false;
        }
 
+       /* make sure we have no running tx while configuring the seqno */
+       synchronize_net();
+
        iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
        ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
                                   sizeof(wowlan_config_cmd),
@@ -1156,15 +1171,25 @@ static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
        iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
 }
 
-static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
-                                        struct ieee80211_vif *vif)
+struct iwl_mvm_wakeup_reason_iter_data {
+       struct iwl_mvm *mvm;
+       u32 wakeup_reasons;
+};
+
+static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac,
+                                           struct ieee80211_vif *vif)
 {
-       struct iwl_mvm *mvm = data;
+       struct iwl_mvm_wakeup_reason_iter_data *data = _data;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
        if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
-           mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
-               iwl_mvm_connection_loss(mvm, vif, "D0i3");
+           data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) {
+               if (data->wakeup_reasons &
+                   IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
+                       iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
+               else
+                       ieee80211_beacon_loss(vif);
+       }
 }
 
 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
@@ -1232,7 +1257,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
        };
        struct iwl_wowlan_status *status;
        int ret;
-       u32 disconnection_reasons, wakeup_reasons;
+       u32 handled_reasons, wakeup_reasons;
        __le16 *qos_seq = NULL;
 
        mutex_lock(&mvm->mutex);
@@ -1249,13 +1274,18 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
 
        IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
 
-       disconnection_reasons =
-               IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
-               IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
-       if (wakeup_reasons & disconnection_reasons)
+       handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+                               IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+       if (wakeup_reasons & handled_reasons) {
+               struct iwl_mvm_wakeup_reason_iter_data data = {
+                       .mvm = mvm,
+                       .wakeup_reasons = wakeup_reasons,
+               };
+
                ieee80211_iterate_active_interfaces(
                        mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                       iwl_mvm_d0i3_disconnect_iter, mvm);
+                       iwl_mvm_d0i3_wakeup_reason_iter, &data);
+       }
 out:
        iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
 
@@ -1308,17 +1338,6 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
        return _iwl_mvm_exit_d0i3(mvm);
 }
 
-static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
-                            struct napi_struct *napi,
-                            struct net_device *napi_dev,
-                            int (*poll)(struct napi_struct *, int),
-                            int weight)
-{
-       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
-       ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
-}
-
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
        .start = iwl_op_mode_mvm_start,
        .stop = iwl_op_mode_mvm_stop,
@@ -1332,5 +1351,4 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
        .nic_config = iwl_mvm_nic_config,
        .enter_d0i3 = iwl_mvm_enter_d0i3,
        .exit_d0i3 = iwl_mvm_exit_d0i3,
-       .napi_add = iwl_mvm_napi_add,
 };
index d2c6ba9d326b4656b8f6e7007554fb3a5ba8e681..4645877882a6af6ee51a87838f9f3b57e3ab0054 100644 (file)
@@ -112,11 +112,12 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
 static
 void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
                                          struct ieee80211_vif *vif,
-                                         struct iwl_beacon_filter_cmd *cmd)
+                                         struct iwl_beacon_filter_cmd *cmd,
+                                         bool d0i3)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-       if (vif->bss_conf.cqm_rssi_thold) {
+       if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
                cmd->bf_energy_delta =
                        cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
                /* fw uses an absolute value for this */
@@ -287,27 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
        return true;
 }
 
-static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi)
-{
-       int numerator;
-       int dtim_interval = dtimper * bi;
-
-       if (WARN_ON(!dtim_interval))
-               return 0;
-
-       if (dtimper == 1) {
-               if (bi > 100)
-                       numerator = 408;
-               else
-                       numerator = 510;
-       } else if (dtimper < 10) {
-               numerator = 612;
-       } else {
-               return 0;
-       }
-       return max(1, (numerator / dtim_interval));
-}
-
 static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
 {
        struct ieee80211_chanctx_conf *chanctx_conf;
@@ -357,8 +337,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
-       if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
-           !mvmvif->pm_enabled)
+       if (!vif->bss_conf.ps || !mvmvif->pm_enabled ||
+           (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p))
                return;
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -377,11 +357,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
        if (!radar_detect && (dtimper < 10) &&
            (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
             mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
-               cmd->skip_dtim_periods =
-                       iwl_mvm_power_get_skip_over_dtim(dtimper, bi);
-               if (cmd->skip_dtim_periods)
-                       cmd->flags |=
-                               cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+               cmd->skip_dtim_periods = 3;
        }
 
        if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
@@ -509,9 +486,8 @@ static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
                       ETH_ALEN);
 }
 
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
@@ -520,8 +496,6 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
        ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
-
-       return 0;
 }
 
 struct iwl_power_vifs {
@@ -810,7 +784,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
            vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
-       iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+       iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
        if (!d0i3)
                iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
        ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
index daff1d0a8e4adad6ebf2cdc5e811411776d63913..5ae9c8aa868fa445063e956a26ac1444caf5d8dd 100644 (file)
@@ -177,7 +177,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-       if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
+       if (IWL_MVM_RS_DISABLE_P2P_MIMO &&
+           iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
                return false;
 
        if (mvm->nvm_data->sku_cap_mimo_disabled)
@@ -2403,7 +2404,7 @@ struct rs_init_rate_info {
        u8 rate_idx;
 };
 
-static const struct rs_init_rate_info rs_init_rates_24ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = {
        { -60, IWL_RATE_54M_INDEX },
        { -64, IWL_RATE_48M_INDEX },
        { -68, IWL_RATE_36M_INDEX },
@@ -2416,7 +2417,7 @@ static const struct rs_init_rate_info rs_init_rates_24ghz[] = {
        { S8_MIN, IWL_RATE_1M_INDEX },
 };
 
-static const struct rs_init_rate_info rs_init_rates_5ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = {
        { -60, IWL_RATE_54M_INDEX },
        { -64, IWL_RATE_48M_INDEX },
        { -72, IWL_RATE_36M_INDEX },
@@ -2427,6 +2428,124 @@ static const struct rs_init_rate_info rs_init_rates_5ghz[] = {
        { S8_MIN, IWL_RATE_6M_INDEX },
 };
 
+static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
+       { -60, IWL_RATE_MCS_7_INDEX },
+       { -64, IWL_RATE_MCS_6_INDEX },
+       { -68, IWL_RATE_MCS_5_INDEX },
+       { -72, IWL_RATE_MCS_4_INDEX },
+       { -80, IWL_RATE_MCS_3_INDEX },
+       { -84, IWL_RATE_MCS_2_INDEX },
+       { -85, IWL_RATE_MCS_1_INDEX },
+       { S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
+       { -60, IWL_RATE_MCS_8_INDEX },
+       { -64, IWL_RATE_MCS_7_INDEX },
+       { -68, IWL_RATE_MCS_6_INDEX },
+       { -72, IWL_RATE_MCS_5_INDEX },
+       { -80, IWL_RATE_MCS_4_INDEX },
+       { -84, IWL_RATE_MCS_3_INDEX },
+       { -85, IWL_RATE_MCS_2_INDEX },
+       { -87, IWL_RATE_MCS_1_INDEX },
+       { S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = {
+       { -60, IWL_RATE_MCS_9_INDEX },
+       { -64, IWL_RATE_MCS_8_INDEX },
+       { -68, IWL_RATE_MCS_7_INDEX },
+       { -72, IWL_RATE_MCS_6_INDEX },
+       { -80, IWL_RATE_MCS_5_INDEX },
+       { -84, IWL_RATE_MCS_4_INDEX },
+       { -85, IWL_RATE_MCS_3_INDEX },
+       { -87, IWL_RATE_MCS_2_INDEX },
+       { -88, IWL_RATE_MCS_1_INDEX },
+       { S8_MIN, IWL_RATE_MCS_0_INDEX },
+};
+
+/* Init the optimal rate based on STA caps
+ * This combined with rssi is used to report the last tx rate
+ * to userspace when we haven't transmitted enough frames.
+ */
+static void rs_init_optimal_rate(struct iwl_mvm *mvm,
+                                struct ieee80211_sta *sta,
+                                struct iwl_lq_sta *lq_sta)
+{
+       struct rs_rate *rate = &lq_sta->optimal_rate;
+
+       if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID)
+               rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+       else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
+               rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+       else if (lq_sta->band == IEEE80211_BAND_5GHZ)
+               rate->type = LQ_LEGACY_A;
+       else
+               rate->type = LQ_LEGACY_G;
+
+       rate->bw = rs_bw_from_sta_bw(sta);
+       rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL);
+
+       /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */
+
+       if (is_mimo(rate)) {
+               lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate;
+       } else if (is_siso(rate)) {
+               lq_sta->optimal_rate_mask = lq_sta->active_siso_rate;
+       } else {
+               lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
+
+               if (lq_sta->band == IEEE80211_BAND_5GHZ) {
+                       lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
+                       lq_sta->optimal_nentries =
+                               ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
+               } else {
+                       lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy;
+                       lq_sta->optimal_nentries =
+                               ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
+               }
+       }
+
+       if (is_vht(rate)) {
+               if (rate->bw == RATE_MCS_CHAN_WIDTH_20) {
+                       lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz;
+                       lq_sta->optimal_nentries =
+                               ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
+               } else {
+                       lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz;
+                       lq_sta->optimal_nentries =
+                               ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
+               }
+       } else if (is_ht(rate)) {
+               lq_sta->optimal_rates = rs_optimal_rates_ht;
+               lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht);
+       }
+}
+
+/* Compute the optimal rate index based on RSSI */
+static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
+                                          struct iwl_lq_sta *lq_sta)
+{
+       struct rs_rate *rate = &lq_sta->optimal_rate;
+       int i;
+
+       rate->index = find_first_bit(&lq_sta->optimal_rate_mask,
+                                    BITS_PER_LONG);
+
+       for (i = 0; i < lq_sta->optimal_nentries; i++) {
+               int rate_idx = lq_sta->optimal_rates[i].rate_idx;
+
+               if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) &&
+                   (BIT(rate_idx) & lq_sta->optimal_rate_mask)) {
+                       rate->index = rate_idx;
+                       break;
+               }
+       }
+
+       rs_dump_rate(mvm, rate, "OPTIMAL RATE");
+       return rate;
+}
+
 /* Choose an initial legacy rate and antenna to use based on the RSSI
  * of last Rx
  */
@@ -2468,12 +2587,12 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
 
        if (band == IEEE80211_BAND_5GHZ) {
                rate->type = LQ_LEGACY_A;
-               initial_rates = rs_init_rates_5ghz;
-               nentries = ARRAY_SIZE(rs_init_rates_5ghz);
+               initial_rates = rs_optimal_rates_5ghz_legacy;
+               nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
        } else {
                rate->type = LQ_LEGACY_G;
-               initial_rates = rs_init_rates_24ghz;
-               nentries = ARRAY_SIZE(rs_init_rates_24ghz);
+               initial_rates = rs_optimal_rates_24ghz_legacy;
+               nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
        }
 
        if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) {
@@ -2496,10 +2615,21 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
                         struct iwl_lq_sta *lq_sta,
                         struct ieee80211_rx_status *rx_status)
 {
+       int i;
+
        lq_sta->pers.chains = rx_status->chains;
        lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
        lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
        lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2];
+       lq_sta->pers.last_rssi = S8_MIN;
+
+       for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
+               if (!(lq_sta->pers.chains & BIT(i)))
+                       continue;
+
+               if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi)
+                       lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i];
+       }
 }
 
 /**
@@ -2538,6 +2668,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
        rate = &tbl->rate;
 
        rs_get_initial_rate(mvm, lq_sta, band, rate);
+       rs_init_optimal_rate(mvm, sta, lq_sta);
 
        WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
        if (rate->ant == ANT_A)
@@ -2560,6 +2691,8 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
        struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_lq_sta *lq_sta = mvm_sta;
+       struct rs_rate *optimal_rate;
+       u32 last_ucode_rate;
 
        if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
                /* if vif isn't initialized mvm doesn't know about
@@ -2583,8 +2716,18 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
 
        iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
                                  info->band, &info->control.rates[0]);
-
        info->control.rates[0].count = 1;
+
+       /* Report the optimal rate based on rssi and STA caps if we haven't
+        * converged yet (too little traffic) or exploring other modulations
+        */
+       if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) {
+               optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
+               last_ucode_rate = ucode_rate_from_rs_rate(mvm,
+                                                         optimal_rate);
+               iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
+                                         &txrc->reported_rate);
+       }
 }
 
 static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
@@ -2605,6 +2748,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
 #endif
        lq_sta->pers.chains = 0;
        memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+       lq_sta->pers.last_rssi = S8_MIN;
 
        return &sta_priv->lq_sta;
 }
index 2a3da314305ab548e3c72c7a5479e6e666be5184..81314ad9ebe09a9b069f339958a33129c05c4988 100644 (file)
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -316,6 +317,14 @@ struct iwl_lq_sta {
        u8 max_siso_rate_idx;
        u8 max_mimo2_rate_idx;
 
+       /* Optimal rate based on RSSI and STA caps.
+        * Used only to reflect link speed to userspace.
+        */
+       struct rs_rate optimal_rate;
+       unsigned long optimal_rate_mask;
+       const struct rs_init_rate_info *optimal_rates;
+       int optimal_nentries;
+
        u8 missed_rate_counter;
 
        struct iwl_lq_cmd lq;
@@ -341,6 +350,7 @@ struct iwl_lq_sta {
 #endif
                u8 chains;
                s8 chain_signal[IEEE80211_MAX_CHAINS];
+               s8 last_rssi;
                struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
                struct iwl_mvm *drv;
        } pers;
index 8f1d93b7a13aa1059a4844c64f825822ee07605a..c37c10a423ce16b985ce620d2162d58c9c736fad 100644 (file)
@@ -61,6 +61,7 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
+#include <linux/skbuff.h>
 #include "iwl-trans.h"
 #include "mvm.h"
 #include "fw-api.h"
@@ -71,8 +72,7 @@
  * Copies the phy information in mvm->last_phy_info, it will be used when the
  * actual data will come from the fw in the next packet.
  */
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
@@ -86,8 +86,6 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                spin_unlock(&mvm->drv_stats_lock);
        }
 #endif
-
-       return 0;
 }
 
 /*
@@ -96,6 +94,7 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
  * Adds the rxb to a new skb and give it to mac80211
  */
 static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+                                           struct napi_struct *napi,
                                            struct sk_buff *skb,
                                            struct ieee80211_hdr *hdr, u16 len,
                                            u32 ampdu_status, u8 crypt_len,
@@ -129,7 +128,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
                                fraglen, rxb->truesize);
        }
 
-       ieee80211_rx(mvm->hw, skb);
+       ieee80211_rx_napi(mvm->hw, skb, napi);
 }
 
 /*
@@ -237,13 +236,26 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
        return 0;
 }
 
+static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+                           struct sk_buff *skb,
+                           u32 status)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+       if (mvmvif->features & NETIF_F_RXCSUM &&
+           status & RX_MPDU_RES_STATUS_CSUM_DONE &&
+           status & RX_MPDU_RES_STATUS_CSUM_OK)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
 /*
  * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
  *
  * Handles the actual data of the Rx packet from the fw
  */
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                      struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+                       struct iwl_rx_cmd_buffer *rxb)
 {
        struct ieee80211_hdr *hdr;
        struct ieee80211_rx_status *rx_status;
@@ -271,7 +283,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        skb = alloc_skb(128, GFP_ATOMIC);
        if (!skb) {
                IWL_ERR(mvm, "alloc_skb failed\n");
-               return 0;
+               return;
        }
 
        rx_status = IEEE80211_SKB_RXCB(skb);
@@ -284,14 +296,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
                               rx_pkt_status);
                kfree_skb(skb);
-               return 0;
+               return;
        }
 
        if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
                IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
                               phy_info->cfg_phy_cnt);
                kfree_skb(skb);
-               return 0;
+               return;
        }
 
        /*
@@ -366,6 +378,9 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                }
        }
 
+       if (sta && ieee80211_is_data(hdr->frame_control))
+               iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
+
        rcu_read_unlock();
 
        /* set the preamble flag if appropriate */
@@ -429,9 +444,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        iwl_mvm_update_frame_stats(mvm, rate_n_flags,
                                   rx_status->flag & RX_FLAG_AMPDU_DETAILS);
 #endif
-       iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
+       iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
                                        crypt_len, rxb);
-       return 0;
 }
 
 static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
@@ -623,10 +637,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
                iwl_rx_packet_payload_len(pkt));
 }
 
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
-       return 0;
 }
index 5514ad6d4e54373d2a48564ec790489dfe1808dc..15055462cd110a399dd2b16fe405d9ef86fc4e87 100644 (file)
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
 
-struct iwl_mvm_scan_params {
-       u32 max_out_time;
+enum iwl_mvm_scan_type {
+       IWL_SCAN_TYPE_UNASSOC,
+       IWL_SCAN_TYPE_WILD,
+       IWL_SCAN_TYPE_MILD,
+       IWL_SCAN_TYPE_FRAGMENTED,
+};
+
+enum iwl_mvm_traffic_load {
+       IWL_MVM_TRAFFIC_LOW,
+       IWL_MVM_TRAFFIC_MEDIUM,
+       IWL_MVM_TRAFFIC_HIGH,
+};
+
+struct iwl_mvm_scan_timing_params {
+       u32 dwell_active;
+       u32 dwell_passive;
+       u32 dwell_fragmented;
        u32 suspend_time;
-       bool passive_fragmented;
+       u32 max_out_time;
+};
+
+static struct iwl_mvm_scan_timing_params scan_timing[] = {
+       [IWL_SCAN_TYPE_UNASSOC] = {
+               .dwell_active = 10,
+               .dwell_passive = 110,
+               .dwell_fragmented = 44,
+               .suspend_time = 0,
+               .max_out_time = 0,
+       },
+       [IWL_SCAN_TYPE_WILD] = {
+               .dwell_active = 10,
+               .dwell_passive = 110,
+               .dwell_fragmented = 44,
+               .suspend_time = 30,
+               .max_out_time = 120,
+       },
+       [IWL_SCAN_TYPE_MILD] = {
+               .dwell_active = 10,
+               .dwell_passive = 110,
+               .dwell_fragmented = 44,
+               .suspend_time = 120,
+               .max_out_time = 120,
+       },
+       [IWL_SCAN_TYPE_FRAGMENTED] = {
+               .dwell_active = 10,
+               .dwell_passive = 110,
+               .dwell_fragmented = 44,
+               .suspend_time = 95,
+               .max_out_time = 44,
+       },
+};
+
+struct iwl_mvm_scan_params {
+       enum iwl_mvm_scan_type type;
        u32 n_channels;
        u16 delay;
        int n_ssids;
@@ -90,15 +140,7 @@ struct iwl_mvm_scan_params {
        int n_match_sets;
        struct iwl_scan_probe_req preq;
        struct cfg80211_match_set *match_sets;
-       struct _dwell {
-               u16 passive;
-               u16 active;
-               u16 fragmented;
-       } dwell[IEEE80211_NUM_BANDS];
-       struct {
-               u8 iterations;
-               u8 full_scan_mul; /* not used for UMAC */
-       } schedule[2];
+       u8 iterations[2];
 };
 
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
@@ -147,34 +189,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
                return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
 }
 
-/*
- * If req->n_ssids > 0, it means we should do an active scan.
- * In case of active scan w/o directed scan, we receive a zero-length SSID
- * just to notify that this scan is active and not passive.
- * In order to notify the FW of the number of SSIDs we wish to scan (including
- * the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first). If the first SSID is
- * already included in the probe template, so we need to set only
- * req->n_ssids - 1 bits in addition to the first bit.
- */
-static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
-                                   enum ieee80211_band band, int n_ssids)
-{
-       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
-               return 10;
-       if (band == IEEE80211_BAND_2GHZ)
-               return 20  + 3 * (n_ssids + 1);
-       return 10  + 2 * (n_ssids + 1);
-}
-
-static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
-                                    enum ieee80211_band band)
-{
-       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
-                       return 110;
-       return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
-}
-
 static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
                                            struct ieee80211_vif *vif)
 {
@@ -186,90 +200,39 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
                *global_cnt += 1;
 }
 
-static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
-                                   struct ieee80211_vif *vif,
-                                   struct iwl_mvm_scan_params *params)
+static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
+{
+       return IWL_MVM_TRAFFIC_LOW;
+}
+
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       struct iwl_mvm_scan_params *params)
 {
        int global_cnt = 0;
-       enum ieee80211_band band;
-       u8 frag_passive_dwell = 0;
+       enum iwl_mvm_traffic_load load;
+       bool low_latency;
 
        ieee80211_iterate_active_interfaces_atomic(mvm->hw,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_scan_condition_iterator,
                                            &global_cnt);
        if (!global_cnt)
-               goto not_bound;
-
-       params->suspend_time = 30;
-       params->max_out_time = 120;
-
-       if (iwl_mvm_low_latency(mvm)) {
-               if (fw_has_api(&mvm->fw->ucode_capa,
-                              IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
-
-                       params->suspend_time = 105;
-                       /*
-                        * If there is more than one active interface make
-                        * passive scan more fragmented.
-                        */
-                       frag_passive_dwell = 40;
-                       params->max_out_time = frag_passive_dwell;
-               } else {
-                       params->suspend_time = 120;
-                       params->max_out_time = 120;
-               }
-       }
+               return IWL_SCAN_TYPE_UNASSOC;
 
-       if (frag_passive_dwell &&
-           fw_has_api(&mvm->fw->ucode_capa,
-                      IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
-               /*
-                * P2P device scan should not be fragmented to avoid negative
-                * impact on P2P device discovery. Configure max_out_time to be
-                * equal to dwell time on passive channel. Take a longest
-                * possible value, one that corresponds to 2GHz band
-                */
-               if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-                       u32 passive_dwell =
-                               iwl_mvm_get_passive_dwell(mvm,
-                                                         IEEE80211_BAND_2GHZ);
-                       params->max_out_time = passive_dwell;
-               } else {
-                       params->passive_fragmented = true;
-               }
-       }
-
-       if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
-           (params->max_out_time > 200))
-               params->max_out_time = 200;
-
-not_bound:
+       load = iwl_mvm_get_traffic_load(mvm);
+       low_latency = iwl_mvm_low_latency(mvm);
 
-       for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-               if (params->passive_fragmented)
-                       params->dwell[band].fragmented = frag_passive_dwell;
+       if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+           vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+           fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
+               return IWL_SCAN_TYPE_FRAGMENTED;
 
-               params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
-                                                                       band);
-               params->dwell[band].active =
-                       iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
-       }
+       if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
+               return IWL_SCAN_TYPE_MILD;
 
-       IWL_DEBUG_SCAN(mvm,
-                      "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
-                      params->max_out_time, params->suspend_time,
-                      params->passive_fragmented);
-       IWL_DEBUG_SCAN(mvm,
-                      "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
-                      params->dwell[IEEE80211_BAND_2GHZ].passive,
-                      params->dwell[IEEE80211_BAND_2GHZ].active,
-                      params->dwell[IEEE80211_BAND_2GHZ].fragmented);
-       IWL_DEBUG_SCAN(mvm,
-                      "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
-                      params->dwell[IEEE80211_BAND_5GHZ].passive,
-                      params->dwell[IEEE80211_BAND_5GHZ].active,
-                      params->dwell[IEEE80211_BAND_5GHZ].fragmented);
+       return IWL_SCAN_TYPE_WILD;
 }
 
 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
@@ -327,9 +290,8 @@ static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
        return buf;
 }
 
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
@@ -341,17 +303,13 @@ int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
                       iwl_mvm_dump_channel_list(notif->results,
                                                 notif->scanned_channels, buf,
                                                 sizeof(buf)));
-       return 0;
 }
 
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb)
 {
        IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
        ieee80211_sched_scan_results(mvm->hw);
-
-       return 0;
 }
 
 static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
@@ -368,9 +326,8 @@ static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
        }
 }
 
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
@@ -392,9 +349,13 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
        if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
                WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
 
-               IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+               IWL_DEBUG_SCAN(mvm,
+                              "Scheduled scan %s, EBS status %s, Last line %d, Last iteration %d, Time after last iteration %d\n",
                               aborted ? "aborted" : "completed",
-                              iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+                              iwl_mvm_ebs_status_str(scan_notif->ebs_status),
+                              scan_notif->last_schedule_line,
+                              scan_notif->last_schedule_iteration,
+                              __le32_to_cpu(scan_notif->time_after_last_iter));
 
                mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
        } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
@@ -406,9 +367,13 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
        } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
                WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
 
-               IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
+               IWL_DEBUG_SCAN(mvm,
+                              "Scheduled scan %s, EBS status %s, Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
                               aborted ? "aborted" : "completed",
-                              iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+                              iwl_mvm_ebs_status_str(scan_notif->ebs_status),
+                              scan_notif->last_schedule_line,
+                              scan_notif->last_schedule_iteration,
+                              __le32_to_cpu(scan_notif->time_after_last_iter));
 
                mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
                ieee80211_sched_scan_stopped(mvm->hw);
@@ -426,8 +391,6 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
        mvm->last_ebs_successful =
                        scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
                        scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
-
-       return 0;
 }
 
 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
@@ -751,13 +714,11 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
                                    struct iwl_scan_req_lmac *cmd,
                                    struct iwl_mvm_scan_params *params)
 {
-       cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
-       cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
-       if (params->passive_fragmented)
-               cmd->fragmented_dwell =
-                               params->dwell[IEEE80211_BAND_2GHZ].fragmented;
-       cmd->max_out_time = cpu_to_le32(params->max_out_time);
-       cmd->suspend_time = cpu_to_le32(params->suspend_time);
+       cmd->active_dwell = scan_timing[params->type].dwell_active;
+       cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+       cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+       cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+       cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
        cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 }
 
@@ -794,7 +755,7 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
 
 static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
 {
-       return params->schedule[0].iterations + params->schedule[1].iterations;
+       return params->iterations[0] + params->iterations[1];
 }
 
 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
@@ -808,7 +769,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
        if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
                flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 
-       if (params->passive_fragmented)
+       if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
                flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
        if (iwl_mvm_rrm_scan_needed(mvm))
@@ -861,11 +822,11 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        ssid_bitmap <<= 1;
 
        cmd->schedule[0].delay = cpu_to_le16(params->interval);
-       cmd->schedule[0].iterations = params->schedule[0].iterations;
-       cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
+       cmd->schedule[0].iterations = params->iterations[0];
+       cmd->schedule[0].full_scan_mul = 1;
        cmd->schedule[1].delay = cpu_to_le16(params->interval);
-       cmd->schedule[1].iterations = params->schedule[1].iterations;
-       cmd->schedule[1].full_scan_mul = params->schedule[1].iterations;
+       cmd->schedule[1].iterations = params->iterations[1];
+       cmd->schedule[1].full_scan_mul = 1;
 
        if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
                cmd->channel_opt[0].flags =
@@ -937,9 +898,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        int num_channels =
                mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
                mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
-       int ret, i, j = 0, cmd_size, data_size;
+       int ret, i, j = 0, cmd_size;
        struct iwl_host_cmd cmd = {
-               .id = SCAN_CFG_CMD,
+               .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
        };
 
        if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
@@ -951,8 +912,6 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        if (!scan_config)
                return -ENOMEM;
 
-       data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
-       scan_config->hdr.size = cpu_to_le16(data_size);
        scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
                                         SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
                                         SCAN_CONFIG_FLAG_SET_TX_CHAINS |
@@ -1013,13 +972,11 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
                                    struct iwl_scan_req_umac *cmd,
                                    struct iwl_mvm_scan_params *params)
 {
-       cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
-       cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
-       if (params->passive_fragmented)
-               cmd->fragmented_dwell =
-                               params->dwell[IEEE80211_BAND_2GHZ].fragmented;
-       cmd->max_out_time = cpu_to_le32(params->max_out_time);
-       cmd->suspend_time = cpu_to_le32(params->suspend_time);
+       cmd->active_dwell = scan_timing[params->type].dwell_active;
+       cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+       cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+       cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+       cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
        cmd->scan_priority =
                iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 
@@ -1059,7 +1016,7 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
        if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 
-       if (params->passive_fragmented)
+       if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
 
        if (iwl_mvm_rrm_scan_needed(mvm))
@@ -1099,8 +1056,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return uid;
 
        memset(cmd, 0, ksize(cmd));
-       cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
-                                   sizeof(struct iwl_mvm_umac_cmd_hdr));
 
        iwl_mvm_scan_umac_dwell(mvm, cmd, params);
 
@@ -1230,17 +1185,15 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        params.n_match_sets = 0;
        params.match_sets = NULL;
 
-       params.schedule[0].iterations = 1;
-       params.schedule[0].full_scan_mul = 0;
-       params.schedule[1].iterations = 0;
-       params.schedule[1].full_scan_mul = 0;
+       params.iterations[0] = 1;
+       params.iterations[1] = 0;
 
-       iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+       params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
        iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-               hcmd.id = SCAN_REQ_UMAC;
+               hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
                ret = iwl_mvm_scan_umac(mvm, vif, &params,
                                        IWL_MVM_SCAN_REGULAR);
        } else {
@@ -1313,10 +1266,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
        params.n_match_sets = req->n_match_sets;
        params.match_sets = req->match_sets;
 
-       params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
-       params.schedule[0].full_scan_mul = 1;
-       params.schedule[1].iterations = 0xff;
-       params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+       params.iterations[0] = 0;
+       params.iterations[1] = 0xff;
+
+       params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
        if (req->interval > U16_MAX) {
                IWL_DEBUG_SCAN(mvm,
@@ -1339,8 +1292,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                params.delay = req->delay;
        }
 
-       iwl_mvm_scan_calc_dwell(mvm, vif, &params);
-
        ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
        if (ret)
                return ret;
@@ -1348,7 +1299,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
        iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-               hcmd.id = SCAN_REQ_UMAC;
+               hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
                ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
        } else {
                hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
@@ -1374,9 +1325,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
        return ret;
 }
 
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_umac_scan_complete *notif = (void *)pkt->data;
@@ -1384,7 +1334,7 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
        bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 
        if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
-               return 0;
+               return;
 
        /* if the scan is already stopping, we don't need to notify mac80211 */
        if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
@@ -1395,26 +1345,24 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
        }
 
        mvm->scan_status &= ~mvm->scan_uid_status[uid];
-
        IWL_DEBUG_SCAN(mvm,
-                      "Scan completed, uid %u type %u, status %s, EBS status %s\n",
+                      "Scan completed, uid %u type %u, status %s, EBS status %s, Last line %d, Last iteration %d, Time from last iteration %d\n",
                       uid, mvm->scan_uid_status[uid],
                       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
                                "completed" : "aborted",
-                      iwl_mvm_ebs_status_str(notif->ebs_status));
+                      iwl_mvm_ebs_status_str(notif->ebs_status),
+                      notif->last_schedule, notif->last_iter,
+                      __le32_to_cpu(notif->time_from_last_iter));
 
        if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
            notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
                mvm->last_ebs_successful = false;
 
        mvm->scan_uid_status[uid] = 0;
-
-       return 0;
 }
 
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
@@ -1426,15 +1374,11 @@ int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
                       iwl_mvm_dump_channel_list(notif->results,
                                                 notif->scanned_channels, buf,
                                                 sizeof(buf)));
-       return 0;
 }
 
 static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 {
-       struct iwl_umac_scan_abort cmd = {
-               .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
-                                       sizeof(struct iwl_mvm_umac_cmd_hdr)),
-       };
+       struct iwl_umac_scan_abort cmd = {};
        int uid, ret;
 
        lockdep_assert_held(&mvm->mutex);
@@ -1451,7 +1395,10 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 
        IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm,
+                                  iwl_cmd_id(SCAN_ABORT_UMAC,
+                                             IWL_ALWAYS_LONG_GROUP, 0),
+                                  0, sizeof(cmd), &cmd);
        if (!ret)
                mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
 
@@ -1461,7 +1408,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
 {
        struct iwl_notification_wait wait_scan_done;
-       static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+       static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
                                              SCAN_OFFLOAD_COMPLETE, };
        int ret;
 
index 26f076e821491e09d7805c5f37d229d01480229c..df216cd0c98f4659d1c00d7d07c8adb31c8244c6 100644 (file)
@@ -1148,18 +1148,31 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
 {
-       int i;
+       int i, max = -1, max_offs = -1;
 
        lockdep_assert_held(&mvm->mutex);
 
-       i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);
+       /* Pick the unused key offset with the highest 'deleted'
+        * counter. Every time a key is deleted, all the counters
+        * are incremented and the one that was just deleted is
+        * reset to zero. Thus, the highest counter is the one
+        * that was deleted longest ago. Pick that one.
+        */
+       for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+               if (test_bit(i, mvm->fw_key_table))
+                       continue;
+               if (mvm->fw_key_deleted[i] > max) {
+                       max = mvm->fw_key_deleted[i];
+                       max_offs = i;
+               }
+       }
 
-       if (i == STA_KEY_MAX_NUM)
+       if (max_offs < 0)
                return STA_KEY_IDX_INVALID;
 
-       __set_bit(i, mvm->fw_key_table);
+       __set_bit(max_offs, mvm->fw_key_table);
 
-       return i;
+       return max_offs;
 }
 
 static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
@@ -1277,8 +1290,6 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
                const u8 *pn;
 
                memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
-               ieee80211_aes_cmac_calculate_k1_k2(keyconf,
-                                                  igtk_cmd.K1, igtk_cmd.K2);
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                pn = seq.aes_cmac.pn;
                igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
@@ -1479,7 +1490,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 {
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        u8 sta_id;
-       int ret;
+       int ret, i;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -1498,6 +1509,13 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
                return -ENOENT;
        }
 
+       /* track which key was deleted last */
+       for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+               if (mvm->fw_key_deleted[i] < U8_MAX)
+                       mvm->fw_key_deleted[i]++;
+       }
+       mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
+
        if (sta_id == IWL_MVM_STATION_COUNT) {
                IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
                return 0;
@@ -1661,9 +1679,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
 
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+                          struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
@@ -1671,15 +1688,13 @@ int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
        u32 sta_id = le32_to_cpu(notif->sta_id);
 
        if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
-               return 0;
+               return;
 
        rcu_read_lock();
        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
        if (!IS_ERR_OR_NULL(sta))
                ieee80211_sta_eosp(sta);
        rcu_read_unlock();
-
-       return 0;
 }
 
 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
index 748f5dc3f9f4337952efc84fe93e5bfb89f97c81..eedb215eba3f6efd08f2ca387de80e46ec39bfb2 100644 (file)
@@ -378,9 +378,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
                             struct ieee80211_sta *sta, u32 iv32,
                             u16 *phase1key);
 
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+                          struct iwl_rx_cmd_buffer *rxb);
 
 /* AMPDU */
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
index a87b506c8c7272393304824409ae8473d6022277..fe2fa5650443894a0534e4b8f76cb5ea09f7fef6 100644 (file)
@@ -169,18 +169,11 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                return;
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
-                       pkt->hdr.flags);
-               goto exit;
-       }
 
-       if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
-               goto exit;
+       WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
 
        /* we don't really care about the response at this point */
 
-exit:
        iwl_free_resp(&cmd);
 }
 
@@ -261,8 +254,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
                mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
 }
 
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
@@ -277,17 +269,17 @@ int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        /* can fail sometimes */
        if (!le32_to_cpu(notif->status)) {
                iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
-               goto out;
+               return;
        }
 
        if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
-               goto out;
+               return;
 
        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));
        /* the station may not be here, but if it is, it must be a TDLS peer */
        if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
-               goto out;
+               return;
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        vif = mvmsta->vif;
@@ -301,9 +293,6 @@ int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                         msecs_to_jiffies(delay));
 
        iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
-
-out:
-       return 0;
 }
 
 static int
@@ -471,13 +460,19 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
        cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
 
        info = IEEE80211_SKB_CB(skb);
-       if (info->control.hw_key)
-               iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);
+       hdr = (void *)skb->data;
+       if (info->control.hw_key) {
+               if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
+                       rcu_read_unlock();
+                       ret = -EINVAL;
+                       goto out;
+               }
+               iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
+       }
 
        iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
                           mvmsta->sta_id);
 
-       hdr = (void *)skb->data;
        iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
                                hdr->frame_control);
        rcu_read_unlock();
index e472729e5f149a451fd7128d45fa54856c8a77ce..dbd7d544575de68a3972588bb117a8fa5b560ef3 100644 (file)
@@ -410,9 +410,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
 /*
  * The Rx handler for time event notifications
  */
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_time_event_notif *notif = (void *)pkt->data;
@@ -433,8 +432,6 @@ int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
        }
 unlock:
        spin_unlock_bh(&mvm->time_event_lock);
-
-       return 0;
 }
 
 static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
@@ -503,7 +500,7 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                       struct iwl_mvm_time_event_data *te_data,
                                       struct iwl_time_event_cmd *te_cmd)
 {
-       static const u8 time_event_response[] = { TIME_EVENT_CMD };
+       static const u16 time_event_response[] = { TIME_EVENT_CMD };
        struct iwl_notification_wait wait_time_event;
        int ret;
 
@@ -566,7 +563,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-       const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
+       const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
        struct iwl_notification_wait wait_te_notif;
        struct iwl_time_event_cmd time_cmd = {};
 
@@ -599,8 +596,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
 
-       time_cmd.apply_time =
-               cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
+       time_cmd.apply_time = cpu_to_le32(0);
 
        time_cmd.max_frags = TE_V2_FRAG_NONE;
        time_cmd.max_delay = cpu_to_le32(max_delay);
index de4fbc6d57f150130e095fbec5471e9149d951bf..cbdf8e52a5f1cc705f57711a14265303b4b78c77 100644 (file)
@@ -157,9 +157,8 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
 /*
  * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
  */
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb);
 
 /**
  * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/iwlwifi/mvm/tof.c
new file mode 100644 (file)
index 0000000..380972f
--- /dev/null
@@ -0,0 +1,304 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "fw-api-tof.h"
+
+#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm)
+{
+       struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return;
+
+       memset(tof_data, 0, sizeof(*tof_data));
+
+       tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (IWL_MVM_TOF_IS_RESPONDER) {
+               tof_data->responder_cfg.sub_grp_cmd_id =
+                       cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
+               tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
+       }
+#endif
+
+       tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
+       tof_data->range_req.req_timeout = 1;
+       tof_data->range_req.initiator = 1;
+       tof_data->range_req.report_policy = 3;
+
+       tof_data->range_req_ext.sub_grp_cmd_id =
+               cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
+
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
+{
+       struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return;
+
+       memset(tof_data, 0, sizeof(*tof_data));
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+static void iwl_tof_iterator(void *_data, u8 *mac,
+                            struct ieee80211_vif *vif)
+{
+       bool *enabled = _data;
+
+       /* non bss vif exists */
+       if (ieee80211_vif_type_p2p(vif) !=  NL80211_IFTYPE_STATION)
+               *enabled = false;
+}
+
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
+{
+       struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
+       bool enabled;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_tof_iterator, &enabled);
+       if (!enabled) {
+               IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
+               return -EINVAL;
+       }
+
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(*cmd), cmd);
+}
+
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
+{
+       struct iwl_tof_range_abort_cmd cmd = {
+               .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
+               .request_id = id,
+       };
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       if (id != mvm->tof_data.active_range_request) {
+               IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
+                       id, mvm->tof_data.active_range_request);
+               return -EINVAL;
+       }
+
+       /* after abort is sent there's no active request anymore */
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif)
+{
+       struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       if (vif->p2p || vif->type != NL80211_IFTYPE_AP) {
+               IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
+               return -EIO;
+       }
+
+       cmd->sta_id = mvmvif->bcast_sta.sta_id;
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(*cmd), cmd);
+}
+#endif
+
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_host_cmd cmd = {
+               .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+               .len = { sizeof(mvm->tof_data.range_req), },
+               /* no copy because of the command size */
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+       };
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       if (ieee80211_vif_type_p2p(vif) !=  NL80211_IFTYPE_STATION) {
+               IWL_ERR(mvm, "Cannot send range request, not STA mode\n");
+               return -EIO;
+       }
+
+       /* nesting of range requests is not supported in FW */
+       if (mvm->tof_data.active_range_request !=
+               IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
+               IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
+                       mvm->tof_data.active_range_request);
+               return -EIO;
+       }
+
+       mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
+
+       cmd.data[0] = &mvm->tof_data.range_req;
+       return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+                                     struct ieee80211_vif *vif)
+{
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       if (ieee80211_vif_type_p2p(vif) !=  NL80211_IFTYPE_STATION) {
+               IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
+               return -EIO;
+       }
+
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(mvm->tof_data.range_req_ext),
+                                   &mvm->tof_data.range_req_ext);
+}
+
+static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
+{
+       struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
+
+       if (resp->request_id != mvm->tof_data.active_range_request) {
+               IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
+                       resp->request_id, mvm->tof_data.active_range_request);
+               return -EIO;
+       }
+
+       memcpy(&mvm->tof_data.range_resp, resp,
+              sizeof(struct iwl_tof_range_rsp_ntfy));
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+       return 0;
+}
+
+static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
+{
+       struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
+
+       IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
+       return 0;
+}
+
+static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
+{
+       struct iwl_tof_neighbor_report *report =
+               (struct iwl_tof_neighbor_report *)data;
+
+       IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
+                      report->bssid, report->request_token, report->status);
+       return 0;
+}
+
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
+       case TOF_RANGE_RESPONSE_NOTIF:
+               iwl_mvm_tof_range_resp(mvm, resp->data);
+               break;
+       case TOF_MCSI_DEBUG_NOTIF:
+               iwl_mvm_tof_mcsi_notif(mvm, resp->data);
+               break;
+       case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
+               iwl_mvm_tof_nb_report_notif(mvm, resp->data);
+               break;
+       default:
+              IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
+                      resp->sub_grp_cmd_id);
+              break;
+       }
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/iwlwifi/mvm/tof.h
new file mode 100644 (file)
index 0000000..50ae8ad
--- /dev/null
@@ -0,0 +1,94 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __tof_h__
+#define __tof_h__
+
+#include "fw-api-tof.h"
+
+struct iwl_mvm_tof_data {
+       struct iwl_tof_config_cmd tof_cfg;
+       struct iwl_tof_range_req_cmd range_req;
+       struct iwl_tof_range_req_ext_cmd range_req_ext;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       struct iwl_tof_responder_config_cmd responder_cfg;
+#endif
+       struct iwl_tof_range_rsp_ntfy range_resp;
+       u8 last_abort_id;
+       u16 active_range_request;
+};
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm);
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+                                 struct ieee80211_vif *vif);
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+                                     struct ieee80211_vif *vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif);
+#endif
+#endif /* __tof_h__ */
index 80d07db6e7e8c80f472ee84fe6c31fc0f53844b1..fe7145c2c98acaa47bb19fe9dbbe4145f07491bd 100644 (file)
@@ -33,6 +33,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -154,24 +155,20 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
        return true;
 }
 
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
-                      struct iwl_rx_cmd_buffer *rxb,
-                      struct iwl_device_cmd *cmd)
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        int temp;
 
        /* the notification is handled synchronously in ctkill, so skip here */
        if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
-               return 0;
+               return;
 
        temp = iwl_mvm_temp_notif_parse(mvm, pkt);
        if (temp < 0)
-               return 0;
+               return;
 
        iwl_mvm_tt_temp_changed(mvm, temp);
-
-       return 0;
 }
 
 static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
@@ -187,7 +184,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
 int iwl_mvm_get_temp(struct iwl_mvm *mvm)
 {
        struct iwl_notification_wait wait_temp_notif;
-       static const u8 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
+       static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
        int ret, temp;
 
        lockdep_assert_held(&mvm->mutex);
index 89116864d2a0ec346941ec8e4fded14691612ff3..6df5aada4f161d6b533dae2b07e01146b53798ef 100644 (file)
@@ -153,18 +153,20 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        if (ieee80211_is_mgmt(fc)) {
                if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-                       tx_cmd->pm_frame_timeout = cpu_to_le16(3);
+                       tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
+               else if (ieee80211_is_action(fc))
+                       tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
                else
-                       tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+                       tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 
                /* The spec allows Action frames in A-MPDU, we don't support
                 * it
                 */
                WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
        } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
-               tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+               tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
        } else {
-               tx_cmd->pm_frame_timeout = 0;
+               tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
        }
 
        if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
@@ -268,19 +270,29 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
-                              struct ieee80211_tx_info *info,
-                              struct iwl_tx_cmd *tx_cmd,
-                              struct sk_buff *skb_frag)
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+                                     struct ieee80211_tx_info *info,
+                                     struct iwl_tx_cmd *tx_cmd,
+                                     struct sk_buff *skb_frag,
+                                     int hdrlen)
 {
        struct ieee80211_key_conf *keyconf = info->control.hw_key;
+       u8 *crypto_hdr = skb_frag->data + hdrlen;
+       u64 pn;
 
        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
-               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
-               memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
-               if (info->flags & IEEE80211_TX_CTL_AMPDU)
-                       tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
+               pn = atomic64_inc_return(&keyconf->tx_pn);
+               crypto_hdr[0] = pn;
+               crypto_hdr[2] = 0;
+               crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+               crypto_hdr[1] = pn >> 8;
+               crypto_hdr[4] = pn >> 16;
+               crypto_hdr[5] = pn >> 24;
+               crypto_hdr[6] = pn >> 32;
+               crypto_hdr[7] = pn >> 40;
                break;
 
        case WLAN_CIPHER_SUITE_TKIP:
@@ -308,7 +320,7 @@ void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
  */
 static struct iwl_device_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
-                     struct ieee80211_sta *sta, u8 sta_id)
+                     int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -325,7 +337,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
        if (info->control.hw_key)
-               iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb);
+               iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
 
        iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
 
@@ -346,6 +358,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
        u8 sta_id;
+       int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
        if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
                return -1;
@@ -366,23 +379,34 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
 
        /*
-        * If the interface on which frame is sent is the P2P_DEVICE
+        * If the interface on which the frame is sent is the P2P_DEVICE
         * or an AP/GO interface use the broadcast station associated
-        * with it; otherwise use the AUX station.
+        * with it; otherwise if the interface is a managed interface
+        * use the AP station associated with it for multicast traffic
+        * (this is not possible for unicast packets as a TDLS discovery
+        * response is sent without a station entry); otherwise use the
+        * AUX station.
         */
-       if (info->control.vif &&
-           (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-            info->control.vif->type == NL80211_IFTYPE_AP)) {
+       sta_id = mvm->aux_sta.sta_id;
+       if (info->control.vif) {
                struct iwl_mvm_vif *mvmvif =
                        iwl_mvm_vif_from_mac80211(info->control.vif);
-               sta_id = mvmvif->bcast_sta.sta_id;
-       } else {
-               sta_id = mvm->aux_sta.sta_id;
+
+               if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+                   info->control.vif->type == NL80211_IFTYPE_AP)
+                       sta_id = mvmvif->bcast_sta.sta_id;
+               else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+                        is_multicast_ether_addr(hdr->addr1)) {
+                       u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+
+                       if (ap_sta_id != IWL_MVM_STATION_COUNT)
+                               sta_id = ap_sta_id;
+               }
        }
 
        IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
 
-       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id);
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
        if (!dev_cmd)
                return -1;
 
@@ -390,7 +414,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
        /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+       memcpy(tx_cmd->hdr, hdr, hdrlen);
 
        if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
                iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
@@ -416,9 +440,11 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        u8 tid = IWL_MAX_TID_COUNT;
        u8 txq_id = info->hw_queue;
        bool is_data_qos = false, is_ampdu = false;
+       int hdrlen;
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        fc = hdr->frame_control;
+       hdrlen = ieee80211_hdrlen(fc);
 
        if (WARN_ON_ONCE(!mvmsta))
                return -1;
@@ -426,7 +452,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
                return -1;
 
-       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
        if (!dev_cmd)
                goto drop;
 
@@ -458,7 +484,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        }
 
        /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc));
+       memcpy(tx_cmd->hdr, hdr, hdrlen);
 
        WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
@@ -911,8 +937,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
        rcu_read_unlock();
 }
 
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                     struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
@@ -921,8 +946,6 @@ int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                iwl_mvm_rx_tx_cmd_single(mvm, pkt);
        else
                iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
-
-       return 0;
 }
 
 static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
@@ -942,8 +965,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
                (void *)(uintptr_t)tid_data->rate_n_flags;
 }
 
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
@@ -965,7 +987,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
                      tid >= IWL_MAX_TID_COUNT,
                      "sta_id %d tid %d", sta_id, tid))
-               return 0;
+               return;
 
        rcu_read_lock();
 
@@ -974,7 +996,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        /* Reclaiming frames for a station that has been deleted ? */
        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
-               return 0;
+               return;
        }
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -985,7 +1007,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                        "invalid BA notification: Q %d, tid %d, flow %d\n",
                        tid_data->txq_id, tid, scd_flow);
                rcu_read_unlock();
-               return 0;
+               return;
        }
 
        spin_lock_bh(&mvmsta->lock);
@@ -1072,8 +1094,6 @@ out:
                skb = __skb_dequeue(&reclaimed_skbs);
                ieee80211_tx_status(mvm->hw, skb);
        }
-
-       return 0;
 }
 
 /*
index 03f8e06dded72fc74a302c7e52fced302631dc01..a7d434256423382af219c1ab9221efdfe686ea69 100644 (file)
@@ -108,7 +108,7 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
        return ret;
 }
 
-int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
                         u32 flags, u16 len, const void *data)
 {
        struct iwl_host_cmd cmd = {
@@ -166,11 +166,6 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
                goto out_free_resp;
        }
 
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               ret = -EIO;
-               goto out_free_resp;
-       }
-
        resp_len = iwl_rx_packet_payload_len(pkt);
        if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
                ret = -EIO;
@@ -187,7 +182,7 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
 /*
  * We assume that the caller set the status to the sucess value
  */
-int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len,
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
                                const void *data, u32 *status)
 {
        struct iwl_host_cmd cmd = {
@@ -243,8 +238,7 @@ u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
        return fw_rate_idx_to_plcp[rate_idx];
 }
 
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_error_resp *err_resp = (void *)pkt->data;
@@ -256,7 +250,6 @@ int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                le32_to_cpu(err_resp->error_service));
        IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
                le64_to_cpu(err_resp->timestamp));
-       return 0;
 }
 
 /*
index 9f65c1cff1b1958057ab3bedf385604b34a3323e..b0825c402c732c0514637b3b21b26288a7275444 100644 (file)
@@ -614,6 +614,7 @@ static int iwl_pci_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct iwl_trans *trans = pci_get_drvdata(pdev);
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
 
        /* Before you put code here, think about WoWLAN. You cannot check here
@@ -631,20 +632,16 @@ static int iwl_pci_resume(struct device *device)
                return 0;
 
        /*
-        * On suspend, ict is disabled, and the interrupt mask
-        * gets cleared. Reconfigure them both in case of d0i3
-        * image. Otherwise, only enable rfkill interrupt (in
-        * order to keep track of the rfkill status)
+        * Enable rfkill interrupt (in order to keep track of
+        * the rfkill status)
         */
-       if (trans->wowlan_d0i3) {
-               iwl_pcie_reset_ict(trans);
-               iwl_enable_interrupts(trans);
-       } else {
-               iwl_enable_rfkill_int(trans);
-       }
+       iwl_enable_rfkill_int(trans);
 
        hw_rfkill = iwl_is_rfkill_set(trans);
+
+       mutex_lock(&trans_pcie->mutex);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+       mutex_unlock(&trans_pcie->mutex);
 
        return 0;
 }
index 376b84e54ad7e8bbb48d039d354c03748665451c..feb2f7e8113464113f330c3141a03f05ea4bef33 100644 (file)
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
+
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
+#define RX_LOW_WATERMARK 8
+
 struct iwl_host_cmd;
 
 /*This file includes the declaration that are internal to the
@@ -77,29 +92,29 @@ struct isr_statistics {
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @pool:
- * @queue:
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handed to the allocator to use for allocation
  * @write_actual:
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
        __le32 *bd;
        dma_addr_t bd_dma;
-       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
-       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
        u32 read;
        u32 write;
        u32 free_count;
+       u32 used_count;
        u32 write_actual;
        struct list_head rx_free;
        struct list_head rx_used;
@@ -107,6 +122,32 @@ struct iwl_rxq {
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
+       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @pool: initial pool of allocator
+ * @req_pending: number of requests the allocator has not processed yet
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handed to
+ *     the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ *     of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+       struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
+       atomic_t req_pending;
+       atomic_t req_ready;
+       struct list_head rbd_allocated;
+       struct list_head rbd_empty;
+       spinlock_t lock;
+       struct workqueue_struct *alloc_wq;
+       struct work_struct rx_alloc;
 };
 
 struct iwl_dma_ptr {
@@ -250,7 +291,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
- * @rx_replenish: work that will be called when buffers need to be allocated
+ * @rba: allocator for RX replenishing
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -264,8 +305,10 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  * @rx_buf_size_8k: 8 kB RX buffer size
  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: true when ucode supports wide command header format
  * @rx_page_order: page order for receive buffer size
  * @reg_lock: protect hw register access
+ * @mutex: to protect stop_device / start_fw / start_hw
  * @cmd_in_flight: true when we have a host command in flight
  * @fw_mon_phys: physical address of the buffer for the firmware monitor
  * @fw_mon_page: points to the first page of the buffer for the firmware monitor
@@ -273,7 +316,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  */
 struct iwl_trans_pcie {
        struct iwl_rxq rxq;
-       struct work_struct rx_replenish;
+       struct iwl_rb_allocator rba;
        struct iwl_trans *trans;
        struct iwl_drv *drv;
 
@@ -285,9 +328,11 @@ struct iwl_trans_pcie {
        dma_addr_t ict_tbl_dma;
        int ict_index;
        bool use_ict;
+       bool is_down;
        struct isr_statistics isr_stats;
 
        spinlock_t irq_lock;
+       struct mutex mutex;
        u32 inta_mask;
        u32 scd_base_addr;
        struct iwl_dma_ptr scd_bc_tbls;
@@ -314,6 +359,7 @@ struct iwl_trans_pcie {
        bool rx_buf_size_8k;
        bool bc_table_dword;
        bool scd_set_active;
+       bool wide_cmd_header;
        u32 rx_page_order;
 
        const char *const *command_names;
@@ -385,7 +431,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
-                           struct iwl_rx_cmd_buffer *rxb, int handler_status);
+                           struct iwl_rx_cmd_buffer *rxb);
 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs);
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
index adad8d0fae7f2766812826377c46d29f1f77d4ed..e06591f625c4a9633aedbece40bf9d74bd39d8f7 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
  * resets the Rx queue buffers with new memory.
  *
  * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ *   When the interrupt handler is called, the request is processed.
+ *   The page is either stolen - transferred to the upper layer
+ *   or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ *   count, detaches the RBD and transfers it to the queue used list.
+ *   When there are two used RBDs - they are transferred to the allocator empty
+ *   list. Work is then scheduled for the allocator to start allocating
+ *   eight buffers.
+ *   When there are another 6 used RBDs - they are transferred to the allocator
+ *   empty list and the driver tries to claim the pre-allocated buffers and
+ *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
+ *   until ready.
+ *   When there are 8+ buffers in the free list - either from allocation or from
+ *   8 reused unstolen pages - restock is called to update the FW and indexes.
+ * + In order to make sure the allocator always has RBDs to use for allocation
+ *   the allocator has an initial pool of size num_queues*(8-2) - the
+ *   maximum missing RBDs per allocation request (request posted with 2
+ *   empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
+ *   The queues supply the recycling of the rest of the RBDs.
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
- *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
  *
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
- *                            iwl_pcie_rxq_restock
+ *                            iwl_pcie_rxq_restock.
+ *                            Used only during initialization.
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl_pcie_rx_replenish
+ *                            the WRITE index.
+ * iwl_pcie_rx_allocator()     Background work for allocating pages.
  *
  * -- enable interrupts --
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
+ *                            Posts and claims requests to the allocator.
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
  * ...
  *
  */
@@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
                rxq->free_count--;
        }
        spin_unlock(&rxq->lock);
-       /* If the pre-allocated buffer pool is dropping low, schedule to
-        * refill it */
-       if (rxq->free_count <= RX_LOW_WATERMARK)
-               schedule_work(&trans_pcie->rx_replenish);
 
        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
@@ -254,6 +277,45 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
        }
 }
 
+/*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ *
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
+                                          gfp_t priority)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct page *page;
+       gfp_t gfp_mask = priority;
+
+       if (rxq->free_count > RX_LOW_WATERMARK)
+               gfp_mask |= __GFP_NOWARN;
+
+       if (trans_pcie->rx_page_order > 0)
+               gfp_mask |= __GFP_COMP;
+
+       /* Alloc a new receive buffer */
+       page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+       if (!page) {
+               if (net_ratelimit())
+                       IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+                                      trans_pcie->rx_page_order);
+               /* Issue an error if the hardware has consumed more than half
+                * of its free buffer list and we don't have enough
+                * pre-allocated buffers.
+`               */
+               if (rxq->free_count <= RX_LOW_WATERMARK &&
+                   iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
+                   net_ratelimit())
+                       IWL_CRIT(trans,
+                                "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
+                                rxq->free_count);
+               return NULL;
+       }
+       return page;
+}
+
 /*
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
@@ -269,7 +331,6 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
-       gfp_t gfp_mask = priority;
 
        while (1) {
                spin_lock(&rxq->lock);
@@ -279,32 +340,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
                }
                spin_unlock(&rxq->lock);
 
-               if (rxq->free_count > RX_LOW_WATERMARK)
-                       gfp_mask |= __GFP_NOWARN;
-
-               if (trans_pcie->rx_page_order > 0)
-                       gfp_mask |= __GFP_COMP;
-
                /* Alloc a new receive buffer */
-               page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-               if (!page) {
-                       if (net_ratelimit())
-                               IWL_DEBUG_INFO(trans, "alloc_pages failed, "
-                                          "order: %d\n",
-                                          trans_pcie->rx_page_order);
-
-                       if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-                           net_ratelimit())
-                               IWL_CRIT(trans, "Failed to alloc_pages with %s."
-                                        "Only %u free buffers remaining.\n",
-                                        priority == GFP_ATOMIC ?
-                                        "GFP_ATOMIC" : "GFP_KERNEL",
-                                        rxq->free_count);
-                       /* We don't reschedule replenish work here -- we will
-                        * call the restock method and if it still needs
-                        * more buffers it will schedule replenish */
+               page = iwl_pcie_rx_alloc_page(trans, priority);
+               if (!page)
                        return;
-               }
 
                spin_lock(&rxq->lock);
 
@@ -355,7 +394,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 
        lockdep_assert_held(&rxq->lock);
 
-       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+       for (i = 0; i < RX_QUEUE_SIZE; i++) {
                if (!rxq->pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -372,32 +411,164 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
  * When moving to rx_free an page is allocated for the slot.
  *
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * This is called only during initialization
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
-       iwl_pcie_rxq_alloc_rbs(trans, gfp);
+       iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
 
        iwl_pcie_rxq_restock(trans);
 }
 
-static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates for each received request 8 pages
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       struct list_head local_empty;
+       int pending = atomic_xchg(&rba->req_pending, 0);
+
+       IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+
+       /* If we were scheduled - there is at least one request */
+       spin_lock(&rba->lock);
+       /* swap out the rba->rbd_empty to a local list */
+       list_replace_init(&rba->rbd_empty, &local_empty);
+       spin_unlock(&rba->lock);
+
+       while (pending) {
+               int i;
+               struct list_head local_allocated;
+
+               INIT_LIST_HEAD(&local_allocated);
+
+               for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+                       struct iwl_rx_mem_buffer *rxb;
+                       struct page *page;
+
+                       /* List should never be empty - each reused RBD is
+                        * returned to the list, and initial pool covers any
+                        * possible gap between the time the page is allocated
+                        * to the time the RBD is added.
+                        */
+                       BUG_ON(list_empty(&local_empty));
+                       /* Get the first rxb from the rbd list */
+                       rxb = list_first_entry(&local_empty,
+                                              struct iwl_rx_mem_buffer, list);
+                       BUG_ON(rxb->page);
+
+                       /* Alloc a new receive buffer */
+                       page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
+                       if (!page)
+                               continue;
+                       rxb->page = page;
+
+                       /* Get physical address of the RB */
+                       rxb->page_dma = dma_map_page(trans->dev, page, 0,
+                                       PAGE_SIZE << trans_pcie->rx_page_order,
+                                       DMA_FROM_DEVICE);
+                       if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+                               rxb->page = NULL;
+                               __free_pages(page, trans_pcie->rx_page_order);
+                               continue;
+                       }
+                       /* dma address must be no more than 36 bits */
+                       BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+                       /* and also 256 byte aligned! */
+                       BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+                       /* move the allocated entry to the out list */
+                       list_move(&rxb->list, &local_allocated);
+                       i++;
+               }
+
+               pending--;
+               if (!pending) {
+                       pending = atomic_xchg(&rba->req_pending, 0);
+                       IWL_DEBUG_RX(trans,
+                                    "Pending allocation requests = %d\n",
+                                    pending);
+               }
+
+               spin_lock(&rba->lock);
+               /* add the allocated rbds to the allocator allocated list */
+               list_splice_tail(&local_allocated, &rba->rbd_allocated);
+               /* get more empty RBDs for current pending requests */
+               list_splice_tail_init(&rba->rbd_empty, &local_empty);
+               spin_unlock(&rba->lock);
+
+               atomic_inc(&rba->req_ready);
+       }
+
+       spin_lock(&rba->lock);
+       /* return unused rbds to the allocator empty list */
+       list_splice_tail(&local_empty, &rba->rbd_empty);
+       spin_unlock(&rba->lock);
+}
+
+/*
+ * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ *
+ * Called by queue when the queue posted allocation request and
+ * has freed 8 RBDs in order to restock itself.
+ */
+static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+                                    struct iwl_rx_mem_buffer
+                                    *out[RX_CLAIM_REQ_ALLOC])
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       int i;
+
+       /*
+        * atomic_dec_if_positive returns req_ready - 1 for any scenario.
+        * If req_ready is 0 atomic_dec_if_positive will return -1 and this
+        * function will return -ENOMEM, as there are no ready requests.
+        * atomic_dec_if_positive will perform the *actual* decrement only if
+        * req_ready > 0, i.e. - there are ready requests and the function
+        * hands one request to the caller.
+        */
+       if (atomic_dec_if_positive(&rba->req_ready) < 0)
+               return -ENOMEM;
+
+       spin_lock(&rba->lock);
+       for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+               /* Get next free Rx buffer, remove it from free list */
+               out[i] = list_first_entry(&rba->rbd_allocated,
+                              struct iwl_rx_mem_buffer, list);
+               list_del(&out[i]->list);
+       }
+       spin_unlock(&rba->lock);
+
+       return 0;
+}
+
+static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+{
+       struct iwl_rb_allocator *rba_p =
+               container_of(data, struct iwl_rb_allocator, rx_alloc);
        struct iwl_trans_pcie *trans_pcie =
-           container_of(data, struct iwl_trans_pcie, rx_replenish);
+               container_of(rba_p, struct iwl_trans_pcie, rba);
 
-       iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+       iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct device *dev = trans->dev;
 
        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 
        spin_lock_init(&rxq->lock);
+       spin_lock_init(&rba->lock);
 
        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;
@@ -487,15 +658,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);
        rxq->free_count = 0;
+       rxq->used_count = 0;
 
-       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+       for (i = 0; i < RX_QUEUE_SIZE; i++)
                list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 
+static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
+{
+       int i;
+
+       lockdep_assert_held(&rba->lock);
+
+       INIT_LIST_HEAD(&rba->rbd_allocated);
+       INIT_LIST_HEAD(&rba->rbd_empty);
+
+       for (i = 0; i < RX_POOL_SIZE; i++)
+               list_add(&rba->pool[i].list, &rba->rbd_empty);
+}
+
+static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       int i;
+
+       lockdep_assert_held(&rba->lock);
+
+       for (i = 0; i < RX_POOL_SIZE; i++) {
+               if (!rba->pool[i].page)
+                       continue;
+               dma_unmap_page(trans->dev, rba->pool[i].page_dma,
+                              PAGE_SIZE << trans_pcie->rx_page_order,
+                              DMA_FROM_DEVICE);
+               __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
+               rba->pool[i].page = NULL;
+       }
+}
+
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i, err;
 
        if (!rxq->bd) {
@@ -503,11 +708,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
                if (err)
                        return err;
        }
+       if (!rba->alloc_wq)
+               rba->alloc_wq = alloc_workqueue("rb_allocator",
+                                               WQ_HIGHPRI | WQ_UNBOUND, 1);
+       INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+
+       spin_lock(&rba->lock);
+       atomic_set(&rba->req_pending, 0);
+       atomic_set(&rba->req_ready, 0);
+       /* free all first - we might be reconfigured for a different size */
+       iwl_pcie_rx_free_rba(trans);
+       iwl_pcie_rx_init_rba(rba);
+       spin_unlock(&rba->lock);
 
        spin_lock(&rxq->lock);
 
-       INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
-
        /* free all first - we might be reconfigured for a different size */
        iwl_pcie_rxq_free_rbs(trans);
        iwl_pcie_rx_init_rxb_lists(rxq);
@@ -522,7 +737,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
        spin_unlock(&rxq->lock);
 
-       iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+       iwl_pcie_rx_replenish(trans);
 
        iwl_pcie_rx_hw_init(trans, rxq);
 
@@ -537,6 +752,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
        /*if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
@@ -545,7 +761,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
                return;
        }
 
-       cancel_work_sync(&trans_pcie->rx_replenish);
+       cancel_work_sync(&rba->rx_alloc);
+       if (rba->alloc_wq) {
+               destroy_workqueue(rba->alloc_wq);
+               rba->alloc_wq = NULL;
+       }
+
+       spin_lock(&rba->lock);
+       iwl_pcie_rx_free_rba(trans);
+       spin_unlock(&rba->lock);
 
        spin_lock(&rxq->lock);
        iwl_pcie_rxq_free_rbs(trans);
@@ -566,8 +790,49 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
        rxq->rb_stts = NULL;
 }
 
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when a RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+                                 struct iwl_rx_mem_buffer *rxb,
+                                 struct iwl_rxq *rxq, bool emergency)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+       /* Move the RBD to the used list, will be moved to allocator in batches
+        * before claiming or posting a request */
+       list_add_tail(&rxb->list, &rxq->rx_used);
+
+       if (unlikely(emergency))
+               return;
+
+       /* Count the allocator owned RBDs */
+       rxq->used_count++;
+
+       /* If we have RX_POST_REQ_ALLOC new released rx buffers -
+        * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
+        * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
+        * after but we still need to post another request.
+        */
+       if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+               /* Move the 2 RBDs to the allocator ownership.
+                * Allocator has another 6 from pool for the request completion */
+               spin_lock(&rba->lock);
+               list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+               spin_unlock(&rba->lock);
+
+               atomic_inc(&rba->req_pending);
+               queue_work(rba->alloc_wq, &rba->rx_alloc);
+       }
+}
+
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
-                               struct iwl_rx_mem_buffer *rxb)
+                               struct iwl_rx_mem_buffer *rxb,
+                               bool emergency)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
@@ -583,10 +848,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 
        while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
                struct iwl_rx_packet *pkt;
-               struct iwl_device_cmd *cmd;
                u16 sequence;
                bool reclaim;
-               int index, cmd_index, err, len;
+               int index, cmd_index, len;
                struct iwl_rx_cmd_buffer rxcb = {
                        ._offset = offset,
                        ._rx_page_order = trans_pcie->rx_page_order,
@@ -634,12 +898,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);
 
-               if (reclaim)
-                       cmd = txq->entries[cmd_index].cmd;
-               else
-                       cmd = NULL;
-
-               err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+               iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);
 
                if (reclaim) {
                        kzfree(txq->entries[cmd_index].free_buf);
@@ -657,7 +916,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                         * iwl_trans_send_cmd()
                         * as we reclaim the driver command queue */
                        if (!rxcb._page_stolen)
-                               iwl_pcie_hcmd_complete(trans, &rxcb, err);
+                               iwl_pcie_hcmd_complete(trans, &rxcb);
                        else
                                IWL_WARN(trans, "Claim null rxb?\n");
                }
@@ -688,13 +947,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                         */
                        __free_pages(rxb->page, trans_pcie->rx_page_order);
                        rxb->page = NULL;
-                       list_add_tail(&rxb->list, &rxq->rx_used);
+                       iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
                } else {
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                }
        } else
-               list_add_tail(&rxb->list, &rxq->rx_used);
+               iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
 }
 
 /*
@@ -704,10 +963,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       u32 r, i;
-       u8 fill_rx = 0;
-       u32 count = 8;
-       int total_empty;
+       u32 r, i, j, count = 0;
+       bool emergency = false;
 
 restart:
        spin_lock(&rxq->lock);
@@ -720,47 +977,95 @@ restart:
        if (i == r)
                IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 
-       /* calculate total frames need to be restock after handling RX */
-       total_empty = r - rxq->write_actual;
-       if (total_empty < 0)
-               total_empty += RX_QUEUE_SIZE;
-
-       if (total_empty > (RX_QUEUE_SIZE / 2))
-               fill_rx = 1;
-
        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;
 
+               if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
+                       emergency = true;
+
                rxb = rxq->queue[i];
                rxq->queue[i] = NULL;
 
                IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
                             r, i, rxb);
-               iwl_pcie_rx_handle_rb(trans, rxb);
+               iwl_pcie_rx_handle_rb(trans, rxb, emergency);
 
                i = (i + 1) & RX_QUEUE_MASK;
-               /* If there are a lot of unused frames,
-                * restock the Rx queue so ucode wont assert. */
-               if (fill_rx) {
+
+               /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+                * try to claim the pre-allocated buffers from the allocator */
+               if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+                       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+                       struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
+
+                       if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
+                           !emergency) {
+                               /* Add the remaining 6 empty RBDs
+                               * for allocator use
+                                */
+                               spin_lock(&rba->lock);
+                               list_splice_tail_init(&rxq->rx_used,
+                                                     &rba->rbd_empty);
+                               spin_unlock(&rba->lock);
+                       }
+
+                       /* If not ready - continue, will try to reclaim later.
+                       * No need to reschedule work - allocator exits only on
+                       * success */
+                       if (!iwl_pcie_rx_allocator_get(trans, out)) {
+                               /* If success - then RX_CLAIM_REQ_ALLOC
+                                * buffers were retrieved and should be added
+                                * to free list */
+                               rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+                               for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
+                                       list_add_tail(&out[j]->list,
+                                                     &rxq->rx_free);
+                                       rxq->free_count++;
+                               }
+                       }
+               }
+               if (emergency) {
                        count++;
-                       if (count >= 8) {
-                               rxq->read = i;
-                               spin_unlock(&rxq->lock);
-                               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+                       if (count == 8) {
                                count = 0;
-                               goto restart;
+                               if (rxq->used_count < RX_QUEUE_SIZE / 3)
+                                       emergency = false;
+                               spin_unlock(&rxq->lock);
+                               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+                               spin_lock(&rxq->lock);
                        }
                }
+               /* handle restock for three cases, can be all of them at once:
+               * - we just pulled buffers from the allocator
+               * - we have 8+ unstolen pages accumulated
+               * - we are in emergency and allocated buffers
+                */
+               if (rxq->free_count >=  RX_CLAIM_REQ_ALLOC) {
+                       rxq->read = i;
+                       spin_unlock(&rxq->lock);
+                       iwl_pcie_rxq_restock(trans);
+                       goto restart;
+               }
        }
 
        /* Backtrack one entry */
        rxq->read = i;
        spin_unlock(&rxq->lock);
 
-       if (fill_rx)
-               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-       else
-               iwl_pcie_rxq_restock(trans);
+       /*
+        * handle a case where in emergency there are some unallocated RBDs.
+        * those RBDs are in the used list, but are not tracked by the queue's
+        * used_count which counts allocator owned RBDs.
+        * unallocated emergency RBDs must be allocated on exit, otherwise
+        * when called again the function may not be in emergency mode and
+        * they will be handed to the allocator with no tracking in the RBD
+        * allocator counters, which will lead to them never being claimed back
+        * by the queue.
+        * by allocating them here, they are now in the queue free list, and
+        * will be restocked by the next call of iwl_pcie_rxq_restock.
+        */
+       if (unlikely(emergency && count))
+               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
 
        if (trans_pcie->napi.poll)
                napi_gro_flush(&trans_pcie->napi, false);
@@ -772,6 +1077,7 @@ restart:
 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int i;
 
        /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
        if (trans->cfg->internal_wimax_coex &&
@@ -795,6 +1101,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
        iwl_trans_fw_error(trans);
        local_bh_enable();
 
+       for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
+               del_timer(&trans_pcie->txq[i].stuck_timer);
+
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        wake_up(&trans_pcie->wait_command_queue);
 }
@@ -1003,7 +1312,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 
                isr_stats->rfkill++;
 
+               mutex_lock(&trans_pcie->mutex);
                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+               mutex_unlock(&trans_pcie->mutex);
                if (hw_rfkill) {
                        set_bit(STATUS_RFKILL, &trans->status);
                        if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
@@ -1195,8 +1506,9 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
 
        val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
 
-       val |= CSR_DRAM_INT_TBL_ENABLE;
-       val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+       val |= CSR_DRAM_INT_TBL_ENABLE |
+              CSR_DRAM_INIT_TBL_WRAP_CHECK |
+              CSR_DRAM_INIT_TBL_WRITE_POINTER;
 
        IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
 
index 9e144e71da0b5980264702a6210684cfa34edab5..6ba7d300b08f35e1ce05fdf8b3fcab058819f150 100644 (file)
@@ -780,8 +780,15 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
        for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
                last_read_idx = i;
 
+               /*
+                * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
+                * CPU1 to CPU2.
+                * PAGING_SEPARATOR_SECTION delimiter - separate between
+                * CPU2 non paged to CPU2 paging sec.
+                */
                if (!image->sec[i].data ||
-                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+                   image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
                        IWL_DEBUG_FW(trans,
                                     "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
@@ -829,8 +836,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
        for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
                last_read_idx = i;
 
+               /*
+                * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
+                * CPU1 to CPU2.
+                * PAGING_SEPARATOR_SECTION delimiter - separate between
+                * CPU2 non paged to CPU2 paging sec.
+                */
                if (!image->sec[i].data ||
-                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+                   image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
                        IWL_DEBUG_FW(trans,
                                     "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
@@ -897,6 +911,14 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
                case PRPH_CLEARBIT:
                        iwl_clear_bits_prph(trans, addr, BIT(val));
                        break;
+               case PRPH_BLOCKBIT:
+                       if (iwl_read_prph(trans, addr) & BIT(val)) {
+                               IWL_ERR(trans,
+                                       "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
+                                       val, addr);
+                               goto monitor;
+                       }
+                       break;
                default:
                        IWL_ERR(trans, "FW debug - unknown OP %d\n",
                                dest->reg_ops[i].op);
@@ -904,6 +926,7 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
                }
        }
 
+monitor:
        if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
                iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
                               trans_pcie->fw_mon_phys >> dest->base_shift);
@@ -998,13 +1021,25 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                                   const struct fw_img *fw, bool run_in_rfkill)
 {
-       int ret;
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
+       int ret;
+
+       mutex_lock(&trans_pcie->mutex);
+
+       /* Someone called stop_device, don't try to start_fw */
+       if (trans_pcie->is_down) {
+               IWL_WARN(trans,
+                        "Can't start_fw since the HW hasn't been started\n");
+               ret = EIO;
+               goto out;
+       }
 
        /* This may fail if AMT took ownership of the device */
        if (iwl_pcie_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
        iwl_enable_rfkill_int(trans);
@@ -1016,15 +1051,17 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
        else
                clear_bit(STATUS_RFKILL, &trans->status);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-       if (hw_rfkill && !run_in_rfkill)
-               return -ERFKILL;
+       if (hw_rfkill && !run_in_rfkill) {
+               ret = -ERFKILL;
+               goto out;
+       }
 
        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
 
        ret = iwl_pcie_nic_init(trans);
        if (ret) {
                IWL_ERR(trans, "Unable to init nic\n");
-               return ret;
+               goto out;
        }
 
        /* make sure rfkill handshake bits are cleared */
@@ -1042,9 +1079,13 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 
        /* Load the given image to the HW */
        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-               return iwl_pcie_load_given_ucode_8000(trans, fw);
+               ret = iwl_pcie_load_given_ucode_8000(trans, fw);
        else
-               return iwl_pcie_load_given_ucode(trans, fw);
+               ret = iwl_pcie_load_given_ucode(trans, fw);
+
+out:
+       mutex_unlock(&trans_pcie->mutex);
+       return ret;
 }
 
 static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
@@ -1053,11 +1094,18 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
        iwl_pcie_tx_start(trans, scd_addr);
 }
 
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill, was_hw_rfkill;
 
+       lockdep_assert_held(&trans_pcie->mutex);
+
+       if (trans_pcie->is_down)
+               return;
+
+       trans_pcie->is_down = true;
+
        was_hw_rfkill = iwl_is_rfkill_set(trans);
 
        /* tell the device to stop sending interrupts */
@@ -1147,14 +1195,36 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
        iwl_pcie_prepare_card_hw(trans);
 }
 
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       mutex_lock(&trans_pcie->mutex);
+       _iwl_trans_pcie_stop_device(trans, low_power);
+       mutex_unlock(&trans_pcie->mutex);
+}
+
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
 {
+       struct iwl_trans_pcie __maybe_unused *trans_pcie =
+               IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       lockdep_assert_held(&trans_pcie->mutex);
+
        if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
-               iwl_trans_pcie_stop_device(trans, true);
+               _iwl_trans_pcie_stop_device(trans, true);
 }
 
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       if (trans->wowlan_d0i3) {
+               /* Enable persistence mode to avoid reset */
+               iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+                           CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+       }
+
        iwl_disable_interrupts(trans);
 
        /*
@@ -1166,17 +1236,21 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
 
        iwl_pcie_disable_ict(trans);
 
+       synchronize_irq(trans_pcie->pci_dev->irq);
+
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 
-       /*
-        * reset TX queues -- some of their registers reset during S3
-        * so if we don't reset everything here the D3 image would try
-        * to execute some invalid memory upon resume
-        */
-       iwl_trans_pcie_tx_reset(trans);
+       if (!trans->wowlan_d0i3) {
+               /*
+                * reset TX queues -- some of their registers reset during S3
+                * so if we don't reset everything here the D3 image would try
+                * to execute some invalid memory upon resume
+                */
+               iwl_trans_pcie_tx_reset(trans);
+       }
 
        iwl_pcie_set_pwr(trans, true);
 }
@@ -1218,12 +1292,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 
        iwl_pcie_set_pwr(trans, false);
 
-       iwl_trans_pcie_tx_reset(trans);
+       if (trans->wowlan_d0i3) {
+               iwl_clear_bit(trans, CSR_GP_CNTRL,
+                             CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+       } else {
+               iwl_trans_pcie_tx_reset(trans);
 
-       ret = iwl_pcie_rx_init(trans);
-       if (ret) {
-               IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
-               return ret;
+               ret = iwl_pcie_rx_init(trans);
+               if (ret) {
+                       IWL_ERR(trans,
+                               "Failed to resume the device (RX reset)\n");
+                       return ret;
+               }
        }
 
        val = iwl_read32(trans, CSR_RESET);
@@ -1235,11 +1315,14 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
        return 0;
 }
 
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
        int err;
 
+       lockdep_assert_held(&trans_pcie->mutex);
+
        err = iwl_pcie_prepare_card_hw(trans);
        if (err) {
                IWL_ERR(trans, "Error while preparing HW: %d\n", err);
@@ -1256,20 +1339,38 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
        /* From now on, the op_mode will be kept updated about RF kill state */
        iwl_enable_rfkill_int(trans);
 
+       /* Set is_down to false here so that...*/
+       trans_pcie->is_down = false;
+
        hw_rfkill = iwl_is_rfkill_set(trans);
        if (hw_rfkill)
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
+       /* ... rfkill can call stop_device and set it false if needed */
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 
        return 0;
 }
 
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int ret;
+
+       mutex_lock(&trans_pcie->mutex);
+       ret = _iwl_trans_pcie_start_hw(trans, low_power);
+       mutex_unlock(&trans_pcie->mutex);
+
+       return ret;
+}
+
 static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
+       mutex_lock(&trans_pcie->mutex);
+
        /* disable interrupts - don't enable HW RF kill interrupt */
        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
@@ -1282,6 +1383,10 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
        spin_unlock(&trans_pcie->irq_lock);
 
        iwl_pcie_disable_ict(trans);
+
+       mutex_unlock(&trans_pcie->mutex);
+
+       synchronize_irq(trans_pcie->pci_dev->irq);
 }
 
 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1342,6 +1447,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
        else
                trans_pcie->rx_page_order = get_order(4 * 1024);
 
+       trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
        trans_pcie->command_names = trans_cfg->command_names;
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
        trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@ -1354,11 +1460,10 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
         * As this function may be called again in some corner cases don't
         * do anything if NAPI was already initialized.
         */
-       if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+       if (!trans_pcie->napi.poll) {
                init_dummy_netdev(&trans_pcie->napi_dev);
-               iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
-                                    &trans_pcie->napi_dev,
-                                    iwl_pcie_dummy_napi_poll, 64);
+               netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
+                              iwl_pcie_dummy_napi_poll, 64);
        }
 }
 
@@ -2185,6 +2290,47 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
        return prph_len;
 }
 
+static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+                                  struct iwl_fw_error_dump_data **data,
+                                  int allocated_rb_nums)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       u32 i, r, j, rb_len = 0;
+
+       spin_lock(&rxq->lock);
+
+       r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+
+       for (i = rxq->read, j = 0;
+            i != r && j < allocated_rb_nums;
+            i = (i + 1) & RX_QUEUE_MASK, j++) {
+               struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
+               struct iwl_fw_error_dump_rb *rb;
+
+               dma_unmap_page(trans->dev, rxb->page_dma, max_len,
+                              DMA_FROM_DEVICE);
+
+               rb_len += sizeof(**data) + sizeof(*rb) + max_len;
+
+               (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
+               (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
+               rb = (void *)(*data)->data;
+               rb->index = cpu_to_le32(i);
+               memcpy(rb->data, page_address(rxb->page), max_len);
+               /* remap the page for the free benefit */
+               rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
+                                                    max_len,
+                                                    DMA_FROM_DEVICE);
+
+               *data = iwl_fw_error_next_data(*data);
+       }
+
+       spin_unlock(&rxq->lock);
+
+       return rb_len;
+}
 #define IWL_CSR_TO_DUMP (0x250)
 
 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
@@ -2254,17 +2400,97 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
        return monitor_len;
 }
 
-static
-struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
+static u32
+iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
+                           struct iwl_fw_error_dump_data **data,
+                           u32 monitor_len)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       u32 len = 0;
+
+       if ((trans_pcie->fw_mon_page &&
+            trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+           trans->dbg_dest_tlv) {
+               struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+               u32 base, write_ptr, wrap_cnt;
+
+               /* If there was a dest TLV - use the values from there */
+               if (trans->dbg_dest_tlv) {
+                       write_ptr =
+                               le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+                       wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+                       base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+               } else {
+                       base = MON_BUFF_BASE_ADDR;
+                       write_ptr = MON_BUFF_WRPTR;
+                       wrap_cnt = MON_BUFF_CYCLE_CNT;
+               }
+
+               (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+               fw_mon_data = (void *)(*data)->data;
+               fw_mon_data->fw_mon_wr_ptr =
+                       cpu_to_le32(iwl_read_prph(trans, write_ptr));
+               fw_mon_data->fw_mon_cycle_cnt =
+                       cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+               fw_mon_data->fw_mon_base_ptr =
+                       cpu_to_le32(iwl_read_prph(trans, base));
+
+               len += sizeof(**data) + sizeof(*fw_mon_data);
+               if (trans_pcie->fw_mon_page) {
+                       /*
+                        * The firmware is now asserted, it won't write anything
+                        * to the buffer. CPU can take ownership to fetch the
+                        * data. The buffer will be handed back to the device
+                        * before the firmware will be restarted.
+                        */
+                       dma_sync_single_for_cpu(trans->dev,
+                                               trans_pcie->fw_mon_phys,
+                                               trans_pcie->fw_mon_size,
+                                               DMA_FROM_DEVICE);
+                       memcpy(fw_mon_data->data,
+                              page_address(trans_pcie->fw_mon_page),
+                              trans_pcie->fw_mon_size);
+
+                       monitor_len = trans_pcie->fw_mon_size;
+               } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+                       /*
+                        * Update pointers to reflect actual values after
+                        * shifting
+                        */
+                       base = iwl_read_prph(trans, base) <<
+                              trans->dbg_dest_tlv->base_shift;
+                       iwl_trans_read_mem(trans, base, fw_mon_data->data,
+                                          monitor_len / sizeof(u32));
+               } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+                       monitor_len =
+                               iwl_trans_pci_dump_marbh_monitor(trans,
+                                                                fw_mon_data,
+                                                                monitor_len);
+               } else {
+                       /* Didn't match anything - output no monitor data */
+                       monitor_len = 0;
+               }
+
+               len += monitor_len;
+               (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
+       }
+
+       return len;
+}
+
+static struct iwl_trans_dump_data
+*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+                         struct iwl_fw_dbg_trigger_tlv *trigger)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_fw_error_dump_data *data;
        struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
        struct iwl_fw_error_dump_txcmd *txcmd;
        struct iwl_trans_dump_data *dump_data;
-       u32 len;
+       u32 len, num_rbs;
        u32 monitor_len;
        int i, ptr;
+       bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
 
        /* transport dump header */
        len = sizeof(*dump_data);
@@ -2273,22 +2499,6 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
        len += sizeof(*data) +
                cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
 
-       /* CSR registers */
-       len += sizeof(*data) + IWL_CSR_TO_DUMP;
-
-       /* PRPH registers */
-       for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
-               /* The range includes both boundaries */
-               int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
-                       iwl_prph_dump_addr[i].start + 4;
-
-               len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
-                       num_bytes_in_chunk;
-       }
-
-       /* FH registers */
-       len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
-
        /* FW monitor */
        if (trans_pcie->fw_mon_page) {
                len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
@@ -2316,6 +2526,45 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
                monitor_len = 0;
        }
 
+       if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+               dump_data = vzalloc(len);
+               if (!dump_data)
+                       return NULL;
+
+               data = (void *)dump_data->data;
+               len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+               dump_data->len = len;
+
+               return dump_data;
+       }
+
+       /* CSR registers */
+       len += sizeof(*data) + IWL_CSR_TO_DUMP;
+
+       /* PRPH registers */
+       for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+               /* The range includes both boundaries */
+               int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+                       iwl_prph_dump_addr[i].start + 4;
+
+               len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
+                      num_bytes_in_chunk;
+       }
+
+       /* FH registers */
+       len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+
+       if (dump_rbs) {
+               /* RBs */
+               num_rbs = le16_to_cpu(ACCESS_ONCE(
+                                     trans_pcie->rxq.rb_stts->closed_rb_num))
+                                     & 0x0FFF;
+               num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
+               len += num_rbs * (sizeof(*data) +
+                                 sizeof(struct iwl_fw_error_dump_rb) +
+                                 (PAGE_SIZE << trans_pcie->rx_page_order));
+       }
+
        dump_data = vzalloc(len);
        if (!dump_data)
                return NULL;
@@ -2352,74 +2601,10 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
        len += iwl_trans_pcie_dump_prph(trans, &data);
        len += iwl_trans_pcie_dump_csr(trans, &data);
        len += iwl_trans_pcie_fh_regs_dump(trans, &data);
-       /* data is already pointing to the next section */
+       if (dump_rbs)
+               len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
 
-       if ((trans_pcie->fw_mon_page &&
-            trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
-           trans->dbg_dest_tlv) {
-               struct iwl_fw_error_dump_fw_mon *fw_mon_data;
-               u32 base, write_ptr, wrap_cnt;
-
-               /* If there was a dest TLV - use the values from there */
-               if (trans->dbg_dest_tlv) {
-                       write_ptr =
-                               le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
-                       wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
-                       base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
-               } else {
-                       base = MON_BUFF_BASE_ADDR;
-                       write_ptr = MON_BUFF_WRPTR;
-                       wrap_cnt = MON_BUFF_CYCLE_CNT;
-               }
-
-               data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
-               fw_mon_data = (void *)data->data;
-               fw_mon_data->fw_mon_wr_ptr =
-                       cpu_to_le32(iwl_read_prph(trans, write_ptr));
-               fw_mon_data->fw_mon_cycle_cnt =
-                       cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
-               fw_mon_data->fw_mon_base_ptr =
-                       cpu_to_le32(iwl_read_prph(trans, base));
-
-               len += sizeof(*data) + sizeof(*fw_mon_data);
-               if (trans_pcie->fw_mon_page) {
-                       /*
-                        * The firmware is now asserted, it won't write anything
-                        * to the buffer. CPU can take ownership to fetch the
-                        * data. The buffer will be handed back to the device
-                        * before the firmware will be restarted.
-                        */
-                       dma_sync_single_for_cpu(trans->dev,
-                                               trans_pcie->fw_mon_phys,
-                                               trans_pcie->fw_mon_size,
-                                               DMA_FROM_DEVICE);
-                       memcpy(fw_mon_data->data,
-                              page_address(trans_pcie->fw_mon_page),
-                              trans_pcie->fw_mon_size);
-
-                       monitor_len = trans_pcie->fw_mon_size;
-               } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
-                       /*
-                        * Update pointers to reflect actual values after
-                        * shifting
-                        */
-                       base = iwl_read_prph(trans, base) <<
-                              trans->dbg_dest_tlv->base_shift;
-                       iwl_trans_read_mem(trans, base, fw_mon_data->data,
-                                          monitor_len / sizeof(u32));
-               } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
-                       monitor_len =
-                               iwl_trans_pci_dump_marbh_monitor(trans,
-                                                                fw_mon_data,
-                                                                monitor_len);
-               } else {
-                       /* Didn't match anything - output no monitor data */
-                       monitor_len = 0;
-               }
-
-               len += monitor_len;
-               data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
-       }
+       len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
 
        dump_data->len = len;
 
@@ -2482,12 +2667,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        if (!trans)
                return ERR_PTR(-ENOMEM);
 
+       trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
+
        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
        trans_pcie->trans = trans;
        spin_lock_init(&trans_pcie->irq_lock);
        spin_lock_init(&trans_pcie->reg_lock);
        spin_lock_init(&trans_pcie->ref_lock);
+       mutex_init(&trans_pcie->mutex);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
        ret = pci_enable_device(pdev);
index 607acb53c847558793d47a528d0830f9c79c8cbf..a8c8a4a7420b53d02a798bcc261f408f77440ace 100644 (file)
@@ -219,8 +219,6 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 
        scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 
-       WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
        sta_id = tx_cmd->sta_id;
        sec_ctl = tx_cmd->sec_ctl;
 
@@ -239,6 +237,9 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
        if (trans_pcie->bc_table_dword)
                len = DIV_ROUND_UP(len, 4);
 
+       if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+               return;
+
        bc_ent = cpu_to_le16(len | (sta_id << 12));
 
        scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -387,11 +388,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 
        /* first TB is never freed - it's the scratchbuf data */
 
-       for (i = 1; i < num_tbs; i++)
-               dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
-                                iwl_pcie_tfd_tb_get_len(tfd, i),
-                                DMA_TO_DEVICE);
-
+       for (i = 1; i < num_tbs; i++) {
+               if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+                       dma_unmap_page(trans->dev,
+                                      iwl_pcie_tfd_tb_get_addr(tfd, i),
+                                      iwl_pcie_tfd_tb_get_len(tfd, i),
+                                      DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(trans->dev,
+                                        iwl_pcie_tfd_tb_get_addr(tfd, i),
+                                        iwl_pcie_tfd_tb_get_len(tfd, i),
+                                        DMA_TO_DEVICE);
+       }
        tfd->num_tbs = 0;
 }
 
@@ -467,7 +475,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 
        iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
 
-       return 0;
+       return num_tbs;
 }
 
 static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
@@ -915,6 +923,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
                }
        }
 
+       iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
        if (trans->cfg->base_params->num_of_queues > 20)
                iwl_set_bits_prph(trans, SCD_GP_CTRL,
                                  SCD_GP_CTRL_ENABLE_31_QUEUES);
@@ -1320,13 +1329,24 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        int idx;
        u16 copy_size, cmd_size, scratch_size;
        bool had_nocopy = false;
+       u8 group_id = iwl_cmd_groupid(cmd->id);
        int i, ret;
        u32 cmd_pos;
        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 
-       copy_size = sizeof(out_cmd->hdr);
-       cmd_size = sizeof(out_cmd->hdr);
+       if (WARN(!trans_pcie->wide_cmd_header &&
+                group_id > IWL_ALWAYS_LONG_GROUP,
+                "unsupported wide command %#x\n", cmd->id))
+               return -EINVAL;
+
+       if (group_id != 0) {
+               copy_size = sizeof(struct iwl_cmd_header_wide);
+               cmd_size = sizeof(struct iwl_cmd_header_wide);
+       } else {
+               copy_size = sizeof(struct iwl_cmd_header);
+               cmd_size = sizeof(struct iwl_cmd_header);
+       }
 
        /* need one for the header if the first is NOCOPY */
        BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
@@ -1416,16 +1436,32 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                out_meta->source = cmd;
 
        /* set up the header */
-
-       out_cmd->hdr.cmd = cmd->id;
-       out_cmd->hdr.flags = 0;
-       out_cmd->hdr.sequence =
-               cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
-                                        INDEX_TO_SEQ(q->write_ptr));
+       if (group_id != 0) {
+               out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+               out_cmd->hdr_wide.group_id = group_id;
+               out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+               out_cmd->hdr_wide.length =
+                       cpu_to_le16(cmd_size -
+                                   sizeof(struct iwl_cmd_header_wide));
+               out_cmd->hdr_wide.reserved = 0;
+               out_cmd->hdr_wide.sequence =
+                       cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+                                                INDEX_TO_SEQ(q->write_ptr));
+
+               cmd_pos = sizeof(struct iwl_cmd_header_wide);
+               copy_size = sizeof(struct iwl_cmd_header_wide);
+       } else {
+               out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
+               out_cmd->hdr.sequence =
+                       cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+                                                INDEX_TO_SEQ(q->write_ptr));
+               out_cmd->hdr.group_id = 0;
+
+               cmd_pos = sizeof(struct iwl_cmd_header);
+               copy_size = sizeof(struct iwl_cmd_header);
+       }
 
        /* and copy the data that needs to be copied */
-       cmd_pos = offsetof(struct iwl_device_cmd, payload);
-       copy_size = sizeof(out_cmd->hdr);
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                int copy;
 
@@ -1464,9 +1500,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        }
 
        IWL_DEBUG_HC(trans,
-                    "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+                    "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
                     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
-                    out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+                    group_id, out_cmd->hdr.cmd,
+                    le16_to_cpu(out_cmd->hdr.sequence),
                     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
        /* start the TFD with the scratchbuf */
@@ -1516,12 +1553,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
        }
 
+       BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
+                    sizeof(out_meta->flags) * BITS_PER_BYTE);
        out_meta->flags = cmd->flags;
        if (WARN_ON_ONCE(txq->entries[idx].free_buf))
                kzfree(txq->entries[idx].free_buf);
        txq->entries[idx].free_buf = dup_buf;
 
-       trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
+       trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
 
        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr && txq->wd_timeout)
@@ -1552,15 +1591,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 /*
  * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
  * @rxb: Rx buffer to reclaim
- * @handler_status: return value of the handler of the command
- *     (put in setup_rx_handlers)
  *
  * If an Rx buffer has an async callback associated with it the callback
  * will be executed.  The attached skb (if present) will only be freed
  * if the callback returns 1
  */
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
-                           struct iwl_rx_cmd_buffer *rxb, int handler_status)
+                           struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1599,7 +1636,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                meta->source->resp_pkt = pkt;
                meta->source->_rx_page_addr = (unsigned long)page_address(p);
                meta->source->_rx_page_order = trans_pcie->rx_page_order;
-               meta->source->handler_status = handler_status;
        }
 
        iwl_pcie_cmdq_reclaim(trans, txq_id, index);
@@ -1762,7 +1798,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      struct iwl_device_cmd *dev_cmd, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_hdr *hdr;
        struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
        struct iwl_cmd_meta *out_meta;
        struct iwl_txq *txq;
@@ -1771,9 +1807,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        void *tb1_addr;
        u16 len, tb1_len, tb2_len;
        bool wait_write_ptr;
-       __le16 fc = hdr->frame_control;
-       u8 hdr_len = ieee80211_hdrlen(fc);
+       __le16 fc;
+       u8 hdr_len;
        u16 wifi_seq;
+       int i;
 
        txq = &trans_pcie->txq[txq_id];
        q = &txq->q;
@@ -1782,6 +1819,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      "TX on unused queue %d\n", txq_id))
                return -EINVAL;
 
+       if (skb_is_nonlinear(skb) &&
+           skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+           __skb_linearize(skb))
+               return -ENOMEM;
+
+       /* mac80211 always puts the full header into the SKB's head,
+        * so there's no need to check if it's readable there
+        */
+       hdr = (struct ieee80211_hdr *)skb->data;
+       fc = hdr->frame_control;
+       hdr_len = ieee80211_hdrlen(fc);
+
        spin_lock(&txq->lock);
 
        /* In AGG mode, the index in the ring must correspond to the WiFi
@@ -1812,6 +1861,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_meta = &txq->entries[q->write_ptr].meta;
+       out_meta->flags = 0;
 
        /*
         * The second TB (tb1) points to the remainder of the TX command
@@ -1845,9 +1895,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
        /*
         * Set up TFD's third entry to point directly to remainder
-        * of skb, if any (802.11 null frames have no payload).
+        * of skb's head, if any
         */
-       tb2_len = skb->len - hdr_len;
+       tb2_len = skb_headlen(skb) - hdr_len;
        if (tb2_len > 0) {
                dma_addr_t tb2_phys = dma_map_single(trans->dev,
                                                     skb->data + hdr_len,
@@ -1860,6 +1910,29 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
        }
 
+       /* set up the remaining entries to point to the data */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               dma_addr_t tb_phys;
+               int tb_idx;
+
+               if (!skb_frag_size(frag))
+                       continue;
+
+               tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+                                          skb_frag_size(frag), DMA_TO_DEVICE);
+
+               if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+                       iwl_pcie_tfd_unmap(trans, out_meta,
+                                          &txq->tfds[q->write_ptr]);
+                       goto out_err;
+               }
+               tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+                                               skb_frag_size(frag), false);
+
+               out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+       }
+
        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
@@ -1869,7 +1942,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                             &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
                             skb->data + hdr_len, tb2_len);
        trace_iwlwifi_dev_tx_data(trans->dev, skb,
-                                 skb->data + hdr_len, tb2_len);
+                                 hdr_len, skb->len - hdr_len);
 
        wait_write_ptr = ieee80211_has_morefrags(fc);
 
index 99e873dc86847c80de5a4d27cfdf84c62d83a519..520bef80747f295bea9f98e4ca2573bbc5dde48a 100644 (file)
@@ -2399,6 +2399,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        ieee80211_hw_set(hw, AMPDU_AGGREGATION);
        ieee80211_hw_set(hw, MFP_CAPABLE);
        ieee80211_hw_set(hw, SIGNAL_DBM);
+       ieee80211_hw_set(hw, TDLS_WIDER_BW);
        if (rctbl)
                ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
 
@@ -2676,7 +2677,7 @@ static void hwsim_mon_setup(struct net_device *dev)
        dev->netdev_ops = &hwsim_netdev_ops;
        dev->destructor = free_netdev;
        ether_setup(dev);
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->type = ARPHRD_IEEE80211_RADIOTAP;
        eth_zero_addr(dev->dev_addr);
        dev->dev_addr[0] = 0x12;
@@ -3120,8 +3121,10 @@ static int hwsim_init_netlink(void)
                goto failure;
 
        rc = netlink_register_notifier(&hwsim_netlink_notifier);
-       if (rc)
+       if (rc) {
+               genl_unregister_family(&hwsim_genl_family);
                goto failure;
+       }
 
        return 0;
 
index 7217da4f1543aed26d965ae7efed4da0f402765b..57a80cfa39b1b50743f3e1041efff37eda1bb913 100644 (file)
@@ -112,7 +112,9 @@ static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
        if (!skb)
                return;
 
-       ieee80211_rx_ni(dev->hw, skb);
+       spin_lock(&dev->mac_lock);
+       ieee80211_rx(dev->hw, skb);
+       spin_unlock(&dev->mac_lock);
 }
 
 static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
@@ -236,23 +238,42 @@ static void mt7601u_complete_tx(struct urb *urb)
        skb = q->e[q->start].skb;
        trace_mt_tx_dma_done(dev, skb);
 
-       mt7601u_tx_status(dev, skb);
+       __skb_queue_tail(&dev->tx_skb_done, skb);
+       tasklet_schedule(&dev->tx_tasklet);
 
        if (q->used == q->entries - q->entries / 8)
                ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
 
        q->start = (q->start + 1) % q->entries;
        q->used--;
+out:
+       spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
 
-       if (urb->status)
-               goto out;
+static void mt7601u_tx_tasklet(unsigned long data)
+{
+       struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+       struct sk_buff_head skbs;
+       unsigned long flags;
+
+       __skb_queue_head_init(&skbs);
+
+       spin_lock_irqsave(&dev->tx_lock, flags);
 
        set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
        if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
                queue_delayed_work(dev->stat_wq, &dev->stat_work,
                                   msecs_to_jiffies(10));
-out:
+
+       skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
        spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+       while (!skb_queue_empty(&skbs)) {
+               struct sk_buff *skb = __skb_dequeue(&skbs);
+
+               mt7601u_tx_status(dev, skb);
+       }
 }
 
 static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
@@ -475,6 +496,7 @@ int mt7601u_dma_init(struct mt7601u_dev *dev)
 {
        int ret = -ENOMEM;
 
+       tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
        tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
 
        ret = mt7601u_alloc_tx(dev);
@@ -502,4 +524,6 @@ void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
 
        mt7601u_free_rx(dev);
        mt7601u_free_tx(dev);
+
+       tasklet_kill(&dev->tx_tasklet);
 }
index df3dd56199a7ec8a43202c8d40b8d919df846006..26190fd33407bc5e0e33eff25d223d1956ebb16e 100644 (file)
@@ -454,8 +454,10 @@ struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
        spin_lock_init(&dev->tx_lock);
        spin_lock_init(&dev->rx_lock);
        spin_lock_init(&dev->lock);
+       spin_lock_init(&dev->mac_lock);
        spin_lock_init(&dev->con_mon_lock);
        atomic_set(&dev->avg_ampdu_len, 1);
+       skb_queue_head_init(&dev->tx_skb_done);
 
        dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
        if (!dev->stat_wq) {
index 7514bce1ac91dfa61bfe82e822c92eac57f1a129..e21c53ed09fb902b91793b792f21762779e89345 100644 (file)
@@ -181,7 +181,11 @@ void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
        }
 
        mt76_mac_fill_tx_status(dev, &info, stat);
+
+       spin_lock_bh(&dev->mac_lock);
        ieee80211_tx_status_noskb(dev->hw, sta, &info);
+       spin_unlock_bh(&dev->mac_lock);
+
        rcu_read_unlock();
 }
 
index 9102be6b95cb70bb51f4ce34864549046e922605..428bd2f10b7b3450a2afbdafd988254db44920c7 100644 (file)
@@ -141,12 +141,13 @@ enum {
 /**
  * struct mt7601u_dev - adapter structure
  * @lock:              protects @wcid->tx_rate.
+ * @mac_lock:          locks out mac80211's tx status and rx paths.
  * @tx_lock:           protects @tx_q and changes of MT7601U_STATE_*_STATS
                      flags in @state.
*                     flags in @state.
  * @rx_lock:           protects @rx_q.
  * @con_mon_lock:      protects @ap_bssid, @bcn_*, @avg_rssi.
  * @mutex:             ensures exclusive access from mac80211 callbacks.
- * @vendor_req_mutex:  ensures atomicity of vendor requests.
+ * @vendor_req_mutex:  protects @vend_buf, ensures atomicity of split writes.
  * @reg_atomic_mutex:  ensures atomicity of indirect register accesses
  *                     (accesses to RF and BBP).
  * @hw_atomic_mutex:   ensures exclusive access to HW during critical
@@ -177,6 +178,7 @@ struct mt7601u_dev {
        struct mt76_wcid __rcu *wcid[N_WCIDS];
 
        spinlock_t lock;
+       spinlock_t mac_lock;
 
        const u16 *beacon_offsets;
 
@@ -184,6 +186,8 @@ struct mt7601u_dev {
        struct mt7601u_eeprom_params *ee;
 
        struct mutex vendor_req_mutex;
+       void *vend_buf;
+
        struct mutex reg_atomic_mutex;
        struct mutex hw_atomic_mutex;
 
@@ -197,7 +201,9 @@ struct mt7601u_dev {
 
        /* TX */
        spinlock_t tx_lock;
+       struct tasklet_struct tx_tasklet;
        struct mt7601u_tx_queue *tx_q;
+       struct sk_buff_head tx_skb_done;
 
        atomic_t avg_ampdu_len;
 
index 0be2080ceab387ef9e21c1849b1e96e4eb635309..a0a33dc8f6bcbd31c1b58012f6811bf6c9d19720 100644 (file)
@@ -116,7 +116,10 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
        ieee80211_tx_info_clear_status(info);
        info->status.rates[0].idx = -1;
        info->flags |= IEEE80211_TX_STAT_ACK;
+
+       spin_lock(&dev->mac_lock);
        ieee80211_tx_status(dev->hw, skb);
+       spin_unlock(&dev->mac_lock);
 }
 
 static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
index 54dba400186511bf2842972f631bd2dde51c5a46..416c6045ff3128005664fd8600d97c3cba9245a9 100644 (file)
@@ -92,10 +92,9 @@ void mt7601u_complete_urb(struct urb *urb)
        complete(cmpl);
 }
 
-static int
-__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
-                        const u8 direction, const u16 val, const u16 offset,
-                        void *buf, const size_t buflen)
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+                          const u8 direction, const u16 val, const u16 offset,
+                          void *buf, const size_t buflen)
 {
        int i, ret;
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
@@ -110,6 +109,8 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
                trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
                                  buf, buflen, ret);
 
+               if (ret == -ENODEV)
+                       set_bit(MT7601U_STATE_REMOVED, &dev->state);
                if (ret >= 0 || ret == -ENODEV)
                        return ret;
 
@@ -122,25 +123,6 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
        return ret;
 }
 
-int
-mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
-                      const u8 direction, const u16 val, const u16 offset,
-                      void *buf, const size_t buflen)
-{
-       int ret;
-
-       mutex_lock(&dev->vendor_req_mutex);
-
-       ret = __mt7601u_vendor_request(dev, req, direction, val, offset,
-                                      buf, buflen);
-       if (ret == -ENODEV)
-               set_bit(MT7601U_STATE_REMOVED, &dev->state);
-
-       mutex_unlock(&dev->vendor_req_mutex);
-
-       return ret;
-}
-
 void mt7601u_vendor_reset(struct mt7601u_dev *dev)
 {
        mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
@@ -150,19 +132,21 @@ void mt7601u_vendor_reset(struct mt7601u_dev *dev)
 u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
 {
        int ret;
-       __le32 reg;
-       u32 val;
+       u32 val = ~0;
 
        WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
 
+       mutex_lock(&dev->vendor_req_mutex);
+
        ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
-                                    0, offset, &reg, sizeof(reg));
-       val = le32_to_cpu(reg);
-       if (ret > 0 && ret != sizeof(reg)) {
+                                    0, offset, dev->vend_buf, MT_VEND_BUF);
+       if (ret == MT_VEND_BUF)
+               val = get_unaligned_le32(dev->vend_buf);
+       else if (ret > 0)
                dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
                        ret, offset);
-               val = ~0;
-       }
+
+       mutex_unlock(&dev->vendor_req_mutex);
 
        trace_reg_read(dev, offset, val);
        return val;
@@ -173,12 +157,17 @@ int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
 {
        int ret;
 
+       mutex_lock(&dev->vendor_req_mutex);
+
        ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
                                     val & 0xffff, offset, NULL, 0);
-       if (ret)
-               return ret;
-       return mt7601u_vendor_request(dev, req, USB_DIR_OUT,
-                                     val >> 16, offset + 2, NULL, 0);
+       if (!ret)
+               ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+                                            val >> 16, offset + 2, NULL, 0);
+
+       mutex_unlock(&dev->vendor_req_mutex);
+
+       return ret;
 }
 
 void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
@@ -275,6 +264,12 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
 
        usb_set_intfdata(usb_intf, dev);
 
+       dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL);
+       if (!dev->vend_buf) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
        ret = mt7601u_assign_pipes(usb_intf, dev);
        if (ret)
                goto err;
index 49e188fa37983788b99bcd082e82ef8f5a43e4f7..bc182022b9d6398ed748ce7b3b7153c9831d8d4a 100644 (file)
@@ -23,6 +23,8 @@
 
 #define MT_VEND_DEV_MODE_RESET 1
 
+#define MT_VEND_BUF            sizeof(__le32)
+
 enum mt_vendor_req {
        MT_VEND_DEV_MODE = 1,
        MT_VEND_WRITE = 2,
index 48edf387683ebbd79a98f5257689f816378be3cf..317d99189556ab1c3025bf11afd7b61449fd124e 100644 (file)
@@ -9,36 +9,36 @@ config MWIFIEX
          mwifiex.
 
 config MWIFIEX_SDIO
-       tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897"
+       tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997"
        depends on MWIFIEX && MMC
        select FW_LOADER
        select WANT_DEV_COREDUMP
        ---help---
          This adds support for wireless adapters based on Marvell
-         8786/8787/8797/8887/8897 chipsets with SDIO interface.
+         8786/8787/8797/8887/8897/8997 chipsets with SDIO interface.
 
          If you choose to build it as a module, it will be called
          mwifiex_sdio.
 
 config MWIFIEX_PCIE
-       tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
+       tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897/8997"
        depends on MWIFIEX && PCI
        select FW_LOADER
        select WANT_DEV_COREDUMP
        ---help---
          This adds support for wireless adapters based on Marvell
-         8766/8897 chipsets with PCIe interface.
+         8766/8897/8997 chipsets with PCIe interface.
 
          If you choose to build it as a module, it will be called
          mwifiex_pcie.
 
 config MWIFIEX_USB
-       tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897"
+       tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997"
        depends on MWIFIEX && USB
        select FW_LOADER
        ---help---
          This adds support for wireless adapters based on Marvell
-         8797/8897 chipset with USB interface.
+         8797/8897/8997 chipset with USB interface.
 
          If you choose to build it as a module, it will be called
          mwifiex_usb.
index 207da40500f4309fcdd0405e99365f4aa274f502..45ae38e32621805edb24168963a6ee915754148c 100644 (file)
@@ -167,8 +167,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
                mwifiex_dbg(adapter, ERROR,
                            "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
                        cmd_code);
-               if (cmd_node->wait_q_enabled)
-                       mwifiex_complete_cmd(adapter, cmd_node);
                mwifiex_recycle_cmd_node(adapter, cmd_node);
                queue_work(adapter->workqueue, &adapter->main_work);
                return -1;
@@ -809,17 +807,6 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
        adapter->is_cmd_timedout = 0;
 
        resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
-       if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
-               mwifiex_dbg(adapter, ERROR,
-                           "CMD_RESP: %#x been canceled\n",
-                           le16_to_cpu(resp->command));
-               mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
-               spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
-               adapter->curr_cmd = NULL;
-               spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
-               return -1;
-       }
-
        if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
                /* Copy original response back to response buffer */
                struct mwifiex_ds_misc_cmd *hostcmd;
@@ -989,12 +976,13 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
 
                if (cmd_node->wait_q_enabled) {
                        adapter->cmd_wait_q.status = -ETIMEDOUT;
-                       wake_up_interruptible(&adapter->cmd_wait_q.wait);
                        mwifiex_cancel_pending_ioctl(adapter);
                }
        }
-       if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
+       if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
                mwifiex_init_fw_complete(adapter);
+               return;
+       }
 
        if (adapter->if_ops.device_dump)
                adapter->if_ops.device_dump(adapter);
@@ -1024,6 +1012,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
                adapter->curr_cmd->wait_q_enabled = false;
                adapter->cmd_wait_q.status = -1;
                mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+               /* no recycle probably wait for response */
        }
        /* Cancel all pending command */
        spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
@@ -1032,11 +1021,8 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
                list_del(&cmd_node->list);
                spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
 
-               if (cmd_node->wait_q_enabled) {
+               if (cmd_node->wait_q_enabled)
                        adapter->cmd_wait_q.status = -1;
-                       mwifiex_complete_cmd(adapter, cmd_node);
-                       cmd_node->wait_q_enabled = false;
-               }
                mwifiex_recycle_cmd_node(adapter, cmd_node);
                spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
        }
@@ -1094,12 +1080,18 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
            (adapter->curr_cmd->wait_q_enabled)) {
                spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
                cmd_node = adapter->curr_cmd;
-               cmd_node->wait_q_enabled = false;
-               cmd_node->cmd_flag |= CMD_F_CANCELED;
-               mwifiex_recycle_cmd_node(adapter, cmd_node);
-               mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+               /* setting curr_cmd to NULL is quite dangerous, because
+                * mwifiex_process_cmdresp checks curr_cmd to be != NULL
+                * at the beginning then relies on it and dereferences
+                * it at will
+                * this probably works since mwifiex_cmd_timeout_func
+                * is the only caller of this function and responses
+                * at that point
+                */
                adapter->curr_cmd = NULL;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+
+               mwifiex_recycle_cmd_node(adapter, cmd_node);
        }
 
        /* Cancel all pending scan command */
@@ -1129,7 +1121,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                        }
                }
        }
-       adapter->cmd_wait_q.status = -1;
 }
 
 /*
index cff38ad129aad93fa8f63c7a8636afc80b51eb96..3ec2ac82e394a158e6c346d75e73a29715e7ec29 100644 (file)
@@ -438,7 +438,6 @@ enum P2P_MODES {
 
 
 #define CMD_F_HOSTCMD           (1 << 0)
-#define CMD_F_CANCELED          (1 << 1)
 
 #define HostCmd_CMD_ID_MASK             0x0fff
 
@@ -686,6 +685,7 @@ struct mwifiex_fw_chan_stats {
 enum mwifiex_chan_scan_mode_bitmasks {
        MWIFIEX_PASSIVE_SCAN = BIT(0),
        MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
+       MWIFIEX_HIDDEN_SSID_REPORT = BIT(4),
 };
 
 struct mwifiex_chan_scan_param_set {
index 8fa363add9706364843207a85d475096f20dbeb7..5d3ae63baea4c1950203563f712bda2f83c04988 100644 (file)
@@ -301,7 +301,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM;
        adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
        adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
-
+       adapter->active_scan_triggered = false;
        setup_timer(&adapter->wakeup_timer, wakeup_timer_fn,
                    (unsigned long)adapter);
 }
@@ -551,11 +551,6 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
                }
        }
 
-       if (adapter->if_ops.init_fw_port) {
-               if (adapter->if_ops.init_fw_port(adapter))
-                       return -1;
-       }
-
        for (i = 0; i < adapter->priv_num; i++) {
                if (adapter->priv[i]) {
                        ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
index face7478937f6559e106c4932d6551569b8beaac..6b9512140e7aa6b21d36e55d128486cf29210e74 100644 (file)
@@ -666,6 +666,7 @@ struct mwifiex_private {
        struct mwifiex_11h_intf_state state_11h;
        struct mwifiex_ds_mem_rw mem_rw;
        struct sk_buff_head bypass_txq;
+       struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
 };
 
 
@@ -986,6 +987,7 @@ struct mwifiex_adapter {
        u8 coex_tx_win_size;
        u8 coex_rx_win_size;
        bool drcs_enabled;
+       u8 active_scan_triggered;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
index 77b9055a2d147411515b5875f67b90210938ac9b..408b6846071655cbd87385fcbb1eb1f41b0fc181 100644 (file)
@@ -266,12 +266,17 @@ static const struct pci_device_id mwifiex_ids[] = {
        {
                PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               .driver_data = (unsigned long) &mwifiex_pcie8766,
+               .driver_data = (unsigned long)&mwifiex_pcie8766,
        },
        {
                PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               .driver_data = (unsigned long) &mwifiex_pcie8897,
+               .driver_data = (unsigned long)&mwifiex_pcie8897,
+       },
+       {
+               PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               .driver_data = (unsigned long)&mwifiex_pcie8997,
        },
        {},
 };
@@ -1082,6 +1087,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
                        card->txbd_rdptr++;
                        break;
                case PCIE_DEVICE_ID_MARVELL_88W8897:
+               case PCIE_DEVICE_ID_MARVELL_88W8997:
                        card->txbd_rdptr += reg->ring_tx_start_ptr;
                        break;
                }
@@ -1179,6 +1185,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
                        card->txbd_wrptr++;
                        break;
                case PCIE_DEVICE_ID_MARVELL_88W8897:
+               case PCIE_DEVICE_ID_MARVELL_88W8997:
                        card->txbd_wrptr += reg->ring_tx_start_ptr;
                        break;
                }
@@ -1807,6 +1814,8 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
 
        if (!card->evt_buf_list[rdptr]) {
                skb_push(skb, INTF_HEADER_LEN);
+               skb_put(skb, MAX_EVENT_SIZE - skb->len);
+               memset(skb->data, 0, MAX_EVENT_SIZE);
                if (mwifiex_map_pci_memory(adapter, skb,
                                           MAX_EVENT_SIZE,
                                           PCI_DMA_FROMDEVICE))
@@ -2731,3 +2740,4 @@ MODULE_VERSION(PCIE_VERSION);
 MODULE_LICENSE("GPL v2");
 MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(PCIE8997_DEFAULT_FW_NAME);
index 0e7ee8b72358f7feba632f43349113a6e662b210..48e549c3b285b362b2a7ffe97b0106df04c10eb7 100644 (file)
 
 #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
 #define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
+#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcie8997_uapsta.bin"
 
 #define PCIE_VENDOR_ID_MARVELL              (0x11ab)
 #define PCIE_DEVICE_ID_MARVELL_88W8766P                (0x2b30)
 #define PCIE_DEVICE_ID_MARVELL_88W8897         (0x2b38)
+#define PCIE_DEVICE_ID_MARVELL_88W8997         (0x2b42)
 
 /* Constants for Buffer Descriptor (BD) rings */
 #define MWIFIEX_MAX_TXRX_BD                    0x20
@@ -197,7 +199,38 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
        .sleep_cookie = 0,
        .fw_dump_ctrl = 0xcf4,
        .fw_dump_start = 0xcf8,
-       .fw_dump_end = 0xcff
+       .fw_dump_end = 0xcff,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
+       .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+       .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+       .cmd_size = PCIE_SCRATCH_2_REG,
+       .fw_status = PCIE_SCRATCH_3_REG,
+       .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+       .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+       .tx_rdptr = 0xC1A4,
+       .tx_wrptr = 0xC1A8,
+       .rx_rdptr = 0xC1A8,
+       .rx_wrptr = 0xC1A4,
+       .evt_rdptr = PCIE_SCRATCH_10_REG,
+       .evt_wrptr = PCIE_SCRATCH_11_REG,
+       .drv_rdy = PCIE_SCRATCH_12_REG,
+       .tx_start_ptr = 16,
+       .tx_mask = 0x0FFF0000,
+       .tx_wrap_mask = 0x01FF0000,
+       .rx_mask = 0x00000FFF,
+       .rx_wrap_mask = 0x000001FF,
+       .tx_rollover_ind = BIT(28),
+       .rx_rollover_ind = BIT(12),
+       .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+       .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+       .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+       .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+       .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+       .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+       .pfu_enabled = 1,
+       .sleep_cookie = 0,
 };
 
 struct mwifiex_pcie_device {
@@ -227,6 +260,15 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
        .can_ext_scan = true,
 };
 
+static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
+       .firmware       = PCIE8997_DEFAULT_FW_NAME,
+       .reg            = &mwifiex_reg_8997,
+       .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+       .can_dump_fw = false,
+       .can_ext_scan = true,
+};
+
 struct mwifiex_evt_buf_desc {
        u64 paddr;
        u16 len;
@@ -325,6 +367,7 @@ mwifiex_pcie_txbd_not_full(struct pcie_service_card *card)
                        return 1;
                break;
        case PCIE_DEVICE_ID_MARVELL_88W8897:
+       case PCIE_DEVICE_ID_MARVELL_88W8997:
                if (((card->txbd_wrptr & reg->tx_mask) !=
                     (card->txbd_rdptr & reg->tx_mask)) ||
                    ((card->txbd_wrptr & reg->tx_rollover_ind) ==
index ef8da8ebcbab4ee5af196c4ec854b178c3b81322..5847863a2d6bec573956ff88c42a16c3495333b3 100644 (file)
@@ -527,7 +527,8 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
 
                        if (ch->flags & IEEE80211_CHAN_NO_IR)
                                scan_chan_list[chan_idx].chan_scan_mode_bitmap
-                                       |= MWIFIEX_PASSIVE_SCAN;
+                                       |= (MWIFIEX_PASSIVE_SCAN |
+                                           MWIFIEX_HIDDEN_SSID_REPORT);
                        else
                                scan_chan_list[chan_idx].chan_scan_mode_bitmap
                                        &= ~MWIFIEX_PASSIVE_SCAN;
@@ -1049,7 +1050,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                        if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
                                (scan_chan_list +
                                 chan_idx)->chan_scan_mode_bitmap
-                                       |= MWIFIEX_PASSIVE_SCAN;
+                                       |= (MWIFIEX_PASSIVE_SCAN |
+                                           MWIFIEX_HIDDEN_SSID_REPORT);
                        else
                                (scan_chan_list +
                                 chan_idx)->chan_scan_mode_bitmap
@@ -1600,6 +1602,62 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
        return ret;
 }
 
+/* This function checks if SSID string contains all zeroes or length is zero */
+static bool mwifiex_is_hidden_ssid(struct cfg80211_ssid *ssid)
+{
+       int idx;
+
+       for (idx = 0; idx < ssid->ssid_len; idx++) {
+               if (ssid->ssid[idx])
+                       return false;
+       }
+
+       return true;
+}
+
+/* This function checks if any hidden SSID found in passive scan channels
+ * and save those channels for specific SSID active scan
+ */
+static int mwifiex_save_hidden_ssid_channels(struct mwifiex_private *priv,
+                                            struct cfg80211_bss *bss)
+{
+       struct mwifiex_bssdescriptor *bss_desc;
+       int ret;
+       int chid;
+
+       /* Allocate and fill new bss descriptor */
+       bss_desc = kzalloc(sizeof(*bss_desc), GFP_KERNEL);
+       if (!bss_desc)
+               return -ENOMEM;
+
+       ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
+       if (ret)
+               goto done;
+
+       if (mwifiex_is_hidden_ssid(&bss_desc->ssid)) {
+               mwifiex_dbg(priv->adapter, INFO, "found hidden SSID\n");
+               for (chid = 0 ; chid < MWIFIEX_USER_SCAN_CHAN_MAX; chid++) {
+                       if (priv->hidden_chan[chid].chan_number ==
+                           bss->channel->hw_value)
+                               break;
+
+                       if (!priv->hidden_chan[chid].chan_number) {
+                               priv->hidden_chan[chid].chan_number =
+                                       bss->channel->hw_value;
+                               priv->hidden_chan[chid].radio_type =
+                                       bss->channel->band;
+                               priv->hidden_chan[chid].scan_type =
+                                       MWIFIEX_SCAN_TYPE_ACTIVE;
+                               break;
+                       }
+               }
+       }
+
+done:
+       kfree(bss_desc);
+       return 0;
+}
+
 static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
                                          struct cfg80211_bss *bss)
 {
@@ -1789,6 +1847,14 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
                                    .mac_address, ETH_ALEN))
                                mwifiex_update_curr_bss_params(priv, bss);
                        cfg80211_put_bss(priv->wdev.wiphy, bss);
+
+                       if ((chan->flags & IEEE80211_CHAN_RADAR) ||
+                           (chan->flags & IEEE80211_CHAN_NO_IR)) {
+                               mwifiex_dbg(adapter, INFO,
+                                           "radar or passive channel %d\n",
+                                           channel);
+                               mwifiex_save_hidden_ssid_channels(priv, bss);
+                       }
                }
        } else {
                mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n");
@@ -1812,6 +1878,57 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv)
        }
 }
 
+/* This function checks if any hidden SSID found in passive scan channels
+ * and do specific SSID active scan for those channels
+ */
+static int
+mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
+{
+       int ret;
+       struct mwifiex_adapter *adapter = priv->adapter;
+       u8 id = 0;
+       struct mwifiex_user_scan_cfg  *user_scan_cfg;
+
+       if (adapter->active_scan_triggered) {
+               adapter->active_scan_triggered = false;
+               return 0;
+       }
+
+       if (!priv->hidden_chan[0].chan_number) {
+               mwifiex_dbg(adapter, INFO, "No BSS with hidden SSID found on DFS channels\n");
+               return 0;
+       }
+       user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
+
+       if (!user_scan_cfg)
+               return -ENOMEM;
+
+       memset(user_scan_cfg, 0, sizeof(*user_scan_cfg));
+
+       for (id = 0; id < MWIFIEX_USER_SCAN_CHAN_MAX; id++) {
+               if (!priv->hidden_chan[id].chan_number)
+                       break;
+               memcpy(&user_scan_cfg->chan_list[id],
+                      &priv->hidden_chan[id],
+                      sizeof(struct mwifiex_user_scan_chan));
+       }
+
+       adapter->active_scan_triggered = true;
+       user_scan_cfg->num_ssids = priv->scan_request->n_ssids;
+       user_scan_cfg->ssid_list = priv->scan_request->ssids;
+
+       ret = mwifiex_scan_networks(priv, user_scan_cfg);
+       kfree(user_scan_cfg);
+
+       memset(&priv->hidden_chan, 0, sizeof(priv->hidden_chan));
+
+       if (ret) {
+               dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
 static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
 {
        struct mwifiex_adapter *adapter = priv->adapter;
@@ -1825,6 +1942,8 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
                adapter->scan_processing = false;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
+               mwifiex_active_scan_req_for_passive_chan(priv);
+
                if (!adapter->ext_scan)
                        mwifiex_complete_scan(priv);
 
@@ -1851,15 +1970,17 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
                adapter->scan_processing = false;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
-               if (priv->scan_request) {
-                       mwifiex_dbg(adapter, INFO,
-                                   "info: aborting scan\n");
-                       cfg80211_scan_done(priv->scan_request, 1);
-                       priv->scan_request = NULL;
-               } else {
-                       priv->scan_aborting = false;
-                       mwifiex_dbg(adapter, INFO,
-                                   "info: scan already aborted\n");
+               if (!adapter->active_scan_triggered) {
+                       if (priv->scan_request) {
+                               mwifiex_dbg(adapter, INFO,
+                                           "info: aborting scan\n");
+                               cfg80211_scan_done(priv->scan_request, 1);
+                               priv->scan_request = NULL;
+                       } else {
+                               priv->scan_aborting = false;
+                               mwifiex_dbg(adapter, INFO,
+                                           "info: scan already aborted\n");
+                       }
                }
        } else {
                /* Get scan command from scan_pending_q and put to
index a0b121f3460c871eefca6fd30d79a3217ef401c8..5d05c6fe642985cd377eefec707d75112f0240ea 100644 (file)
@@ -51,6 +51,10 @@ static unsigned long iface_work_flags;
 
 static struct semaphore add_remove_card_sem;
 
+static struct memory_type_mapping generic_mem_type_map[] = {
+       {"DUMP", NULL, 0, 0xDD},
+};
+
 static struct memory_type_mapping mem_type_mapping_tbl[] = {
        {"ITCM", NULL, 0, 0xF0},
        {"DTCM", NULL, 0, 0xF1},
@@ -91,6 +95,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
                return -ENOMEM;
 
        card->func = func;
+       card->device_id = id;
 
        func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
@@ -107,6 +112,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
                card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
                card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
                card->can_dump_fw = data->can_dump_fw;
+               card->fw_dump_enh = data->fw_dump_enh;
                card->can_auto_tdls = data->can_auto_tdls;
                card->can_ext_scan = data->can_ext_scan;
        }
@@ -287,6 +293,8 @@ static int mwifiex_sdio_suspend(struct device *dev)
 #define SDIO_DEVICE_ID_MARVELL_8887   (0x9135)
 /* Device ID for SD8801 */
 #define SDIO_DEVICE_ID_MARVELL_8801   (0x9139)
+/* Device ID for SD8997 */
+#define SDIO_DEVICE_ID_MARVELL_8997   (0x9141)
 
 
 /* WLAN IDs */
@@ -303,6 +311,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
                .driver_data = (unsigned long)&mwifiex_sdio_sd8887},
        {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
                .driver_data = (unsigned long)&mwifiex_sdio_sd8801},
+       {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
+               .driver_data = (unsigned long)&mwifiex_sdio_sd8997},
        {},
 };
 
@@ -910,6 +920,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
        if (!fwbuf)
                return -ENOMEM;
 
+       sdio_claim_host(card->func);
+
        /* Perform firmware data transfer */
        do {
                /* The host polls for the DN_LD_CARD_RDY and CARD_IO_READY
@@ -1014,6 +1026,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                offset += txlen;
        } while (true);
 
+       sdio_release_host(card->func);
+
        mwifiex_dbg(adapter, MSG,
                    "info: FW download over, size %d bytes\n", offset);
 
@@ -1964,8 +1978,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        adapter->dev = &func->dev;
 
        strcpy(adapter->fw_name, card->firmware);
-       adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
-       adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+       if (card->fw_dump_enh) {
+               adapter->mem_type_mapping_tbl = generic_mem_type_map;
+               adapter->num_mem_types = 1;
+       } else {
+               adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
+               adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+       }
 
        return 0;
 }
@@ -2107,26 +2126,46 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
                    port, card->mp_data_port_mask);
 }
 
+static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
+{
+       struct sdio_func *func = card->func;
+       const struct sdio_device_id *device_id = card->device_id;
+
+       /* TODO mmc_hw_reset does not require destroying and re-probing the
+        * whole adapter. Hence there is no need for this Rube Goldberg
+        * design to reload the fw from an external workqueue. If we don't
+        * destroy the adapter we could reload the fw from
+        * mwifiex_main_work_queue directly.
+        * The real difficulty with fw reset is to restore all the user
+        * settings applied through ioctl. By destroying and recreating the
+        * adapter, we take the easy way out, since we rely on user space to
+        * restore them. We assume that user space will treat the new
+        * incarnation of the adapter (interfaces) as if they had been just
+        * discovered and initialize them from scratch.
+        */
+
+       mwifiex_sdio_remove(func);
+
+       /* power cycle the adapter */
+       sdio_claim_host(func);
+       mmc_hw_reset(func->card->host);
+       sdio_release_host(func);
+
+       mwifiex_sdio_probe(func, device_id);
+}
+
 static struct mwifiex_adapter *save_adapter;
 static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
-       struct mmc_host *target = card->func->card->host;
-
-       /* The actual reset operation must be run outside of driver thread.
-        * This is because mmc_remove_host() will cause the device to be
-        * instantly destroyed, and the driver then needs to end its thread,
-        * leading to a deadlock.
-        *
-        * We run it in a totally independent workqueue.
-        */
 
-       mwifiex_dbg(adapter, WARN, "Resetting card...\n");
-       mmc_remove_host(target);
-       /* 200ms delay is based on experiment with sdhci controller */
-       mdelay(200);
-       target->rescan_entered = 0; /* rescan non-removable cards */
-       mmc_add_host(target);
+       /* TODO card pointer is unprotected. If the adapter is removed
+        * physically, sdio core might trigger mwifiex_sdio_remove before this
+        * workqueue is run, which will destroy the adapter struct. When this
+        * workqueue eventually executes it will dereference an invalid adapter
+        * pointer.
+        */
+       mwifiex_recreate_adapter(card);
 }
 
 /* This function read/write firmware */
@@ -2138,8 +2177,8 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
        int ret, tries;
        u8 ctrl_data = 0;
 
-       sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
-                   &ret);
+       sdio_writeb(card->func, card->reg->fw_dump_host_ready,
+                   card->reg->fw_dump_ctrl, &ret);
        if (ret) {
                mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n");
                return RDWR_STATUS_FAILURE;
@@ -2155,10 +2194,10 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
                        break;
                if (doneflag && ctrl_data == doneflag)
                        return RDWR_STATUS_DONE;
-               if (ctrl_data != FW_DUMP_HOST_READY) {
+               if (ctrl_data != card->reg->fw_dump_host_ready) {
                        mwifiex_dbg(adapter, WARN,
-                                   "The ctrl reg was changed, re-try again!\n");
-                       sdio_writeb(card->func, FW_DUMP_HOST_READY,
+                                   "The ctrl reg was changed, re-try again\n");
+                       sdio_writeb(card->func, card->reg->fw_dump_host_ready,
                                    card->reg->fw_dump_ctrl, &ret);
                        if (ret) {
                                mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
@@ -2167,7 +2206,7 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
                }
                usleep_range(100, 200);
        }
-       if (ctrl_data == FW_DUMP_HOST_READY) {
+       if (ctrl_data == card->reg->fw_dump_host_ready) {
                mwifiex_dbg(adapter, ERROR,
                            "Fail to pull ctrl_data\n");
                return RDWR_STATUS_FAILURE;
@@ -2300,10 +2339,129 @@ done:
        sdio_release_host(card->func);
 }
 
+static void mwifiex_sdio_generic_fw_dump(struct mwifiex_adapter *adapter)
+{
+       struct sdio_mmc_card *card = adapter->card;
+       struct memory_type_mapping *entry = &generic_mem_type_map[0];
+       unsigned int reg, reg_start, reg_end;
+       u8 start_flag = 0, done_flag = 0;
+       u8 *dbg_ptr, *end_ptr;
+       enum rdwr_status stat;
+       int ret = -1, tries;
+
+       if (!card->fw_dump_enh)
+               return;
+
+       if (entry->mem_ptr) {
+               vfree(entry->mem_ptr);
+               entry->mem_ptr = NULL;
+       }
+       entry->mem_size = 0;
+
+       mwifiex_pm_wakeup_card(adapter);
+       sdio_claim_host(card->func);
+
+       mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
+
+       stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+       if (stat == RDWR_STATUS_FAILURE)
+               goto done;
+
+       reg_start = card->reg->fw_dump_start;
+       reg_end = card->reg->fw_dump_end;
+       for (reg = reg_start; reg <= reg_end; reg++) {
+               for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
+                       start_flag = sdio_readb(card->func, reg, &ret);
+                       if (ret) {
+                               mwifiex_dbg(adapter, ERROR,
+                                           "SDIO read err\n");
+                               goto done;
+                       }
+                       if (start_flag == 0)
+                               break;
+                       if (tries == MAX_POLL_TRIES) {
+                               mwifiex_dbg(adapter, ERROR,
+                                           "FW not ready to dump\n");
+                               ret = -1;
+                               goto done;
+                       }
+               }
+               usleep_range(100, 200);
+       }
+
+       entry->mem_ptr = vmalloc(0xf0000 + 1);
+       if (!entry->mem_ptr) {
+               ret = -1;
+               goto done;
+       }
+       dbg_ptr = entry->mem_ptr;
+       entry->mem_size = 0xf0000;
+       end_ptr = dbg_ptr + entry->mem_size;
+
+       done_flag = entry->done_flag;
+       mwifiex_dbg(adapter, DUMP,
+                   "Start %s output, please wait...\n", entry->mem_name);
+
+       while (true) {
+               stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+               if (stat == RDWR_STATUS_FAILURE)
+                       goto done;
+               for (reg = reg_start; reg <= reg_end; reg++) {
+                       *dbg_ptr = sdio_readb(card->func, reg, &ret);
+                       if (ret) {
+                               mwifiex_dbg(adapter, ERROR,
+                                           "SDIO read err\n");
+                               goto done;
+                       }
+                       dbg_ptr++;
+                       if (dbg_ptr >= end_ptr) {
+                               u8 *tmp_ptr;
+
+                               tmp_ptr = vmalloc(entry->mem_size + 0x4000 + 1);
+                               if (!tmp_ptr)
+                                       goto done;
+
+                               memcpy(tmp_ptr, entry->mem_ptr,
+                                      entry->mem_size);
+                               vfree(entry->mem_ptr);
+                               entry->mem_ptr = tmp_ptr;
+                               tmp_ptr = NULL;
+                               dbg_ptr = entry->mem_ptr + entry->mem_size;
+                               entry->mem_size += 0x4000;
+                               end_ptr = entry->mem_ptr + entry->mem_size;
+                       }
+               }
+               if (stat == RDWR_STATUS_DONE) {
+                       entry->mem_size = dbg_ptr - entry->mem_ptr;
+                       mwifiex_dbg(adapter, DUMP, "dump %s done size=0x%x\n",
+                                   entry->mem_name, entry->mem_size);
+                       ret = 0;
+                       break;
+               }
+       }
+       mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
+
+done:
+       if (ret) {
+               mwifiex_dbg(adapter, ERROR, "firmware dump failed\n");
+               if (entry->mem_ptr) {
+                       vfree(entry->mem_ptr);
+                       entry->mem_ptr = NULL;
+               }
+               entry->mem_size = 0;
+       }
+       sdio_release_host(card->func);
+}
+
 static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
 {
+       struct sdio_mmc_card *card = adapter->card;
+
        mwifiex_drv_info_dump(adapter);
-       mwifiex_sdio_fw_dump(adapter);
+       if (card->fw_dump_enh)
+               mwifiex_sdio_generic_fw_dump(adapter);
+       else
+               mwifiex_sdio_fw_dump(adapter);
        mwifiex_upload_device_dump(adapter);
 }
 
@@ -2510,3 +2668,4 @@ MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
index 6f645cf47369baddaa10bc6489837fa914a14a57..b9fbc5cf6262d8647d6064ddde51211acf749f72 100644 (file)
@@ -35,6 +35,7 @@
 #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
 #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
 #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
+#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
 
 #define BLOCK_MODE     1
 #define BYTE_MODE      0
@@ -222,6 +223,7 @@ struct mwifiex_sdio_card_reg {
        u8 cmd_cfg_1;
        u8 cmd_cfg_2;
        u8 cmd_cfg_3;
+       u8 fw_dump_host_ready;
        u8 fw_dump_ctrl;
        u8 fw_dump_start;
        u8 fw_dump_end;
@@ -257,11 +259,15 @@ struct sdio_mmc_card {
        bool supports_sdio_new_mode;
        bool has_control_mask;
        bool can_dump_fw;
+       bool fw_dump_enh;
        bool can_auto_tdls;
        bool can_ext_scan;
 
        struct mwifiex_sdio_mpa_tx mpa_tx;
        struct mwifiex_sdio_mpa_rx mpa_rx;
+
+       /* needed for card reset */
+       const struct sdio_device_id *device_id;
 };
 
 struct mwifiex_sdio_device {
@@ -275,6 +281,7 @@ struct mwifiex_sdio_device {
        bool supports_sdio_new_mode;
        bool has_control_mask;
        bool can_dump_fw;
+       bool fw_dump_enh;
        bool can_auto_tdls;
        bool can_ext_scan;
 };
@@ -350,6 +357,7 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
        .cmd_cfg_1 = 0xb9,
        .cmd_cfg_2 = 0xba,
        .cmd_cfg_3 = 0xbb,
+       .fw_dump_host_ready = 0xee,
        .fw_dump_ctrl = 0xe2,
        .fw_dump_start = 0xe3,
        .fw_dump_end = 0xea,
@@ -361,6 +369,59 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
                                 0x59, 0x5c, 0x5d},
 };
 
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
+       .start_rd_port = 0,
+       .start_wr_port = 0,
+       .base_0_reg = 0xF8,
+       .base_1_reg = 0xF9,
+       .poll_reg = 0x5C,
+       .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+                       CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+       .host_int_rsr_reg = 0x4,
+       .host_int_status_reg = 0x0C,
+       .host_int_mask_reg = 0x08,
+       .status_reg_0 = 0xE8,
+       .status_reg_1 = 0xE9,
+       .sdio_int_mask = 0xff,
+       .data_port_mask = 0xffffffff,
+       .io_port_0_reg = 0xE4,
+       .io_port_1_reg = 0xE5,
+       .io_port_2_reg = 0xE6,
+       .max_mp_regs = 196,
+       .rd_bitmap_l = 0x10,
+       .rd_bitmap_u = 0x11,
+       .rd_bitmap_1l = 0x12,
+       .rd_bitmap_1u = 0x13,
+       .wr_bitmap_l = 0x14,
+       .wr_bitmap_u = 0x15,
+       .wr_bitmap_1l = 0x16,
+       .wr_bitmap_1u = 0x17,
+       .rd_len_p0_l = 0x18,
+       .rd_len_p0_u = 0x19,
+       .card_misc_cfg_reg = 0xd8,
+       .card_cfg_2_1_reg = 0xd9,
+       .cmd_rd_len_0 = 0xc0,
+       .cmd_rd_len_1 = 0xc1,
+       .cmd_rd_len_2 = 0xc2,
+       .cmd_rd_len_3 = 0xc3,
+       .cmd_cfg_0 = 0xc4,
+       .cmd_cfg_1 = 0xc5,
+       .cmd_cfg_2 = 0xc6,
+       .cmd_cfg_3 = 0xc7,
+       .fw_dump_host_ready = 0xcc,
+       .fw_dump_ctrl = 0xf0,
+       .fw_dump_start = 0xf1,
+       .fw_dump_end = 0xf8,
+       .func1_dump_reg_start = 0x10,
+       .func1_dump_reg_end = 0x17,
+       .func1_scratch_reg = 0xe8,
+       .func1_spec_reg_num = 13,
+       .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+                                0x60, 0x61, 0x62, 0x64,
+                                0x65, 0x66, 0x68, 0x69,
+                                0x6a},
+};
+
 static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
        .start_rd_port = 0,
        .start_wr_port = 0,
@@ -469,6 +530,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
        .can_ext_scan = true,
 };
 
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+       .firmware = SD8997_DEFAULT_FW_NAME,
+       .reg = &mwifiex_reg_sd8997,
+       .max_ports = 32,
+       .mp_agg_pkt_limit = 16,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+       .supports_sdio_new_mode = true,
+       .has_control_mask = false,
+       .can_dump_fw = true,
+       .fw_dump_enh = true,
+       .can_auto_tdls = false,
+       .can_ext_scan = true,
+};
+
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
        .firmware = SD8887_DEFAULT_FW_NAME,
        .reg = &mwifiex_reg_sd8887,
index 89e8dafb473876a23d48819f5550b4a24fa0bb1d..87b69d8ad120e0e3f51ffb7edfbf39ff6f1d6dbf 100644 (file)
@@ -895,7 +895,7 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
        case ACT_TDLS_DELETE:
                if (reason) {
                        if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
-                               mwifiex_dbg(priv->adapter, ERROR,
+                               mwifiex_dbg(priv->adapter, MSG,
                                            "TDLS link delete for %pM failed: reason %d\n",
                                            cmd_tdls_oper->peer_mac, reason);
                        else
index d8b7d9c20450f704988c22e26ce81656d4e1a621..a6c8a4f7bfe96aa44b5f46db60c29944abf5a79b 100644 (file)
@@ -66,8 +66,8 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
        if (status <= 0) {
                if (status == 0)
                        status = -ETIMEDOUT;
-               mwifiex_dbg(adapter, ERROR,
-                           "cmd_wait_q terminated: %d\n", status);
+               mwifiex_dbg(adapter, ERROR, "cmd_wait_q terminated: %d\n",
+                           status);
                mwifiex_cancel_all_pending_cmd(adapter);
                return status;
        }
index aa3d3c5ed07b3671b8d9064ff020f8fab998efcd..b3e163de98995ba62767ab904f636853e2847ac3 100644 (file)
@@ -164,7 +164,7 @@ static void mwifiex_tdls_add_aid(struct mwifiex_private *priv,
        pos = (void *)skb_put(skb, 4);
        *pos++ = WLAN_EID_AID;
        *pos++ = 2;
-       *pos++ = le16_to_cpu(assoc_rsp->a_id);
+       memcpy(pos, &assoc_rsp->a_id, sizeof(assoc_rsp->a_id));
 
        return;
 }
index 492a8b3c636e2bcaac9b0fb81620bbf1d1327cd2..46c972a650a43c2918bb7703c98eeb877ccb79e6 100644 (file)
@@ -41,6 +41,8 @@ static int mwifiex_check_uap_capabilties(struct mwifiex_private *priv,
        mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilties:",
                         event->data, event->len);
 
+       skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE);
+
        while ((evt_len >= sizeof(tlv_hdr->header))) {
                tlv_hdr = (struct mwifiex_ie_types_data *)curr;
                tlv_len = le16_to_cpu(tlv_hdr->header.len);
index fbad99c503078ab9e2e840da438892a2067e92aa..5e789b2e06ea658d78f9997041a7c9853de171ae 100644 (file)
@@ -47,6 +47,11 @@ static struct usb_device_id mwifiex_usb_table[] = {
        {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
                                       USB_CLASS_VENDOR_SPEC,
                                       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
+       /* 8997 */
+       {USB_DEVICE(USB8XXX_VID, USB8997_PID_1)},
+       {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2,
+                                      USB_CLASS_VENDOR_SPEC,
+                                      USB_SUBCLASS_VENDOR_SPEC, 0xff)},
        { }     /* Terminating entry */
 };
 
@@ -382,12 +387,14 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
        case USB8797_PID_1:
        case USB8801_PID_1:
        case USB8897_PID_1:
+       case USB8997_PID_1:
                card->usb_boot_state = USB8XXX_FW_DNLD;
                break;
        case USB8766_PID_2:
        case USB8797_PID_2:
        case USB8801_PID_2:
        case USB8897_PID_2:
+       case USB8997_PID_2:
                card->usb_boot_state = USB8XXX_FW_READY;
                break;
        default:
@@ -814,6 +821,12 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        adapter->dev = &card->udev->dev;
 
        switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
+       case USB8997_PID_1:
+       case USB8997_PID_2:
+               adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
+               strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME);
+               adapter->ext_scan = true;
+               break;
        case USB8897_PID_1:
        case USB8897_PID_2:
                adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
@@ -870,8 +883,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
 
        /* Allocate memory for transmit */
        fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL);
-       if (!fwdata)
+       if (!fwdata) {
+               ret = -ENOMEM;
                goto fw_exit;
+       }
 
        /* Allocate memory for receive */
        recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
@@ -1121,3 +1136,4 @@ MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(USB8997_DEFAULT_FW_NAME);
index 57e1a5736318d6dec825f9348ed1fb7c5cd435db..f0051f8c8981eb4d7bd1614999a2db9238a66fd2 100644 (file)
@@ -32,6 +32,8 @@
 #define USB8897_PID_2          0x2046
 #define USB8801_PID_1          0x2049
 #define USB8801_PID_2          0x204a
+#define USB8997_PID_1          0x204d
+#define USB8997_PID_2          0x204e
 
 
 #define USB8XXX_FW_DNLD                1
@@ -46,6 +48,7 @@
 #define USB8797_DEFAULT_FW_NAME        "mrvl/usb8797_uapsta.bin"
 #define USB8801_DEFAULT_FW_NAME        "mrvl/usb8801_uapsta.bin"
 #define USB8897_DEFAULT_FW_NAME        "mrvl/usb8897_uapsta.bin"
+#define USB8997_DEFAULT_FW_NAME        "mrvl/usb8997_uapsta.bin"
 
 #define FW_DNLD_TX_BUF_SIZE    620
 #define FW_DNLD_RX_BUF_SIZE    2048
index 2504e422364a526246581a963969bcf87e4fc01b..0cec8a64473e9c1f764b841b27c74c9b36800fc5 100644 (file)
@@ -126,6 +126,10 @@ static int num_of_items = ARRAY_SIZE(items);
 int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter)
 {
 
+       if (adapter->hw_status == MWIFIEX_HW_STATUS_READY)
+               if (adapter->if_ops.init_fw_port)
+                       adapter->if_ops.init_fw_port(adapter);
+
        adapter->init_wait_q_woken = true;
        wake_up_interruptible(&adapter->init_wait_q);
        return 0;
@@ -496,16 +500,12 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
 int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
                         struct cmd_ctrl_node *cmd_node)
 {
-       mwifiex_dbg(adapter, CMD,
-                   "cmd completed: status=%d\n",
+       WARN_ON(!cmd_node->wait_q_enabled);
+       mwifiex_dbg(adapter, CMD, "cmd completed: status=%d\n",
                    adapter->cmd_wait_q.status);
 
-       *(cmd_node->condition) = true;
-
-       if (adapter->cmd_wait_q.status == -ETIMEDOUT)
-               mwifiex_dbg(adapter, ERROR, "cmd timeout\n");
-       else
-               wake_up_interruptible(&adapter->cmd_wait_q.wait);
+       *cmd_node->condition = true;
+       wake_up_interruptible(&adapter->cmd_wait_q.wait);
 
        return 0;
 }
index 77361af68b1868cfcfd67cc71f928ef95a243880..9420fc61c2e6ab09aeba53d01930be1ebc9fd0f3 100644 (file)
@@ -5019,35 +5019,36 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
 
                rcu_read_unlock();
-       }
 
-       if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
-           !priv->ap_fw) {
-               rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
-               if (rc)
-                       goto out;
+               if (changed & BSS_CHANGED_ASSOC) {
+                       if (!priv->ap_fw) {
+                               rc = mwl8k_cmd_set_rate(hw, vif,
+                                                       ap_legacy_rates,
+                                                       ap_mcs_rates);
+                               if (rc)
+                                       goto out;
 
-               rc = mwl8k_cmd_use_fixed_rate_sta(hw);
-               if (rc)
-                       goto out;
-       } else {
-               if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
-                   priv->ap_fw) {
-                       int idx;
-                       int rate;
+                               rc = mwl8k_cmd_use_fixed_rate_sta(hw);
+                               if (rc)
+                                       goto out;
+                       } else {
+                               int idx;
+                               int rate;
 
-                       /* Use AP firmware specific rate command.
-                        */
-                       idx = ffs(vif->bss_conf.basic_rates);
-                       if (idx)
-                               idx--;
+                               /* Use AP firmware specific rate command.
+                                */
+                               idx = ffs(vif->bss_conf.basic_rates);
+                               if (idx)
+                                       idx--;
 
-                       if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
-                               rate = mwl8k_rates_24[idx].hw_value;
-                       else
-                               rate = mwl8k_rates_50[idx].hw_value;
+                               if (hw->conf.chandef.chan->band ==
+                                   IEEE80211_BAND_2GHZ)
+                                       rate = mwl8k_rates_24[idx].hw_value;
+                               else
+                                       rate = mwl8k_rates_50[idx].hw_value;
 
-                       mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+                               mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+                       }
                }
        }
 
index c410180479e694880f6a3c73561fd9ce4b670658..7b5c554323c73ab40caddb47ca965560d787d127 100644 (file)
@@ -2321,8 +2321,6 @@ void free_orinocodev(struct orinoco_private *priv)
        struct orinoco_rx_data *rx_data, *temp;
        struct orinoco_scan_data *sd, *sdtemp;
 
-       wiphy_unregister(wiphy);
-
        /* If the tasklet is scheduled when we call tasklet_kill it
         * will run one final time. However the tasklet will only
         * drain priv->rx_list if the hw is still available. */
index c0a27377d9e26306ea7dc32b44efc6749723bcda..a956f965a1e5ec77e19c03e86ec87dc810461315 100644 (file)
@@ -118,6 +118,7 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
 
        orinoco_cs_release(link);
 
+       wiphy_unregister(priv_to_wiphy(priv));
        free_orinocodev(priv);
 }                              /* orinoco_cs_detach */
 
index 1b543e30eff7b5fe4dafaa9cd904ecec1a4ef5db..048693b6c6c24f06e701a2fa5856484c228065a6 100644 (file)
@@ -223,13 +223,15 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
        err = orinoco_if_add(priv, 0, 0, NULL);
        if (err) {
                printk(KERN_ERR PFX "orinoco_if_add() failed\n");
-               goto fail;
+               goto fail_wiphy;
        }
 
        pci_set_drvdata(pdev, priv);
 
        return 0;
 
+ fail_wiphy:
+       wiphy_unregister(priv_to_wiphy(priv));
  fail:
        free_irq(pdev->irq, priv);
 
@@ -263,6 +265,7 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev)
        iowrite16(0, card->bridge_io + 10);
 
        orinoco_if_del(priv);
+       wiphy_unregister(priv_to_wiphy(priv));
        free_irq(pdev->irq, priv);
        free_orinocodev(priv);
        pci_iounmap(pdev, priv->hw.iobase);
index 74219d59d7e1e5d7d918b42acac99b913157a499..4938a2208a37ce72371f245c78cf9de7d3c90345 100644 (file)
@@ -173,13 +173,15 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
        err = orinoco_if_add(priv, 0, 0, NULL);
        if (err) {
                printk(KERN_ERR PFX "orinoco_if_add() failed\n");
-               goto fail;
+               goto fail_wiphy;
        }
 
        pci_set_drvdata(pdev, priv);
 
        return 0;
 
+ fail_wiphy:
+       wiphy_unregister(priv_to_wiphy(priv));
  fail:
        free_irq(pdev->irq, priv);
 
@@ -203,6 +205,7 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev)
        struct orinoco_private *priv = pci_get_drvdata(pdev);
 
        orinoco_if_del(priv);
+       wiphy_unregister(priv_to_wiphy(priv));
        free_irq(pdev->irq, priv);
        free_orinocodev(priv);
        pci_iounmap(pdev, priv->hw.iobase);
index 8b045236b6e0111b8661b0df0b8e5d428710f11d..221352027779f75c5ceb0e605b15297e53b70c2f 100644 (file)
@@ -262,13 +262,15 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
        err = orinoco_if_add(priv, 0, 0, NULL);
        if (err) {
                printk(KERN_ERR PFX "orinoco_if_add() failed\n");
-               goto fail;
+               goto fail_wiphy;
        }
 
        pci_set_drvdata(pdev, priv);
 
        return 0;
 
+ fail_wiphy:
+       wiphy_unregister(priv_to_wiphy(priv));
  fail:
        free_irq(pdev->irq, priv);
 
@@ -299,6 +301,7 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev)
        struct orinoco_pci_card *card = priv->card;
 
        orinoco_if_del(priv);
+       wiphy_unregister(priv_to_wiphy(priv));
        free_irq(pdev->irq, priv);
        free_orinocodev(priv);
        pci_iounmap(pdev, priv->hw.iobase);
index 91f05442de28809a662290b66c26ac42be488144..26a57d773d3031147e5985cc004b3d80937d4081 100644 (file)
@@ -1502,6 +1502,7 @@ static inline void ezusb_delete(struct ezusb_priv *upriv)
        if (upriv->dev) {
                struct orinoco_private *priv = ndev_priv(upriv->dev);
                orinoco_if_del(priv);
+               wiphy_unregister(priv_to_wiphy(upriv));
                free_orinocodev(priv);
        }
 }
@@ -1695,6 +1696,7 @@ static int ezusb_probe(struct usb_interface *interface,
        if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) {
                upriv->dev = NULL;
                err("%s: orinoco_if_add() failed", __func__);
+               wiphy_unregister(priv_to_wiphy(priv));
                goto error;
        }
        upriv->dev = priv->ndev;
index 2b4ef256c6b9432675b2de9bae9b6cd95c6309d2..de62f5dcb62f7971fbc21adcc1212a1ed9ac68aa 100644 (file)
@@ -240,7 +240,6 @@ config RT2X00_LIB_USB
 
 config RT2X00_LIB
        tristate
-       select AVERAGE
 
 config RT2X00_LIB_FIRMWARE
        bool
index afba0739c3b87ded3965521b449408fef35d3d33..78cc035b2d1765576da6b17fe144425803cf7109 100644 (file)
@@ -54,7 +54,7 @@
 #define CSR_REG_BASE                   0x0400
 #define CSR_REG_SIZE                   0x0100
 #define EEPROM_BASE                    0x0000
-#define EEPROM_SIZE                    0x006a
+#define EEPROM_SIZE                    0x006e
 #define BBP_BASE                       0x0000
 #define BBP_SIZE                       0x0060
 #define RF_BASE                                0x0004
index 9bb398bed9bb68ba133d702c2e5be8b0b089a8ae..3282ddb766f4224a8e10e037fa7ea4332dfe0e27 100644 (file)
@@ -254,6 +254,8 @@ struct link_qual {
        int tx_failed;
 };
 
+DECLARE_EWMA(rssi, 1024, 8)
+
 /*
  * Antenna settings about the currently active link.
  */
@@ -285,7 +287,7 @@ struct link_ant {
         * Similar to the avg_rssi in the link_qual structure
         * this value is updated by using the walking average.
         */
-       struct ewma rssi_ant;
+       struct ewma_rssi rssi_ant;
 };
 
 /*
@@ -314,7 +316,7 @@ struct link {
        /*
         * Currently active average RSSI value
         */
-       struct ewma avg_rssi;
+       struct ewma_rssi avg_rssi;
 
        /*
         * Work structure for scheduling periodic link tuning.
index 9b941c0c12648d4a5b8e42c1e0679d9882febce1..017188e5a73628b2c0454fbb1916646da9ab650d 100644 (file)
  */
 #define DEFAULT_RSSI           -128
 
-/* Constants for EWMA calculations. */
-#define RT2X00_EWMA_FACTOR     1024
-#define RT2X00_EWMA_WEIGHT     8
-
-static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
+static inline int rt2x00link_get_avg_rssi(struct ewma_rssi *ewma)
 {
        unsigned long avg;
 
-       avg = ewma_read(ewma);
+       avg = ewma_rssi_read(ewma);
        if (avg)
                return -avg;
 
@@ -76,8 +72,7 @@ static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev,
 
 static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
 {
-       ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
-                 RT2X00_EWMA_WEIGHT);
+       ewma_rssi_init(&rt2x00dev->link.ant.rssi_ant);
 }
 
 static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
@@ -225,12 +220,12 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
        /*
         * Update global RSSI
         */
-       ewma_add(&link->avg_rssi, -rxdesc->rssi);
+       ewma_rssi_add(&link->avg_rssi, -rxdesc->rssi);
 
        /*
         * Update antenna RSSI
         */
-       ewma_add(&ant->rssi_ant, -rxdesc->rssi);
+       ewma_rssi_add(&ant->rssi_ant, -rxdesc->rssi);
 }
 
 void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
@@ -285,8 +280,7 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
         */
        rt2x00dev->link.count = 0;
        memset(qual, 0, sizeof(*qual));
-       ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
-                 RT2X00_EWMA_WEIGHT);
+       ewma_rssi_init(&rt2x00dev->link.avg_rssi);
 
        /*
         * Restore the VGC level as stored in the registers,
index c8058aa73ecfc1aa8f282cc80aa27b789f657906..629125658b8728678931848504be377e17849ecd 100644 (file)
@@ -200,7 +200,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl92c_firmware_header *pfwheader;
+       struct rtlwifi_firmware_header *pfwheader;
        u8 *pfwdata;
        u32 fwsize;
        int err;
@@ -209,7 +209,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
        if (!rtlhal->pfirmware)
                return 1;
 
-       pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+       pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
        pfwdata = rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
        RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
@@ -219,10 +219,10 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
                         "Firmware Version(%d), Signature(%#x), Size(%d)\n",
                          pfwheader->version, pfwheader->signature,
-                         (int)sizeof(struct rtl92c_firmware_header));
+                         (int)sizeof(struct rtlwifi_firmware_header));
 
-               pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-               fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+               pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+               fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
        }
 
        if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
index 05e944e451f442f2d610502348c7e8c2c0e8098f..21bd4a5337abc633686b1d56e749f70a3b42c944 100644 (file)
@@ -37,7 +37,7 @@
 #define FW_8192C_POLLING_TIMEOUT_COUNT         3000
 
 #define IS_FW_HEADER_EXIST(_pfwhdr)            \
-       ((_pfwhdr->signature&0xFFFF) == 0x88E1)
+       ((le16_to_cpu(_pfwhdr->signature) & 0xFFFF) == 0x88E1)
 #define USE_OLD_WOWLAN_DEBUG_FW                        0
 
 #define H2C_88E_RSVDPAGE_LOC_LEN               5
 #define        FW_PWR_STATE_ACTIVE     ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
 #define        FW_PWR_STATE_RF_OFF             0
 
-struct rtl92c_firmware_header {
-       u16 signature;
-       u8 category;
-       u8 function;
-       u16 version;
-       u8 subversion;
-       u8 rsvd1;
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       u16 ramcodesize;
-       u16 rsvd2;
-       u32 svnindex;
-       u32 rsvd3;
-       u32 rsvd4;
-       u32 rsvd5;
-};
-
 enum rtl8188e_h2c_cmd {
        H2C_88E_RSVDPAGE = 0,
        H2C_88E_JOINBSSRPT = 1,
index 0aca6f47487c2fe293f082ec562f13172dc13c38..03cbe4cf110b5f42deabe86397fff9fa41101974 100644 (file)
@@ -39,6 +39,7 @@
 #define BT_RSSI_STATE_SPECIAL_LOW      BIT_OFFSET_LEN_MASK_32(2, 1)
 #define BT_RSSI_STATE_BG_EDCA_LOW      BIT_OFFSET_LEN_MASK_32(3, 1)
 #define BT_RSSI_STATE_TXPOWER_LOW      BIT_OFFSET_LEN_MASK_32(4, 1)
+#define BT_MASK                                0x00ffffff
 
 #define RTLPRIV                        (struct rtl_priv *)
 #define GET_UNDECORATED_AVERAGE_RSSI(_priv)    \
@@ -312,7 +313,7 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
        struct dig_t *digtable = &rtlpriv->dm_digtable;
        u32 isbt;
 
-       /* modify DIG lower bound, deal with abnorally large false alarm */
+       /* modify DIG lower bound, deal with abnormally large false alarm */
        if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
                digtable->large_fa_hit++;
                if (digtable->forbidden_igi < digtable->cur_igvalue) {
@@ -1536,13 +1537,11 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
                return false;
 
        bt_state = rtl_read_byte(rtlpriv, 0x4fd);
-       bt_tx = rtl_read_dword(rtlpriv, 0x488);
-       bt_tx = bt_tx & 0x00ffffff;
-       bt_pri = rtl_read_dword(rtlpriv, 0x48c);
-       bt_pri = bt_pri & 0x00ffffff;
+       bt_tx = rtl_read_dword(rtlpriv, 0x488) & BT_MASK;
+       bt_pri = rtl_read_dword(rtlpriv, 0x48c) & BT_MASK;
        polling = rtl_read_dword(rtlpriv, 0x490);
 
-       if (bt_tx == 0xffffffff && bt_pri == 0xffffffff &&
+       if (bt_tx == BT_MASK && bt_pri == BT_MASK &&
            polling == 0xffffffff && bt_state == 0xff)
                return false;
 
index 14b819ea8b71886cc6e6b46ec949e5ce1a87c39d..43fcb25c885f15d691b6ddb1300599cd0cf6b347 100644 (file)
@@ -221,7 +221,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl92c_firmware_header *pfwheader;
+       struct rtlwifi_firmware_header *pfwheader;
        u8 *pfwdata;
        u32 fwsize;
        int err;
@@ -230,19 +230,19 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
        if (!rtlhal->pfirmware)
                return 1;
 
-       pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+       pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
        pfwdata = (u8 *)rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
        if (IS_FW_HEADER_EXIST(pfwheader)) {
                RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
                         "Firmware Version(%d), Signature(%#x),Size(%d)\n",
                          pfwheader->version, pfwheader->signature,
-                         (int)sizeof(struct rtl92c_firmware_header));
+                         (int)sizeof(struct rtlwifi_firmware_header));
 
-               rtlhal->fw_version = pfwheader->version;
+               rtlhal->fw_version = le16_to_cpu(pfwheader->version);
                rtlhal->fw_subversion = pfwheader->subversion;
-               pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-               fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+               pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+               fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
        }
 
        _rtl92c_enable_fw_download(hw, true);
index e9f4281f5067d9554ea22985ce9cc463a162a8e7..864806c19ca7b21043de97cdbbe79c9763b289b6 100644 (file)
        ((GET_CVID_CUT_VERSION(version) == \
                CHIP_VENDOR_UMC_B_CUT) ? true : false) : false)
 
-struct rtl92c_firmware_header {
-       __le16 signature;
-       u8 category;
-       u8 function;
-       __le16 version;
-       u8 subversion;
-       u8 rsvd1;
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       __le16 ramcodeSize;
-       __le16 rsvd2;
-       __le32 svnindex;
-       __le32 rsvd3;
-       __le32 rsvd4;
-       __le32 rsvd5;
-};
-
 #define pagenum_128(_len)      (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
 
 #define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val)                 \
index 7cf36619f25005e4395251ba663b2b15cd14e41a..25db369b5d18c4e62a9862015f572d18affdf53d 100644 (file)
@@ -818,26 +818,29 @@ static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw)
 
 static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
 {
-       u16                     value16;
-
+       u16 value16;
+       u32 value32;
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
-       mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
-                     RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
-                     RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
-       rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+       value32 = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
+                  RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
+                  RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&value32));
        /* Accept all multicast address */
        rtl_write_dword(rtlpriv,  REG_MAR, 0xFFFFFFFF);
        rtl_write_dword(rtlpriv,  REG_MAR + 4, 0xFFFFFFFF);
        /* Accept all management frames */
        value16 = 0xFFFF;
-       rtl92c_set_mgt_filter(hw, value16);
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MGT_FILTER,
+                                     (u8 *)(&value16));
        /* Reject all control frame - default value is 0 */
-       rtl92c_set_ctrl_filter(hw, 0x0);
+       value16 = 0x0;
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CTRL_FILTER,
+                                     (u8 *)(&value16));
        /* Accept all data frames */
        value16 = 0xFFFF;
-       rtl92c_set_data_filter(hw, value16);
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DATA_FILTER,
+                                     (u8 *)(&value16));
 }
 
 static void _rtl92cu_init_beacon_parameters(struct ieee80211_hw *hw)
@@ -988,17 +991,6 @@ static void _InitPABias(struct ieee80211_hw *hw)
        }
 }
 
-static void _update_mac_setting(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
-       mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR);
-       mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
-       mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
-       mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
-}
-
 int rtl92cu_hw_init(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1068,7 +1060,6 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
        }
        _rtl92cu_hw_configure(hw);
        _InitPABias(hw);
-       _update_mac_setting(hw);
        rtl92c_dm_init(hw);
 exit:
        local_irq_restore(flags);
@@ -1620,7 +1611,6 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
        enum wireless_mode wirelessmode = mac->mode;
        u8 idx = 0;
 
@@ -1829,63 +1819,10 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                                u4b_ac_param);
                                break;
                        default:
-                               RT_ASSERT(false,
-                                         "SetHwReg8185(): invalid aci: %d !\n",
+                               RT_ASSERT(false, "invalid aci: %d !\n",
                                          e_aci);
                                break;
                        }
-                       if (rtlusb->acm_method != EACMWAY2_SW)
-                               rtlpriv->cfg->ops->set_hw_reg(hw,
-                                        HW_VAR_ACM_CTRL, &e_aci);
-                       break;
-               }
-       case HW_VAR_ACM_CTRL:{
-                       u8 e_aci = *val;
-                       union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
-                                                       (&(mac->ac[0].aifs));
-                       u8 acm = p_aci_aifsn->f.acm;
-                       u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
-
-                       acm_ctrl =
-                           acm_ctrl | ((rtlusb->acm_method == 2) ? 0x0 : 0x1);
-                       if (acm) {
-                               switch (e_aci) {
-                               case AC0_BE:
-                                       acm_ctrl |= AcmHw_BeqEn;
-                                       break;
-                               case AC2_VI:
-                                       acm_ctrl |= AcmHw_ViqEn;
-                                       break;
-                               case AC3_VO:
-                                       acm_ctrl |= AcmHw_VoqEn;
-                                       break;
-                               default:
-                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
-                                                "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
-                                                acm);
-                                       break;
-                               }
-                       } else {
-                               switch (e_aci) {
-                               case AC0_BE:
-                                       acm_ctrl &= (~AcmHw_BeqEn);
-                                       break;
-                               case AC2_VI:
-                                       acm_ctrl &= (~AcmHw_ViqEn);
-                                       break;
-                               case AC3_VO:
-                                       acm_ctrl &= (~AcmHw_VoqEn);
-                                       break;
-                               default:
-                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                                "switch case not processed\n");
-                                       break;
-                               }
-                       }
-                       RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
-                                "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
-                                acm_ctrl);
-                       rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
                        break;
                }
        case HW_VAR_RCR:{
@@ -1999,12 +1936,15 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
        case HW_VAR_MGT_FILTER:
                rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val);
+               mac->rx_mgt_filter = *(u16 *)val;
                break;
        case HW_VAR_CTRL_FILTER:
                rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val);
+               mac->rx_ctrl_filter = *(u16 *)val;
                break;
        case HW_VAR_DATA_FILTER:
                rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
+               mac->rx_data_filter = *(u16 *)val;
                break;
        default:
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index 1c55a002d4bd9d9cc48986483a29cbfd5228b0ab..035713311a4ab3cdd160d079fa7376fd06c6c7ad 100644 (file)
@@ -393,59 +393,9 @@ void rtl92c_disable_interrupt(struct ieee80211_hw *hw)
 void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       u32 u4b_ac_param;
 
        rtl92c_dm_init_edca_turbo(hw);
-       u4b_ac_param = (u32) mac->ac[aci].aifs;
-       u4b_ac_param |=
-           ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
-           AC_PARAM_ECW_MIN_OFFSET;
-       u4b_ac_param |=
-           ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
-           AC_PARAM_ECW_MAX_OFFSET;
-       u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
-                        AC_PARAM_TXOP_OFFSET;
-       RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD, "queue:%x, ac_param:%x\n",
-                aci, u4b_ac_param);
-       switch (aci) {
-       case AC1_BK:
-               rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
-               break;
-       case AC0_BE:
-               rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
-               break;
-       case AC2_VI:
-               rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
-               break;
-       case AC3_VO:
-               rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
-               break;
-       default:
-               RT_ASSERT(false, "invalid aci: %d !\n", aci);
-               break;
-       }
-}
-
-/*-------------------------------------------------------------------------
- * HW MAC Address
- *-------------------------------------------------------------------------*/
-void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
-{
-       u32 i;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       for (i = 0 ; i < ETH_ALEN ; i++)
-               rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));
-
-       RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
-                "MAC Address: %02X-%02X-%02X-%02X-%02X-%02X\n",
-                rtl_read_byte(rtlpriv, REG_MACID),
-                rtl_read_byte(rtlpriv, REG_MACID+1),
-                rtl_read_byte(rtlpriv, REG_MACID+2),
-                rtl_read_byte(rtlpriv, REG_MACID+3),
-                rtl_read_byte(rtlpriv, REG_MACID+4),
-                rtl_read_byte(rtlpriv, REG_MACID+5));
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, (u8 *)&aci);
 }
 
 void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
@@ -644,47 +594,6 @@ void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
        rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
 }
 
-u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
-}
-
-void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
-}
-
-u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
-}
-
-void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
-}
-
-u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       return rtl_read_word(rtlpriv,  REG_RXFLTMAP2);
-}
-
-void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
-}
 /*==============================================================*/
 
 static u8 _rtl92c_query_rxpwrpercentage(char antpower)
index e34f0f14ccd775954e56840fdddd1a736dc71aae..553a4bfac66894a2999a61314ea5c6a683aa4854 100644 (file)
@@ -48,7 +48,6 @@ void rtl92c_set_qos(struct ieee80211_hw *hw, int aci);
 /*---------------------------------------------------------------
  *     Hardware init functions
  *---------------------------------------------------------------*/
-void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr);
 void rtl92c_init_interrupt(struct ieee80211_hw *hw);
 void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size);
 
@@ -73,15 +72,6 @@ void rtl92c_init_retry_function(struct ieee80211_hw *hw);
 void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
 void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
 
-/* For filter */
-u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw);
-void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter);
-u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw);
-void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter);
-u16 rtl92c_get_data_filter(struct ieee80211_hw *hw);
-void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
-
-
 u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
 
 struct rx_fwinfo_92c {
index 587b8c505a7625e11b810fefc0b63af3367708e8..7c1db7e7572dc3fa44d6f7849250f537206475e1 100644 (file)
@@ -420,7 +420,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
                 "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
                 de_digtable->recover_cnt, de_digtable->rx_gain_min);
 
-       /* deal with abnorally large false alarm */
+       /* deal with abnormally large false alarm */
        if (falsealm_cnt->cnt_all > 10000) {
                RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
                         "dm_DIG(): Abnormally false alarm case\n");
index 1646e7c3d0f8cfe35e584697cb629cfcc1b7da72..8a38daa316cb502164634e9d58d8af507e2eaef7 100644 (file)
 #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val)    \
        SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
 
-struct rtl92d_firmware_header {
-       u16 signature;
-       u8 category;
-       u8 function;
-       u16 version;
-       u8 subversion;
-       u8 rsvd1;
-
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       u16 ramcodeSize;
-       u16 rsvd2;
-
-       u32 svnindex;
-       u32 rsvd3;
-
-       u32 rsvd4;
-       u32 rsvd5;
-};
-
 int rtl92d_download_fw(struct ieee80211_hw *hw);
 void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
                         u32 cmd_len, u8 *p_cmdbuffer);
index 232865cc3ffdd01a7672d8e4cf38203f33a8285d..0708eedd967132fc947e7b4b79aab22daafd3e10 100644 (file)
@@ -198,7 +198,7 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl92c_firmware_header *pfwheader;
+       struct rtlwifi_firmware_header *pfwheader;
        u8 *pfwdata;
        u32 fwsize;
        int err;
@@ -207,8 +207,8 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
        if (!rtlhal->pfirmware)
                return 1;
 
-       pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
-       rtlhal->fw_version = pfwheader->version;
+       pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+       rtlhal->fw_version = le16_to_cpu(pfwheader->version);
        rtlhal->fw_subversion = pfwheader->subversion;
        pfwdata = (u8 *)rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
@@ -219,10 +219,10 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
                RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
                         "Firmware Version(%d), Signature(%#x),Size(%d)\n",
                          pfwheader->version, pfwheader->signature,
-                         (int)sizeof(struct rtl92c_firmware_header));
+                         (int)sizeof(struct rtlwifi_firmware_header));
 
-               pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-               fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+               pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+               fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
        } else {
                RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
                         "Firmware no Header, Signature(%#x)\n",
index 3e2a48e5fb4deffb1ad2a5f7b78ed3050aef4fe9..069da1e7e80a481fbd17179917fdca018536a77d 100644 (file)
@@ -33,7 +33,7 @@
 #define FW_8192C_POLLING_TIMEOUT_COUNT         3000
 
 #define IS_FW_HEADER_EXIST(_pfwhdr)    \
-       ((_pfwhdr->signature&0xFFF0) == 0x92E0)
+       ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x92E0)
 #define USE_OLD_WOWLAN_DEBUG_FW 0
 
 #define H2C_92E_RSVDPAGE_LOC_LEN               5
 #define        FW_PWR_STATE_ACTIVE     ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
 #define        FW_PWR_STATE_RF_OFF     0
 
-struct rtl92c_firmware_header {
-       u16 signature;
-       u8 category;
-       u8 function;
-       u16 version;
-       u8 subversion;
-       u8 rsvd1;
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       u16 ramcodesize;
-       u16 rsvd2;
-       u32 svnindex;
-       u32 rsvd3;
-       u32 rsvd4;
-       u32 rsvd5;
-};
-
 enum rtl8192e_h2c_cmd {
        H2C_92E_RSVDPAGE = 0,
        H2C_92E_MSRRPT = 1,
index 8280bab43df4ce47755a1ab545e8dc2cf5aad086..3859b3e3d158d5e5dde4914073320283a2455dff 100644 (file)
@@ -205,9 +205,9 @@ bool rtl8723e_get_btc_status(void)
        return true;
 }
 
-static bool is_fw_header(struct rtl8723e_firmware_header *hdr)
+static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
 {
-       return (hdr->signature & 0xfff0) == 0x2300;
+       return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x2300;
 }
 
 static struct rtl_hal_ops rtl8723e_hal_ops = {
index 7bf88d9dcdc3fc4732170c121ef78d5a85ca3fa9..d091f1d5f91eb2bbd5f399e01187c7de47cf4aab 100644 (file)
@@ -209,9 +209,9 @@ bool rtl8723be_get_btc_status(void)
        return true;
 }
 
-static bool is_fw_header(struct rtl8723e_firmware_header *hdr)
+static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
 {
-       return (hdr->signature & 0xfff0) == 0x5300;
+       return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x5300;
 }
 
 static struct rtl_hal_ops rtl8723be_hal_ops = {
index dd698e7e9aceffb90c1e5281f6da123f6635fc56..a2f5e89bedfed333b6f157e431b61239bd6c593c 100644 (file)
@@ -253,7 +253,7 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl8723e_firmware_header *pfwheader;
+       struct rtlwifi_firmware_header *pfwheader;
        u8 *pfwdata;
        u32 fwsize;
        int err;
@@ -263,7 +263,7 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
        if (!rtlhal->pfirmware)
                return 1;
 
-       pfwheader = (struct rtl8723e_firmware_header *)rtlhal->pfirmware;
+       pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
        pfwdata = rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
 
@@ -275,10 +275,10 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
                         "Firmware Version(%d), Signature(%#x), Size(%d)\n",
                         pfwheader->version, pfwheader->signature,
-                        (int)sizeof(struct rtl8723e_firmware_header));
+                        (int)sizeof(struct rtlwifi_firmware_header));
 
-               pfwdata = pfwdata + sizeof(struct rtl8723e_firmware_header);
-               fwsize = fwsize - sizeof(struct rtl8723e_firmware_header);
+               pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+               fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
        }
 
        if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) {
index 3ebafc80972fc1a3f5aae3f9071e96749ba966d2..8ea372d1626e5e75432c363f1b592df04a710817 100644 (file)
@@ -50,25 +50,6 @@ enum version_8723e {
        VERSION_UNKNOWN = 0xFF,
 };
 
-struct rtl8723e_firmware_header {
-       u16 signature;
-       u8 category;
-       u8 function;
-       u16 version;
-       u8 subversion;
-       u8 rsvd1;
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       u16 ramcodesize;
-       u16 rsvd2;
-       u32 svnindex;
-       u32 rsvd3;
-       u32 rsvd4;
-       u32 rsvd5;
-};
-
 enum rtl8723be_cmd {
        H2C_8723BE_RSVDPAGE = 0,
        H2C_8723BE_JOINBSSRPT = 1,
index 95e95626b6325c29c43ceb440b4b3ea0b352874e..525eb234627c965daa3d0c7e9611163b9734df3a 100644 (file)
@@ -210,7 +210,7 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl8821a_firmware_header *pfwheader;
+       struct rtlwifi_firmware_header *pfwheader;
        u8 *pfwdata;
        u32 fwsize;
        int err;
@@ -228,8 +228,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
                        return 1;
 
                pfwheader =
-                 (struct rtl8821a_firmware_header *)rtlhal->wowlan_firmware;
-               rtlhal->fw_version = pfwheader->version;
+                 (struct rtlwifi_firmware_header *)rtlhal->wowlan_firmware;
+               rtlhal->fw_version = le16_to_cpu(pfwheader->version);
                rtlhal->fw_subversion = pfwheader->subversion;
                pfwdata = (u8 *)rtlhal->wowlan_firmware;
                fwsize = rtlhal->wowlan_fwsize;
@@ -238,8 +238,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
                        return 1;
 
                pfwheader =
-                 (struct rtl8821a_firmware_header *)rtlhal->pfirmware;
-               rtlhal->fw_version = pfwheader->version;
+                 (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+               rtlhal->fw_version = le16_to_cpu(pfwheader->version);
                rtlhal->fw_subversion = pfwheader->subversion;
                pfwdata = (u8 *)rtlhal->pfirmware;
                fwsize = rtlhal->fwsize;
@@ -255,8 +255,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
                         "Firmware Version(%d), Signature(%#x)\n",
                         pfwheader->version, pfwheader->signature);
 
-               pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header);
-               fwsize = fwsize - sizeof(struct rtl8821a_firmware_header);
+               pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+               fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
        }
 
        if (rtlhal->mac_func_enable) {
index 591c14c0b9b52bd9247676cf972b18853874d07a..8f5b4aade3c91f356748dffa41318ce557c10bd5 100644 (file)
 #define FW_8821AE_POLLING_TIMEOUT_COUNT        6000
 
 #define IS_FW_HEADER_EXIST_8812(_pfwhdr)       \
-       ((_pfwhdr->signature&0xFFF0) == 0x9500)
+       ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x9500)
 
 #define IS_FW_HEADER_EXIST_8821(_pfwhdr)       \
-       ((_pfwhdr->signature&0xFFF0) == 0x2100)
+       ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x2100)
 
 #define USE_OLD_WOWLAN_DEBUG_FW 0
 
 #define        FW_PWR_STATE_ACTIVE     ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
 #define        FW_PWR_STATE_RF_OFF     0
 
-struct rtl8821a_firmware_header {
-       u16 signature;
-       u8 category;
-       u8 function;
-       u16 version;
-       u8 subversion;
-       u8 rsvd1;
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       u16 ramcodeSize;
-       u16 rsvd2;
-       u32 svnindex;
-       u32 rsvd3;
-       u32 rsvd4;
-       u32 rsvd5;
-};
-
 enum rtl8812_c2h_evt {
        C2H_8812_DBG = 0,
        C2H_8812_LB = 1,
index 2b770b5e2620fc629ea2eca7ef1b499b300eaf8d..b90ca618b123209a1724bc808c4fdfe8ea330a69 100644 (file)
@@ -222,6 +222,25 @@ enum rf_tx_num {
 #define        WOL_REASON_REALWOW_V2_WAKEUPPKT BIT(9)
 #define        WOL_REASON_REALWOW_V2_ACKLOST   BIT(10)
 
+struct rtlwifi_firmware_header {
+       __le16 signature;
+       u8 category;
+       u8 function;
+       __le16 version;
+       u8 subversion;
+       u8 rsvd1;
+       u8 month;
+       u8 date;
+       u8 hour;
+       u8 minute;
+       __le16 ramcodeSize;
+       __le16 rsvd2;
+       __le32 svnindex;
+       __le32 rsvd3;
+       __le32 rsvd4;
+       __le32 rsvd5;
+};
+
 struct txpower_info_2g {
        u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
        u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
@@ -2064,16 +2083,12 @@ struct rtl_tcb_desc {
        bool tx_enable_sw_calc_duration;
 };
 
-struct rtl92c_firmware_header;
-
 struct rtl_wow_pattern {
        u8 type;
        u16 crc;
        u32 mask[4];
 };
 
-struct rtl8723e_firmware_header;
-
 struct rtl_hal_ops {
        int (*init_sw_vars) (struct ieee80211_hw *hw);
        void (*deinit_sw_vars) (struct ieee80211_hw *hw);
@@ -2177,7 +2192,7 @@ struct rtl_hal_ops {
        void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
                              u32 cmd_len, u8 *p_cmdbuffer);
        bool (*get_btc_status) (void);
-       bool (*is_fw_header)(struct rtl8723e_firmware_header *hdr);
+       bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr);
        u32 (*rx_command_packet)(struct ieee80211_hw *hw,
                                 struct rtl_stats status, struct sk_buff *skb);
        void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
index 0c0d5cd98514207c25b3faf804573f13af78976e..7c355fff2c5ea50616407545639b32aa7d558d8f 100644 (file)
@@ -118,7 +118,11 @@ static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        if (passive)
                scan_options |= WL1271_SCAN_OPT_PASSIVE;
 
-       cmd->params.role_id = wlvif->role_id;
+       /* scan on the dev role if the regular one is not started */
+       if (wlcore_is_p2p_mgmt(wlvif))
+               cmd->params.role_id = wlvif->dev_role_id;
+       else
+               cmd->params.role_id = wlvif->role_id;
 
        if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
                ret = -EINVAL;
index 67f2a0eec8542190ea6d8c374240492d9b95ed73..4be0409308cb6f2a9d315b7ed77d2150f65ad6f0 100644 (file)
@@ -282,3 +282,30 @@ out:
        kfree(acx);
        return ret;
 }
+
+int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl)
+{
+       struct acx_dynamic_fw_traces_cfg *acx;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx dynamic fw traces config %d",
+                    wl->dynamic_fw_traces);
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       acx->dynamic_fw_traces = cpu_to_le32(wl->dynamic_fw_traces);
+
+       ret = wl1271_cmd_configure(wl, ACX_DYNAMIC_TRACES_CFG,
+                                  acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx config dynamic fw traces failed: %d", ret);
+               goto out;
+       }
+out:
+       kfree(acx);
+       return ret;
+}
index 4afccd4b946752b6312bc2301ab13e02c14944da..342a2993ef986c3342c02b34188ff4cc5f0fd5be 100644 (file)
@@ -35,7 +35,8 @@ enum {
        ACX_PEER_CAP                     = 0x0056,
        ACX_INTERRUPT_NOTIFY             = 0x0057,
        ACX_RX_BA_FILTER                 = 0x0058,
-       ACX_AP_SLEEP_CFG                 = 0x0059
+       ACX_AP_SLEEP_CFG                 = 0x0059,
+       ACX_DYNAMIC_TRACES_CFG           = 0x005A,
 };
 
 /* numbers of bits the length field takes (add 1 for the actual number) */
@@ -92,27 +93,26 @@ struct wl18xx_acx_checksum_state {
 
 
 struct wl18xx_acx_error_stats {
-       u32 error_frame;
-       u32 error_null_Frame_tx_start;
-       u32 error_numll_frame_cts_start;
-       u32 error_bar_retry;
-       u32 error_frame_cts_nul_flid;
-} __packed;
-
-struct wl18xx_acx_debug_stats {
-       u32 debug1;
-       u32 debug2;
-       u32 debug3;
-       u32 debug4;
-       u32 debug5;
-       u32 debug6;
-} __packed;
-
-struct wl18xx_acx_ring_stats {
-       u32 prepared_descs;
-       u32 tx_cmplt;
+       u32 error_frame_non_ctrl;
+       u32 error_frame_ctrl;
+       u32 error_frame_during_protection;
+       u32 null_frame_tx_start;
+       u32 null_frame_cts_start;
+       u32 bar_retry;
+       u32 num_frame_cts_nul_flid;
+       u32 tx_abort_failure;
+       u32 tx_resume_failure;
+       u32 rx_cmplt_db_overflow_cnt;
+       u32 elp_while_rx_exch;
+       u32 elp_while_tx_exch;
+       u32 elp_while_tx;
+       u32 elp_while_nvic_pending;
+       u32 rx_excessive_frame_len;
+       u32 burst_mismatch;
+       u32 tbc_exch_mismatch;
 } __packed;
 
+#define NUM_OF_RATES_INDEXES 30
 struct wl18xx_acx_tx_stats {
        u32 tx_prepared_descs;
        u32 tx_cmplt;
@@ -122,7 +122,7 @@ struct wl18xx_acx_tx_stats {
        u32 tx_data_programmed;
        u32 tx_burst_programmed;
        u32 tx_starts;
-       u32 tx_imm_resp;
+       u32 tx_stop;
        u32 tx_start_templates;
        u32 tx_start_int_templates;
        u32 tx_start_fw_gen;
@@ -131,13 +131,14 @@ struct wl18xx_acx_tx_stats {
        u32 tx_exch;
        u32 tx_retry_template;
        u32 tx_retry_data;
+       u32 tx_retry_per_rate[NUM_OF_RATES_INDEXES];
        u32 tx_exch_pending;
        u32 tx_exch_expiry;
        u32 tx_done_template;
        u32 tx_done_data;
        u32 tx_done_int_template;
-       u32 tx_frame_checksum;
-       u32 tx_checksum_result;
+       u32 tx_cfe1;
+       u32 tx_cfe2;
        u32 frag_called;
        u32 frag_mpdu_alloc_failed;
        u32 frag_init_called;
@@ -165,11 +166,8 @@ struct wl18xx_acx_rx_stats {
        u32 rx_cmplt_task;
        u32 rx_phy_hdr;
        u32 rx_timeout;
+       u32 rx_rts_timeout;
        u32 rx_timeout_wa;
-       u32 rx_wa_density_dropped_frame;
-       u32 rx_wa_ba_not_expected;
-       u32 rx_frame_checksum;
-       u32 rx_checksum_result;
        u32 defrag_called;
        u32 defrag_init_called;
        u32 defrag_in_process_called;
@@ -179,6 +177,7 @@ struct wl18xx_acx_rx_stats {
        u32 decrypt_key_not_found;
        u32 defrag_need_decrypt;
        u32 rx_tkip_replays;
+       u32 rx_xfr;
 } __packed;
 
 struct wl18xx_acx_isr_stats {
@@ -193,21 +192,13 @@ struct wl18xx_acx_pwr_stats {
        u32 connection_out_of_sync;
        u32 cont_miss_bcns_spread[PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD];
        u32 rcvd_awake_bcns_cnt;
-} __packed;
-
-struct wl18xx_acx_event_stats {
-       u32 calibration;
-       u32 rx_mismatch;
-       u32 rx_mem_empty;
-} __packed;
-
-struct wl18xx_acx_ps_poll_stats {
-       u32 ps_poll_timeouts;
-       u32 upsd_timeouts;
-       u32 upsd_max_ap_turn;
-       u32 ps_poll_max_ap_turn;
-       u32 ps_poll_utilization;
-       u32 upsd_utilization;
+       u32 sleep_time_count;
+       u32 sleep_time_avg;
+       u32 sleep_cycle_avg;
+       u32 sleep_percent;
+       u32 ap_sleep_active_conf;
+       u32 ap_sleep_user_conf;
+       u32 ap_sleep_counter;
 } __packed;
 
 struct wl18xx_acx_rx_filter_stats {
@@ -227,11 +218,11 @@ struct wl18xx_acx_rx_rate_stats {
 } __packed;
 
 #define AGGR_STATS_TX_AGG      16
-#define AGGR_STATS_TX_RATE     16
 #define AGGR_STATS_RX_SIZE_LEN 16
 
 struct wl18xx_acx_aggr_stats {
-       u32 tx_agg_vs_rate[AGGR_STATS_TX_AGG * AGGR_STATS_TX_RATE];
+       u32 tx_agg_rate[AGGR_STATS_TX_AGG];
+       u32 tx_agg_len[AGGR_STATS_TX_AGG];
        u32 rx_size[AGGR_STATS_RX_SIZE_LEN];
 } __packed;
 
@@ -240,8 +231,6 @@ struct wl18xx_acx_aggr_stats {
 struct wl18xx_acx_pipeline_stats {
        u32 hs_tx_stat_fifo_int;
        u32 hs_rx_stat_fifo_int;
-       u32 tcp_tx_stat_fifo_int;
-       u32 tcp_rx_stat_fifo_int;
        u32 enc_tx_stat_fifo_int;
        u32 enc_rx_stat_fifo_int;
        u32 rx_complete_stat_fifo_int;
@@ -249,38 +238,61 @@ struct wl18xx_acx_pipeline_stats {
        u32 post_proc_swi;
        u32 sec_frag_swi;
        u32 pre_to_defrag_swi;
-       u32 defrag_to_csum_swi;
-       u32 csum_to_rx_xfer_swi;
+       u32 defrag_to_rx_xfer_swi;
        u32 dec_packet_in;
        u32 dec_packet_in_fifo_full;
        u32 dec_packet_out;
-       u32 cs_rx_packet_in;
-       u32 cs_rx_packet_out;
        u16 pipeline_fifo_full[PIPE_STATS_HW_FIFO];
+       u16 padding;
+} __packed;
+
+#define DIVERSITY_STATS_NUM_OF_ANT     2
+
+struct wl18xx_acx_diversity_stats {
+       u32 num_of_packets_per_ant[DIVERSITY_STATS_NUM_OF_ANT];
+       u32 total_num_of_toggles;
 } __packed;
 
-struct wl18xx_acx_mem_stats {
-       u32 rx_free_mem_blks;
-       u32 tx_free_mem_blks;
-       u32 fwlog_free_mem_blks;
-       u32 fw_gen_free_mem_blks;
+struct wl18xx_acx_thermal_stats {
+       u16 irq_thr_low;
+       u16 irq_thr_high;
+       u16 tx_stop;
+       u16 tx_resume;
+       u16 false_irq;
+       u16 adc_source_unexpected;
+} __packed;
+
+#define WL18XX_NUM_OF_CALIBRATIONS_ERRORS 18
+struct wl18xx_acx_calib_failure_stats {
+       u16 fail_count[WL18XX_NUM_OF_CALIBRATIONS_ERRORS];
+       u32 calib_count;
+} __packed;
+
+struct wl18xx_roaming_stats {
+       s32 rssi_level;
+} __packed;
+
+struct wl18xx_dfs_stats {
+       u32 num_of_radar_detections;
 } __packed;
 
 struct wl18xx_acx_statistics {
        struct acx_header header;
 
        struct wl18xx_acx_error_stats           error;
-       struct wl18xx_acx_debug_stats           debug;
        struct wl18xx_acx_tx_stats              tx;
        struct wl18xx_acx_rx_stats              rx;
        struct wl18xx_acx_isr_stats             isr;
        struct wl18xx_acx_pwr_stats             pwr;
-       struct wl18xx_acx_ps_poll_stats         ps_poll;
        struct wl18xx_acx_rx_filter_stats       rx_filter;
        struct wl18xx_acx_rx_rate_stats         rx_rate;
        struct wl18xx_acx_aggr_stats            aggr_size;
        struct wl18xx_acx_pipeline_stats        pipeline;
-       struct wl18xx_acx_mem_stats             mem;
+       struct wl18xx_acx_diversity_stats       diversity;
+       struct wl18xx_acx_thermal_stats         thermal;
+       struct wl18xx_acx_calib_failure_stats   calib;
+       struct wl18xx_roaming_stats             roaming;
+       struct wl18xx_dfs_stats                 dfs;
 } __packed;
 
 struct wl18xx_acx_clear_statistics {
@@ -367,6 +379,15 @@ struct acx_ap_sleep_cfg {
        u8 idle_conn_thresh;
 } __packed;
 
+/*
+ * ACX_DYNAMIC_TRACES_CFG
+ * configure the FW dynamic traces
+ */
+struct acx_dynamic_fw_traces_cfg {
+       struct acx_header header;
+       __le32 dynamic_fw_traces;
+} __packed;
+
 int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
                                  u32 sdio_blk_size, u32 extra_mem_blks,
                                  u32 len_field_size);
@@ -380,5 +401,6 @@ int wl18xx_acx_set_peer_cap(struct wl1271 *wl,
 int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action);
 int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action);
 int wl18xx_acx_ap_sleep(struct wl1271 *wl);
+int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl);
 
 #endif /* __WL18XX_ACX_H__ */
index 5fbd2230f372f2a17d763be90efa3228bca5bdbd..4edfe28395f03be65d2deabbd15d03ccc13a1af0 100644 (file)
        DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics)
 
 
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug1, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug2, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug3, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug4, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug5, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug6, "%u");
-
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_null_Frame_tx_start, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_numll_frame_cts_start, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_bar_retry, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_cts_nul_flid, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_non_ctrl, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_ctrl, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_during_protection, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_tx_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_cts_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, bar_retry, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, num_frame_cts_nul_flid, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_abort_failure, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_resume_failure, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_cmplt_db_overflow_cnt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_rx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_nvic_pending, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_excessive_frame_len, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, burst_mismatch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tbc_exch_mismatch, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u");
@@ -57,7 +62,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_programmed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_stop, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u");
@@ -66,13 +71,15 @@ WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_null_frame, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(tx, tx_retry_per_rate,
+                                 NUM_OF_RATES_INDEXES);
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe1, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe2, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u");
@@ -97,11 +104,8 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_pre_complt, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_rts_timeout, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_density_dropped_frame, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_ba_not_expected, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_frame_checksum, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_checksum_result, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u");
@@ -111,6 +115,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_decrypt_failed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_xfr, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
 
@@ -120,14 +125,13 @@ WL18XX_DEBUGFS_FWSTATS_FILE(pwr, connection_out_of_sync, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread,
                                  PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD);
 WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u");
-
-
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_timeouts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_timeouts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_max_ap_turn, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_max_ap_turn, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_utilization, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_utilization, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_count, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_avg, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_cycle_avg, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_percent, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_active_conf, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_user_conf, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_counter, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u");
@@ -141,14 +145,14 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
 
-WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
-                                 AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_rate,
+                                 AGGR_STATS_TX_AGG);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_len,
+                                 AGGR_STATS_TX_AGG);
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size,
                                  AGGR_STATS_RX_SIZE_LEN);
 
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_tx_stat_fifo_int, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_rx_stat_fifo_int, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u");
@@ -156,21 +160,32 @@ WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_proc_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_csum_swi, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, csum_to_rx_xfer_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_rx_xfer_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_in, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_out, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full,
                                  PIPE_STATS_HW_FIFO);
 
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, rx_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, tx_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, fwlog_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, fw_gen_free_mem_blks, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(diversity, num_of_packets_per_ant,
+                                 DIVERSITY_STATS_NUM_OF_ANT);
+WL18XX_DEBUGFS_FWSTATS_FILE(diversity, total_num_of_toggles, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_low, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_high, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_stop, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_resume, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, false_irq, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, adc_source_unexpected, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(calib, fail_count,
+                                 WL18XX_NUM_OF_CALIBRATIONS_ERRORS);
+WL18XX_DEBUGFS_FWSTATS_FILE(calib, calib_count, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(roaming, rssi_level, "%d");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(dfs, num_of_radar_detections, "%d");
 
 static ssize_t conf_read(struct file *file, char __user *user_buf,
                         size_t count, loff_t *ppos)
@@ -281,6 +296,55 @@ static const struct file_operations radar_detection_ops = {
        .llseek = default_llseek,
 };
 
+static ssize_t dynamic_fw_traces_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct wl1271 *wl = file->private_data;
+       unsigned long value;
+       int ret;
+
+       ret = kstrtoul_from_user(user_buf, count, 0, &value);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&wl->mutex);
+
+       wl->dynamic_fw_traces = value;
+
+       if (unlikely(wl->state != WLCORE_STATE_ON))
+               goto out;
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       ret = wl18xx_acx_dynamic_fw_traces(wl);
+       if (ret < 0)
+               count = ret;
+
+       wl1271_ps_elp_sleep(wl);
+out:
+       mutex_unlock(&wl->mutex);
+       return count;
+}
+
+static ssize_t dynamic_fw_traces_read(struct file *file,
+                                       char __user *userbuf,
+                                       size_t count, loff_t *ppos)
+{
+       struct wl1271 *wl = file->private_data;
+       return wl1271_format_buffer(userbuf, count, ppos,
+                                   "%d\n", wl->dynamic_fw_traces);
+}
+
+static const struct file_operations dynamic_fw_traces_ops = {
+       .read = dynamic_fw_traces_read,
+       .write = dynamic_fw_traces_write,
+       .open = simple_open,
+       .llseek = default_llseek,
+};
+
 int wl18xx_debugfs_add_files(struct wl1271 *wl,
                             struct dentry *rootdir)
 {
@@ -301,18 +365,23 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
 
        DEBUGFS_ADD(clear_fw_stats, stats);
 
-       DEBUGFS_FWSTATS_ADD(debug, debug1);
-       DEBUGFS_FWSTATS_ADD(debug, debug2);
-       DEBUGFS_FWSTATS_ADD(debug, debug3);
-       DEBUGFS_FWSTATS_ADD(debug, debug4);
-       DEBUGFS_FWSTATS_ADD(debug, debug5);
-       DEBUGFS_FWSTATS_ADD(debug, debug6);
-
-       DEBUGFS_FWSTATS_ADD(error, error_frame);
-       DEBUGFS_FWSTATS_ADD(error, error_null_Frame_tx_start);
-       DEBUGFS_FWSTATS_ADD(error, error_numll_frame_cts_start);
-       DEBUGFS_FWSTATS_ADD(error, error_bar_retry);
-       DEBUGFS_FWSTATS_ADD(error, error_frame_cts_nul_flid);
+       DEBUGFS_FWSTATS_ADD(error, error_frame_non_ctrl);
+       DEBUGFS_FWSTATS_ADD(error, error_frame_ctrl);
+       DEBUGFS_FWSTATS_ADD(error, error_frame_during_protection);
+       DEBUGFS_FWSTATS_ADD(error, null_frame_tx_start);
+       DEBUGFS_FWSTATS_ADD(error, null_frame_cts_start);
+       DEBUGFS_FWSTATS_ADD(error, bar_retry);
+       DEBUGFS_FWSTATS_ADD(error, num_frame_cts_nul_flid);
+       DEBUGFS_FWSTATS_ADD(error, tx_abort_failure);
+       DEBUGFS_FWSTATS_ADD(error, tx_resume_failure);
+       DEBUGFS_FWSTATS_ADD(error, rx_cmplt_db_overflow_cnt);
+       DEBUGFS_FWSTATS_ADD(error, elp_while_rx_exch);
+       DEBUGFS_FWSTATS_ADD(error, elp_while_tx_exch);
+       DEBUGFS_FWSTATS_ADD(error, elp_while_tx);
+       DEBUGFS_FWSTATS_ADD(error, elp_while_nvic_pending);
+       DEBUGFS_FWSTATS_ADD(error, rx_excessive_frame_len);
+       DEBUGFS_FWSTATS_ADD(error, burst_mismatch);
+       DEBUGFS_FWSTATS_ADD(error, tbc_exch_mismatch);
 
        DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs);
        DEBUGFS_FWSTATS_ADD(tx, tx_cmplt);
@@ -322,7 +391,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed);
        DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed);
        DEBUGFS_FWSTATS_ADD(tx, tx_starts);
-       DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp);
+       DEBUGFS_FWSTATS_ADD(tx, tx_stop);
        DEBUGFS_FWSTATS_ADD(tx, tx_start_templates);
        DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates);
        DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen);
@@ -331,13 +400,14 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_FWSTATS_ADD(tx, tx_exch);
        DEBUGFS_FWSTATS_ADD(tx, tx_retry_template);
        DEBUGFS_FWSTATS_ADD(tx, tx_retry_data);
+       DEBUGFS_FWSTATS_ADD(tx, tx_retry_per_rate);
        DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending);
        DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry);
        DEBUGFS_FWSTATS_ADD(tx, tx_done_template);
        DEBUGFS_FWSTATS_ADD(tx, tx_done_data);
        DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template);
-       DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum);
-       DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result);
+       DEBUGFS_FWSTATS_ADD(tx, tx_cfe1);
+       DEBUGFS_FWSTATS_ADD(tx, tx_cfe2);
        DEBUGFS_FWSTATS_ADD(tx, frag_called);
        DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed);
        DEBUGFS_FWSTATS_ADD(tx, frag_init_called);
@@ -362,11 +432,8 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task);
        DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr);
        DEBUGFS_FWSTATS_ADD(rx, rx_timeout);
+       DEBUGFS_FWSTATS_ADD(rx, rx_rts_timeout);
        DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa);
-       DEBUGFS_FWSTATS_ADD(rx, rx_wa_density_dropped_frame);
-       DEBUGFS_FWSTATS_ADD(rx, rx_wa_ba_not_expected);
-       DEBUGFS_FWSTATS_ADD(rx, rx_frame_checksum);
-       DEBUGFS_FWSTATS_ADD(rx, rx_checksum_result);
        DEBUGFS_FWSTATS_ADD(rx, defrag_called);
        DEBUGFS_FWSTATS_ADD(rx, defrag_init_called);
        DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called);
@@ -376,6 +443,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found);
        DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt);
        DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays);
+       DEBUGFS_FWSTATS_ADD(rx, rx_xfr);
 
        DEBUGFS_FWSTATS_ADD(isr, irqs);
 
@@ -384,13 +452,13 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync);
        DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread);
        DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt);
-
-       DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_timeouts);
-       DEBUGFS_FWSTATS_ADD(ps_poll, upsd_timeouts);
-       DEBUGFS_FWSTATS_ADD(ps_poll, upsd_max_ap_turn);
-       DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_max_ap_turn);
-       DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_utilization);
-       DEBUGFS_FWSTATS_ADD(ps_poll, upsd_utilization);
+       DEBUGFS_FWSTATS_ADD(pwr, sleep_time_count);
+       DEBUGFS_FWSTATS_ADD(pwr, sleep_time_avg);
+       DEBUGFS_FWSTATS_ADD(pwr, sleep_cycle_avg);
+       DEBUGFS_FWSTATS_ADD(pwr, sleep_percent);
+       DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_active_conf);
+       DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_user_conf);
+       DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_counter);
 
        DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter);
        DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter);
@@ -404,12 +472,11 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
 
        DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates);
 
-       DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_vs_rate);
+       DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_rate);
+       DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_len);
        DEBUGFS_FWSTATS_ADD(aggr_size, rx_size);
 
        DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int);
-       DEBUGFS_FWSTATS_ADD(pipeline, tcp_tx_stat_fifo_int);
-       DEBUGFS_FWSTATS_ADD(pipeline, tcp_rx_stat_fifo_int);
        DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int);
        DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int);
        DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int);
@@ -417,22 +484,33 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi);
        DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi);
        DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi);
-       DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_csum_swi);
-       DEBUGFS_FWSTATS_ADD(pipeline, csum_to_rx_xfer_swi);
+       DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_rx_xfer_swi);
        DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in);
        DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full);
        DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out);
-       DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_in);
-       DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_out);
        DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full);
 
-       DEBUGFS_FWSTATS_ADD(mem, rx_free_mem_blks);
-       DEBUGFS_FWSTATS_ADD(mem, tx_free_mem_blks);
-       DEBUGFS_FWSTATS_ADD(mem, fwlog_free_mem_blks);
-       DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks);
+       DEBUGFS_FWSTATS_ADD(diversity, num_of_packets_per_ant);
+       DEBUGFS_FWSTATS_ADD(diversity, total_num_of_toggles);
+
+       DEBUGFS_FWSTATS_ADD(thermal, irq_thr_low);
+       DEBUGFS_FWSTATS_ADD(thermal, irq_thr_high);
+       DEBUGFS_FWSTATS_ADD(thermal, tx_stop);
+       DEBUGFS_FWSTATS_ADD(thermal, tx_resume);
+       DEBUGFS_FWSTATS_ADD(thermal, false_irq);
+       DEBUGFS_FWSTATS_ADD(thermal, adc_source_unexpected);
+
+       DEBUGFS_FWSTATS_ADD(calib, fail_count);
+
+       DEBUGFS_FWSTATS_ADD(calib, calib_count);
+
+       DEBUGFS_FWSTATS_ADD(roaming, rssi_level);
+
+       DEBUGFS_FWSTATS_ADD(dfs, num_of_radar_detections);
 
        DEBUGFS_ADD(conf, moddir);
        DEBUGFS_ADD(radar_detection, moddir);
+       DEBUGFS_ADD(dynamic_fw_traces, moddir);
 
        return 0;
 
index 548bb9e7e91ec054ad7da9022d325e42b007a4cf..09c7e098f4607bd6cb6d0b0f89654f63bcf6de74 100644 (file)
@@ -112,6 +112,14 @@ static int wlcore_smart_config_decode_event(struct wl1271 *wl,
        return 0;
 }
 
+static void wlcore_event_time_sync(struct wl1271 *wl, u16 tsf_msb, u16 tsf_lsb)
+{
+       u32 clock;
+       /* convert the MSB+LSB to a u32 TSF value */
+       clock = (tsf_msb << 16) | tsf_lsb;
+       wl1271_info("TIME_SYNC_EVENT_ID: clock %u", clock);
+}
+
 int wl18xx_process_mailbox_events(struct wl1271 *wl)
 {
        struct wl18xx_event_mailbox *mbox = wl->mbox;
@@ -128,6 +136,11 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
                        wl18xx_scan_completed(wl, wl->scan_wlvif);
        }
 
+       if (vector & TIME_SYNC_EVENT_ID)
+               wlcore_event_time_sync(wl,
+                               mbox->time_sync_tsf_msb,
+                               mbox->time_sync_tsf_lsb);
+
        if (vector & RADAR_DETECTED_EVENT_ID) {
                wl1271_info("radar event: channel %d type %s",
                            mbox->radar_channel,
index 266ee87834e4332e9fe87abdc6fb172e58db8d8c..f3d4f13379cb0dcd1846119d27941c8301167dac 100644 (file)
@@ -38,8 +38,9 @@ enum {
        REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID      = BIT(18),
        DFS_CHANNELS_CONFIG_COMPLETE_EVENT       = BIT(19),
        PERIODIC_SCAN_REPORT_EVENT_ID            = BIT(20),
-       SMART_CONFIG_SYNC_EVENT_ID               = BIT(22),
-       SMART_CONFIG_DECODE_EVENT_ID             = BIT(23),
+       SMART_CONFIG_SYNC_EVENT_ID               = BIT(22),
+       SMART_CONFIG_DECODE_EVENT_ID             = BIT(23),
+       TIME_SYNC_EVENT_ID                       = BIT(24),
 };
 
 enum wl18xx_radar_types {
@@ -95,13 +96,16 @@ struct wl18xx_event_mailbox {
        /* smart config sync channel */
        u8 sc_sync_channel;
        u8 sc_sync_band;
-       u8 padding2[2];
 
+       /* time sync msb*/
+       u16 time_sync_tsf_msb;
        /* radar detect */
        u8 radar_channel;
        u8 radar_type;
 
-       u8 padding3[2];
+       /* time sync lsb*/
+       u16 time_sync_tsf_lsb;
+
 } __packed;
 
 int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
index 49aca2cf76050200771705ecb6c535ba1eee804e..abbf054fb6da892bee387e18345c41e842c01bd2 100644 (file)
@@ -422,6 +422,8 @@ static struct wlcore_conf wl18xx_conf = {
                .num_probe_reqs                 = 2,
                .rssi_threshold                 = -90,
                .snr_threshold                  = 0,
+               .num_short_intervals            = SCAN_MAX_SHORT_INTERVALS,
+               .long_interval                  = 30000,
        },
        .ht = {
                .rx_ba_win_size = 32,
@@ -1026,8 +1028,8 @@ static int wl18xx_boot(struct wl1271 *wl)
                CHANNEL_SWITCH_COMPLETE_EVENT_ID |
                DFS_CHANNELS_CONFIG_COMPLETE_EVENT |
                SMART_CONFIG_SYNC_EVENT_ID |
-               SMART_CONFIG_DECODE_EVENT_ID;
-;
+               SMART_CONFIG_DECODE_EVENT_ID |
+               TIME_SYNC_EVENT_ID;
 
        wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
 
@@ -1159,6 +1161,11 @@ static int wl18xx_hw_init(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
+       /* set the dynamic fw traces bitmap */
+       ret = wl18xx_acx_dynamic_fw_traces(wl);
+       if (ret < 0)
+               return ret;
+
        if (checksum_param) {
                ret = wl18xx_acx_set_checksum_state(wl);
                if (ret != 0)
@@ -1797,7 +1804,7 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
 
 static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
        {
-               .max = 3,
+               .max = 2,
                .types = BIT(NL80211_IFTYPE_STATION),
        },
        {
@@ -1806,6 +1813,10 @@ static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
                         BIT(NL80211_IFTYPE_P2P_GO) |
                         BIT(NL80211_IFTYPE_P2P_CLIENT),
        },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+       },
 };
 
 static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
@@ -1813,6 +1824,48 @@ static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
                .max = 2,
                .types = BIT(NL80211_IFTYPE_AP),
        },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+       },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_cl_limits[] = {
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_AP),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+       },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_go_limits[] = {
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_AP),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_GO),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+       },
 };
 
 static const struct ieee80211_iface_combination
index 98666f235a12d9a70873e18eaa3245298007284f..c938c494c785703b6f3b4a6a1fed48530028627c 100644 (file)
@@ -51,7 +51,11 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                goto out;
        }
 
-       cmd->role_id = wlvif->role_id;
+       /* scan on the dev role if the regular one is not started */
+       if (wlcore_is_p2p_mgmt(wlvif))
+               cmd->role_id = wlvif->dev_role_id;
+       else
+               cmd->role_id = wlvif->role_id;
 
        if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
                ret = -EINVAL;
@@ -223,9 +227,20 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
                                    SCAN_TYPE_PERIODIC);
        wl18xx_adjust_channels(cmd, cmd_channels);
 
-       cmd->short_cycles_sec = 0;
-       cmd->long_cycles_sec = cpu_to_le16(req->interval);
-       cmd->short_cycles_count = 0;
+       if (c->num_short_intervals && c->long_interval &&
+           c->long_interval > req->interval) {
+               cmd->short_cycles_msec = cpu_to_le16(req->interval);
+               cmd->long_cycles_msec = cpu_to_le16(c->long_interval);
+               cmd->short_cycles_count = c->num_short_intervals;
+       } else {
+               cmd->short_cycles_msec = 0;
+               cmd->long_cycles_msec = cpu_to_le16(req->interval);
+               cmd->short_cycles_count = 0;
+       }
+       wl1271_debug(DEBUG_SCAN, "short_interval: %d, long_interval: %d, num_short: %d",
+                    le16_to_cpu(cmd->short_cycles_msec),
+                    le16_to_cpu(cmd->long_cycles_msec),
+                    cmd->short_cycles_count);
 
        cmd->total_cycles = 0;
 
index 2e636aa5dba9bac8d11ff63d5fe02b7efc19af9c..66a763f644d2f8a043050623601f6c9c3fffce08 100644 (file)
@@ -74,8 +74,8 @@ struct wl18xx_cmd_scan_params {
        u8 dfs;            /* number of dfs channels in 5ghz */
        u8 passive_active; /* number of passive before active channels 2.4ghz */
 
-       __le16 short_cycles_sec;
-       __le16 long_cycles_sec;
+       __le16 short_cycles_msec;
+       __le16 long_cycles_msec;
        u8 short_cycles_count;
        u8 total_cycles; /* 0 - infinite */
        u8 padding[2];
index 68919f8d4310455fad623381ee16e2f1c96dac69..f01d24baff7cf00712cf07667f9b046b3d00dab2 100644 (file)
@@ -2003,12 +2003,15 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                      wlvif->bss_type == BSS_TYPE_IBSS)))
                return -EINVAL;
 
-       ret = wl12xx_cmd_role_enable(wl,
-                                    wl12xx_wlvif_to_vif(wlvif)->addr,
-                                    WL1271_ROLE_DEVICE,
-                                    &wlvif->dev_role_id);
-       if (ret < 0)
-               goto out;
+       /* the dev role is already started for p2p mgmt interfaces */
+       if (!wlcore_is_p2p_mgmt(wlvif)) {
+               ret = wl12xx_cmd_role_enable(wl,
+                                            wl12xx_wlvif_to_vif(wlvif)->addr,
+                                            WL1271_ROLE_DEVICE,
+                                            &wlvif->dev_role_id);
+               if (ret < 0)
+                       goto out;
+       }
 
        ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel);
        if (ret < 0)
@@ -2023,7 +2026,8 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 out_stop:
        wl12xx_cmd_role_stop_dev(wl, wlvif);
 out_disable:
-       wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+       if (!wlcore_is_p2p_mgmt(wlvif))
+               wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
 out:
        return ret;
 }
@@ -2052,10 +2056,42 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        if (ret < 0)
                goto out;
 
-       ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
-       if (ret < 0)
-               goto out;
+       if (!wlcore_is_p2p_mgmt(wlvif)) {
+               ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+               if (ret < 0)
+                       goto out;
+       }
 
 out:
        return ret;
 }
+
+int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                          u8 feature, u8 enable, u8 value)
+{
+       struct wlcore_cmd_generic_cfg *cmd;
+       int ret;
+
+       wl1271_debug(DEBUG_CMD,
+                    "cmd generic cfg (role %d feature %d enable %d value %d)",
+                    wlvif->role_id, feature, enable, value);
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->role_id = wlvif->role_id;
+       cmd->feature = feature;
+       cmd->enable = enable;
+       cmd->value = value;
+
+       ret = wl1271_cmd_send(wl, CMD_GENERIC_CFG, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("failed to send generic cfg command");
+               goto out_free;
+       }
+out_free:
+       kfree(cmd);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(wlcore_cmd_generic_cfg);
index e14cd407a6aea6295f786033a63a234c12bc6f6c..8dc46c0a489a1b6ea8d3e6ef752d13906e670feb 100644 (file)
@@ -92,6 +92,8 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
                                     enum ieee80211_band band);
 int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
+int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                          u8 feature, u8 enable, u8 value);
 int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
@@ -652,6 +654,19 @@ struct wl12xx_cmd_regdomain_dfs_config {
        u8 padding[3];
 } __packed;
 
+enum wlcore_generic_cfg_feature {
+       WLCORE_CFG_FEATURE_RADAR_DEBUG = 2,
+};
+
+struct wlcore_cmd_generic_cfg {
+       struct wl1271_cmd_header header;
+
+       u8 role_id;
+       u8 feature;
+       u8 enable;
+       u8 value;
+} __packed;
+
 struct wl12xx_cmd_config_fwlog {
        struct wl1271_cmd_header header;
 
index 166add00b50fb43984b458f4ab9cf254b9a456d5..52a9d1b140203612ba05285c1542f3ce7c28182d 100644 (file)
@@ -1186,6 +1186,15 @@ struct conf_sched_scan_settings {
 
        /* SNR threshold to be used for filtering */
        s8 snr_threshold;
+
+       /*
+        * number of short intervals scheduled scan cycles before
+        * switching to long intervals
+        */
+       u8 num_short_intervals;
+
+       /* interval between each long scheduled scan cycle (in ms) */
+       u16 long_interval;
 } __packed;
 
 struct conf_ht_setting {
@@ -1352,7 +1361,7 @@ struct conf_recovery_settings {
  * version, the two LSB are the lower driver's private conf
  * version.
  */
-#define WLCORE_CONF_VERSION    (0x0006 << 16)
+#define WLCORE_CONF_VERSION    (0x0007 << 16)
 #define WLCORE_CONF_MASK       0xffff0000
 #define WLCORE_CONF_SIZE       (sizeof(struct wlcore_conf_header) +    \
                                 sizeof(struct wlcore_conf))
index 5ca1fb161a50c8bbc612d859891fb9a3c8a441cf..e92f2639af2c8835d5c2faddd2c5630e99f89067 100644 (file)
@@ -348,7 +348,7 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
 }
 
 /* generic sta initialization (non vif-specific) */
-static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret;
 
index a45fbfddec192e67791be7cb23ac419c2b3a82ba..fd1cdb6bc3e4d1e6ee972a09728e7dde0a8217d3 100644 (file)
@@ -35,5 +35,6 @@ int wl1271_hw_init(struct wl1271 *wl);
 int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
 int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 
 #endif
index 337223b9f6f89aab54ae4c3cd93421e1ebb404fe..e819369d8f8f43a2ce37a5260c3c8c017c7ad5eb 100644 (file)
@@ -1792,6 +1792,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
 
        wl->wow_enabled = true;
        wl12xx_for_each_wlvif(wl, wlvif) {
+               if (wlcore_is_p2p_mgmt(wlvif))
+                       continue;
+
                ret = wl1271_configure_suspend(wl, wlvif, wow);
                if (ret < 0) {
                        mutex_unlock(&wl->mutex);
@@ -1901,6 +1904,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
                goto out;
 
        wl12xx_for_each_wlvif(wl, wlvif) {
+               if (wlcore_is_p2p_mgmt(wlvif))
+                       continue;
+
                wl1271_configure_resume(wl, wlvif);
        }
 
@@ -2256,6 +2262,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
                wlvif->p2p = 1;
                /* fall-through */
        case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_DEVICE:
                wlvif->bss_type = BSS_TYPE_STA_BSS;
                break;
        case NL80211_IFTYPE_ADHOC:
@@ -2477,7 +2484,8 @@ static void wlcore_hw_queue_iter(void *data, u8 *mac,
 {
        struct wlcore_hw_queue_iter_data *iter_data = data;
 
-       if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+           WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
                return;
 
        if (iter_data->cur_running || vif == iter_data->vif) {
@@ -2495,6 +2503,11 @@ static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
        struct wlcore_hw_queue_iter_data iter_data = {};
        int i, q_base;
 
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+               vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+               return 0;
+       }
+
        iter_data.vif = vif;
 
        /* mark all bits taken by active interfaces */
@@ -2618,14 +2631,27 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                        goto out;
        }
 
-       ret = wl12xx_cmd_role_enable(wl, vif->addr,
-                                    role_type, &wlvif->role_id);
-       if (ret < 0)
-               goto out;
+       if (!wlcore_is_p2p_mgmt(wlvif)) {
+               ret = wl12xx_cmd_role_enable(wl, vif->addr,
+                                            role_type, &wlvif->role_id);
+               if (ret < 0)
+                       goto out;
 
-       ret = wl1271_init_vif_specific(wl, vif);
-       if (ret < 0)
-               goto out;
+               ret = wl1271_init_vif_specific(wl, vif);
+               if (ret < 0)
+                       goto out;
+
+       } else {
+               ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
+                                            &wlvif->dev_role_id);
+               if (ret < 0)
+                       goto out;
+
+               /* needed mainly for configuring rate policies */
+               ret = wl1271_sta_hw_init(wl, wlvif);
+               if (ret < 0)
+                       goto out;
+       }
 
        list_add(&wlvif->list, &wl->wlvif_list);
        set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
@@ -2696,9 +2722,15 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
                                wl12xx_stop_dev(wl, wlvif);
                }
 
-               ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
-               if (ret < 0)
-                       goto deinit;
+               if (!wlcore_is_p2p_mgmt(wlvif)) {
+                       ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
+                       if (ret < 0)
+                               goto deinit;
+               } else {
+                       ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+                       if (ret < 0)
+                               goto deinit;
+               }
 
                wl1271_ps_elp_sleep(wl);
        }
@@ -3088,6 +3120,9 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 {
        int ret;
 
+       if (wlcore_is_p2p_mgmt(wlvif))
+               return 0;
+
        if (conf->power_level != wlvif->power_level) {
                ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
                if (ret < 0)
@@ -3207,6 +3242,9 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
                goto out;
 
        wl12xx_for_each_wlvif(wl, wlvif) {
+               if (wlcore_is_p2p_mgmt(wlvif))
+                       continue;
+
                if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
                        if (*total & FIF_ALLMULTI)
                                ret = wl1271_acx_group_address_tbl(wl, wlvif,
@@ -4837,6 +4875,9 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
        u8 ps_scheme;
        int ret = 0;
 
+       if (wlcore_is_p2p_mgmt(wlvif))
+               return 0;
+
        mutex_lock(&wl->mutex);
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
@@ -6078,8 +6119,10 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
        wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
        wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
-               BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+                                        BIT(NL80211_IFTYPE_AP) |
+                                        BIT(NL80211_IFTYPE_P2P_DEVICE) |
+                                        BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                                        BIT(NL80211_IFTYPE_P2P_GO);
        wl->hw->wiphy->max_scan_ssids = 1;
        wl->hw->wiphy->max_sched_scan_ssids = 16;
        wl->hw->wiphy->max_match_sets = 16;
index 7df672a84530b0217378d8f9b66c2e10d31e044e..5b2927391d1cef5aaa0c6c4f3947e7f51aa1a1eb 100644 (file)
@@ -74,6 +74,12 @@ static void wl1271_rx_status(struct wl1271 *wl,
        if (desc->rate <= wl->hw_min_ht_rate)
                status->flag |= RX_FLAG_HT;
 
+       /*
+       * Read the signal level and antenna diversity indication.
+       * The msb in the signal level is always set as it is a
+       * negative number.
+       * The antenna indication is the msb of the rssi.
+       */
        status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7));
        status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7);
 
index 4dadd0c62cde5251f83d924e20b1c5312e3d2806..782eb297c196955c9d0b90002d8e8e05588ac511 100644 (file)
@@ -83,6 +83,12 @@ struct wl1271_cmd_trigger_scan_to {
 #define MAX_CHANNELS_5GHZ      42
 
 #define SCAN_MAX_CYCLE_INTERVALS 16
+
+/* The FW intervals can take up to 16 entries.
+ * The 1st entry isn't used (scan is immediate). The last
+ * entry should be used for the long_interval
+ */
+#define SCAN_MAX_SHORT_INTERVALS (SCAN_MAX_CYCLE_INTERVALS - 2)
 #define SCAN_MAX_BANDS 3
 
 enum {
index 7f363fa566a3478b24e354abd3dadacb19275e6a..a1b6040e6491219544be4054343ff00f6c555db1 100644 (file)
@@ -500,6 +500,9 @@ struct wl1271 {
        /* interface combinations supported by the hw */
        const struct ieee80211_iface_combination *iface_combinations;
        u8 n_iface_combinations;
+
+       /* dynamic fw traces */
+       u32 dynamic_fw_traces;
 };
 
 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
index 39efc6d78b10dd743f65c0b96dfe72f86747afe3..27c56876b2c13821606a662a3d06a6ae4e24ccdb 100644 (file)
@@ -503,6 +503,11 @@ struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
        return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
 }
 
+static inline bool wlcore_is_p2p_mgmt(struct wl12xx_vif *wlvif)
+{
+       return wl12xx_wlvif_to_vif(wlvif)->type == NL80211_IFTYPE_P2P_DEVICE;
+}
+
 #define wl12xx_for_each_wlvif(wl, wlvif) \
                list_for_each_entry(wlvif, &wl->wlvif_list, list)
 
index f948c46d51329970c186b2886c267ffba2e807db..002062db2f1dc791b73a1a98c5a4368dfe756179 100644 (file)
@@ -1336,7 +1336,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 
        netif_carrier_off(info->netdev);
 
-       for (i = 0; i < num_queues; ++i) {
+       for (i = 0; i < num_queues && info->queues; ++i) {
                struct netfront_queue *queue = &info->queues[i];
 
                if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
@@ -2101,7 +2101,8 @@ static int xennet_remove(struct xenbus_device *dev)
 
        unregister_netdev(info->netdev);
 
-       xennet_destroy_queues(info);
+       if (info->queues)
+               xennet_destroy_queues(info);
        xennet_free_netdev(info->netdev);
 
        return 0;
index 1b3a094734522803c7f3fecd39da5c297a1feb43..30f9ef0c0d4f8cea52b182f370a04ed1ba4a9908 100644 (file)
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
        if (resp) {
                resp(sp, fp, arg);
                res = true;
-       } else if (!IS_ERR(fp)) {
-               fc_frame_free(fp);
        }
 
        spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
         * If new exch resp handler is valid then call that
         * first.
         */
-       fc_invoke_resp(ep, sp, fp);
+       if (!fc_invoke_resp(ep, sp, fp))
+               fc_frame_free(fp);
 
        fc_exch_release(ep);
        return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
        fc_exch_hold(ep);
        if (!rc)
                fc_exch_delete(ep);
-       fc_invoke_resp(ep, sp, fp);
+       if (!fc_invoke_resp(ep, sp, fp))
+               fc_frame_free(fp);
        if (has_rec)
                fc_exch_timer_set(ep, ep->r_a_tov);
        fc_exch_release(ep);
index c6795941b45d98579cd6117cc5b72f8c5c4d0bf9..2d5909c4685ca63375f5041d960effada171f5fc 100644 (file)
@@ -1039,11 +1039,26 @@ restart:
                fc_fcp_pkt_hold(fsp);
                spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
-               if (!fc_fcp_lock_pkt(fsp)) {
+               spin_lock_bh(&fsp->scsi_pkt_lock);
+               if (!(fsp->state & FC_SRB_COMPL)) {
+                       fsp->state |= FC_SRB_COMPL;
+                       /*
+                        * TODO: dropping scsi_pkt_lock and then reacquiring
+                        * again around fc_fcp_cleanup_cmd() is required,
+                        * since fc_fcp_cleanup_cmd() calls into
+                        * fc_seq_set_resp() and that func preempts cpu using
+                        * schedule. May be schedule and related code should be
+                        * removed instead of unlocking here to avoid scheduling
+                        * while atomic bug.
+                        */
+                       spin_unlock_bh(&fsp->scsi_pkt_lock);
+
                        fc_fcp_cleanup_cmd(fsp, error);
+
+                       spin_lock_bh(&fsp->scsi_pkt_lock);
                        fc_io_compl(fsp);
-                       fc_fcp_unlock_pkt(fsp);
                }
+               spin_unlock_bh(&fsp->scsi_pkt_lock);
 
                fc_fcp_pkt_release(fsp);
                spin_lock_irqsave(&si->scsi_queue_lock, flags);
index 8053f24f03499335112721cd50c2816da40393a6..98d9bb6ff725ff46621a408bdf1f208175e21daf 100644 (file)
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
-       unsigned long flags;
 
        del_timer_sync(&conn->transport_timer);
 
+       mutex_lock(&session->eh_mutex);
        spin_lock_bh(&session->frwd_lock);
        conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
        if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        }
        spin_unlock_bh(&session->frwd_lock);
 
-       /*
-        * Block until all in-progress commands for this connection
-        * time out or fail.
-        */
-       for (;;) {
-               spin_lock_irqsave(session->host->host_lock, flags);
-               if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
-                       spin_unlock_irqrestore(session->host->host_lock, flags);
-                       break;
-               }
-               spin_unlock_irqrestore(session->host->host_lock, flags);
-               msleep_interruptible(500);
-               iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
-                                 "host_busy %d host_failed %d\n",
-                                 atomic_read(&session->host->host_busy),
-                                 session->host->host_failed);
-               /*
-                * force eh_abort() to unblock
-                */
-               wake_up(&conn->ehwait);
-       }
-
        /* flush queued up work because we free the connection below */
        iscsi_suspend_tx(conn);
 
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        if (session->leadconn == conn)
                session->leadconn = NULL;
        spin_unlock_bh(&session->frwd_lock);
+       mutex_unlock(&session->eh_mutex);
 
        iscsi_destroy_conn(cls_conn);
 }
index cfadccef045c5f91d9efb5f575c98d1d9c68b090..6457a8a0db9c37ed8892c28effe768a533f2e7f3 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/jiffies.h>
-#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -2523,33 +2522,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
        }
 }
 EXPORT_SYMBOL(scsi_build_sense_buffer);
-
-/**
- * scsi_set_sense_information - set the information field in a
- *             formatted sense data buffer
- * @buf:       Where to build sense data
- * @info:      64-bit information value to be set
- *
- **/
-void scsi_set_sense_information(u8 *buf, u64 info)
-{
-       if ((buf[0] & 0x7f) == 0x72) {
-               u8 *ucp, len;
-
-               len = buf[7];
-               ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
-               if (!ucp) {
-                       buf[7] = len + 0xa;
-                       ucp = buf + 8 + len;
-               }
-               ucp[0] = 0;
-               ucp[1] = 0xa;
-               ucp[2] = 0x80; /* Valid bit */
-               ucp[3] = 0;
-               put_unaligned_be64(info, &ucp[4]);
-       } else if ((buf[0] & 0x7f) == 0x70) {
-               buf[0] |= 0x80;
-               put_unaligned_be64(info, &buf[3]);
-       }
-}
-EXPORT_SYMBOL(scsi_set_sense_information);
index 3b2fcb4fada0491c4500b42555fdef542b4399d2..a20da8c25b4f960224fb4d772aafea38c57e1656 100644 (file)
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
        max_xfer = sdkp->max_xfer_blocks;
        max_xfer <<= ilog2(sdp->sector_size) - 9;
 
-       max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
-                               max_xfer);
-       blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+       sdkp->disk->queue->limits.max_sectors =
+               min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+
        set_capacity(disk, sdkp->capacity);
        sd_config_write_same(sdkp);
        kfree(buffer);
index f5296f53a3d24186e235db3972b6eac75c3a6cdc..a1f66addda8da97eacb5ece261e23f65b26a95b0 100644 (file)
@@ -493,7 +493,7 @@ static void WILC_WFI_mon_setup(struct net_device *dev)
        /* dev->destructor = free_netdev; */
        PRINT_INFO(CORECONFIG_DBG, "In Ethernet setup function\n");
        ether_setup(dev);
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->type = ARPHRD_IEEE80211_RADIOTAP;
        memset(dev->dev_addr, 0, ETH_ALEN);
 
index cd77a064c772f1bbe897482d2372fe5bc6434328..fd092909a4577a7c4a708516bf3344fee331ad84 100644 (file)
@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
 
        conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
-       if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+       if (hdr->flags & ISCSI_FLAG_CMD_READ)
                cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
-       } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+       else
                cmd->targ_xfer_tag = 0xFFFFFFFF;
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
        cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
index c2e9fea90b4a4bc16a0384d79fa9684c9f4176e0..860e840461778271191ba4ee8f1a4011dba5e78c 100644 (file)
@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
                if (!strcmp(t->tf_ops->name, fo->name)) {
                        BUG_ON(atomic_read(&t->tf_access_cnt));
                        list_del(&t->tf_list);
+                       mutex_unlock(&g_tf_lock);
+                       /*
+                        * Wait for any outstanding fabric se_deve_entry->rcu_head
+                        * callbacks to complete post kfree_rcu(), before allowing
+                        * fabric driver unload of TFO->module to proceed.
+                        */
+                       rcu_barrier();
                        kfree(t);
-                       break;
+                       return;
                }
        }
        mutex_unlock(&g_tf_lock);
index 62ea4e8e70a8935398f2a0e86fc44627dfa3368e..be9cefc07407e80ef5dd7dfcbd8a0d025faf97f6 100644 (file)
@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
        list_for_each_entry(tb, &backend_list, list) {
                if (tb->ops == ops) {
                        list_del(&tb->list);
+                       mutex_unlock(&backend_mutex);
+                       /*
+                        * Wait for any outstanding backend driver ->rcu_head
+                        * callbacks to complete post TBO->free_device() ->
+                        * call_rcu(), before allowing backend driver module
+                        * unload of target_backend_ops->owner to proceed.
+                        */
+                       rcu_barrier();
                        kfree(tb);
-                       break;
+                       return;
                }
        }
        mutex_unlock(&backend_mutex);
index b5ba1ec3c35476361103d7dca47a1934cdd3289f..f87d4cef6d398c072e953e7eaa6b5d9d5b469d70 100644 (file)
@@ -1203,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
        struct se_dev_entry *deve;
        struct se_session *sess = cmd->se_sess;
        struct se_node_acl *nacl;
+       struct scsi_lun slun;
        unsigned char *buf;
        u32 lun_count = 0, offset = 8;
-
-       if (cmd->data_length < 16) {
-               pr_warn("REPORT LUNS allocation length %u too small\n",
-                       cmd->data_length);
-               return TCM_INVALID_CDB_FIELD;
-       }
+       __be32 len;
 
        buf = transport_kmap_data_sg(cmd);
-       if (!buf)
+       if (cmd->data_length && !buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        /*
@@ -1221,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
         * coming via a target_core_mod PASSTHROUGH op, and not through
         * a $FABRIC_MOD.  In that case, report LUN=0 only.
         */
-       if (!sess) {
-               int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
-               lun_count = 1;
+       if (!sess)
                goto done;
-       }
+
        nacl = sess->se_node_acl;
 
        rcu_read_lock();
@@ -1236,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
                 * See SPC2-R20 7.19.
                 */
                lun_count++;
-               if ((offset + 8) > cmd->data_length)
+               if (offset >= cmd->data_length)
                        continue;
 
-               int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+               int_to_scsilun(deve->mapped_lun, &slun);
+               memcpy(buf + offset, &slun,
+                      min(8u, cmd->data_length - offset));
                offset += 8;
        }
        rcu_read_unlock();
@@ -1248,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
         * See SPC3 r07, page 159.
         */
 done:
-       lun_count *= 8;
-       buf[0] = ((lun_count >> 24) & 0xff);
-       buf[1] = ((lun_count >> 16) & 0xff);
-       buf[2] = ((lun_count >> 8) & 0xff);
-       buf[3] = (lun_count & 0xff);
-       transport_kunmap_data_sg(cmd);
+       /*
+        * If no LUNs are accessible, report virtual LUN 0.
+        */
+       if (lun_count == 0) {
+               int_to_scsilun(0, &slun);
+               if (cmd->data_length > 8)
+                       memcpy(buf + offset, &slun,
+                              min(8u, cmd->data_length - offset));
+               lun_count = 1;
+       }
+
+       if (buf) {
+               len = cpu_to_be32(lun_count * 8);
+               memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
+               transport_kunmap_data_sg(cmd);
+       }
 
        target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
        return 0;
index 6509c61b96484993333a4198138945f43154fdfd..620dcd405ff6eec9ae65b0af8e93c25daae15d1f 100644 (file)
@@ -68,7 +68,7 @@ struct power_table {
  *     registered cooling device.
  * @cpufreq_state: integer value representing the current state of cpufreq
  *     cooling devices.
- * @cpufreq_val: integer value representing the absolute value of the clipped
+ * @clipped_freq: integer value representing the absolute value of the clipped
  *     frequency.
  * @max_level: maximum cooling level. One less than total number of valid
  *     cpufreq frequencies.
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
        int id;
        struct thermal_cooling_device *cool_dev;
        unsigned int cpufreq_state;
-       unsigned int cpufreq_val;
+       unsigned int clipped_freq;
        unsigned int max_level;
        unsigned int *freq_table;       /* In descending order */
        struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
+static unsigned int cpufreq_dev_count;
+
+static DEFINE_MUTEX(cooling_list_lock);
 static LIST_HEAD(cpufreq_dev_list);
 
 /**
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
 {
        struct cpufreq_cooling_device *cpufreq_dev;
 
-       mutex_lock(&cooling_cpufreq_lock);
+       mutex_lock(&cooling_list_lock);
        list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
                if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
-                       mutex_unlock(&cooling_cpufreq_lock);
+                       mutex_unlock(&cooling_list_lock);
                        return get_level(cpufreq_dev, freq);
                }
        }
-       mutex_unlock(&cooling_cpufreq_lock);
+       mutex_unlock(&cooling_list_lock);
 
        pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
        return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
                                    unsigned long event, void *data)
 {
        struct cpufreq_policy *policy = data;
-       unsigned long max_freq = 0;
+       unsigned long clipped_freq;
        struct cpufreq_cooling_device *cpufreq_dev;
 
-       switch (event) {
+       if (event != CPUFREQ_ADJUST)
+               return NOTIFY_DONE;
 
-       case CPUFREQ_ADJUST:
-               mutex_lock(&cooling_cpufreq_lock);
-               list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
-                       if (!cpumask_test_cpu(policy->cpu,
-                                             &cpufreq_dev->allowed_cpus))
-                               continue;
+       mutex_lock(&cooling_list_lock);
+       list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+               if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+                       continue;
 
-                       max_freq = cpufreq_dev->cpufreq_val;
+               /*
+                * policy->max is the maximum allowed frequency defined by user
+                * and clipped_freq is the maximum that thermal constraints
+                * allow.
+                *
+                * If clipped_freq is lower than policy->max, then we need to
+                * readjust policy->max.
+                *
+                * But, if clipped_freq is greater than policy->max, we don't
+                * need to do anything.
+                */
+               clipped_freq = cpufreq_dev->clipped_freq;
 
-                       if (policy->max != max_freq)
-                               cpufreq_verify_within_limits(policy, 0,
-                                                            max_freq);
-               }
-               mutex_unlock(&cooling_cpufreq_lock);
+               if (policy->max > clipped_freq)
+                       cpufreq_verify_within_limits(policy, 0, clipped_freq);
                break;
-       default:
-               return NOTIFY_DONE;
        }
+       mutex_unlock(&cooling_list_lock);
 
        return NOTIFY_OK;
 }
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 
        clip_freq = cpufreq_device->freq_table[state];
        cpufreq_device->cpufreq_state = state;
-       cpufreq_device->cpufreq_val = clip_freq;
+       cpufreq_device->clipped_freq = clip_freq;
 
        cpufreq_update_policy(cpu);
 
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
                        pr_debug("%s: freq:%u KHz\n", __func__, freq);
        }
 
-       cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
+       cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
        cpufreq_dev->cool_dev = cool_dev;
 
        mutex_lock(&cooling_cpufreq_lock);
 
+       mutex_lock(&cooling_list_lock);
+       list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+       mutex_unlock(&cooling_list_lock);
+
        /* Register the notifier for first cpufreq cooling device */
-       if (list_empty(&cpufreq_dev_list))
+       if (!cpufreq_dev_count++)
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                          CPUFREQ_POLICY_NOTIFIER);
-       list_add(&cpufreq_dev->node, &cpufreq_dev_list);
-
        mutex_unlock(&cooling_cpufreq_lock);
 
        return cool_dev;
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
                return;
 
        cpufreq_dev = cdev->devdata;
-       mutex_lock(&cooling_cpufreq_lock);
-       list_del(&cpufreq_dev->node);
 
        /* Unregister the notifier for the last cpufreq cooling device */
-       if (list_empty(&cpufreq_dev_list))
+       mutex_lock(&cooling_cpufreq_lock);
+       if (!--cpufreq_dev_count)
                cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);
+
+       mutex_lock(&cooling_list_lock);
+       list_del(&cpufreq_dev->node);
+       mutex_unlock(&cooling_list_lock);
+
        mutex_unlock(&cooling_cpufreq_lock);
 
        thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
index 63a448f9d93b66eedec7fde829983588fe6251f0..7006860f2f3693b04ee44996c5085e2f94ceae44 100644 (file)
@@ -334,7 +334,7 @@ static int allocate_power(struct thermal_zone_device *tz,
                                      max_allocatable_power, current_temp,
                                      (s32)control_temp - (s32)current_temp);
 
-       devm_kfree(&tz->device, req_power);
+       kfree(req_power);
 unlock:
        mutex_unlock(&tz->lock);
 
@@ -426,7 +426,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
                return -EINVAL;
        }
 
-       params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
+       params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params)
                return -ENOMEM;
 
@@ -468,14 +468,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
        return 0;
 
 free:
-       devm_kfree(&tz->device, params);
+       kfree(params);
        return ret;
 }
 
 static void power_allocator_unbind(struct thermal_zone_device *tz)
 {
        dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
-       devm_kfree(&tz->device, tz->governor_data);
+       kfree(tz->governor_data);
        tz->governor_data = NULL;
 }
 
index 80cc1b35d46043c16bc456e0cadf61e76c281d52..ebb5e37455a07acd86f5fbf1b76d474e99b937fb 100644 (file)
@@ -2246,7 +2246,15 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
 
                        err = -EINVAL;
                        if (old) {
-                               struct fuse_dev *fud = fuse_get_dev(old);
+                               struct fuse_dev *fud = NULL;
+
+                               /*
+                                * Check against file->f_op because CUSE
+                                * uses the same ioctl handler.
+                                */
+                               if (old->f_op == file->f_op &&
+                                   old->f_cred->user_ns == file->f_cred->user_ns)
+                                       fud = fuse_get_dev(old);
 
                                if (fud) {
                                        mutex_lock(&fuse_mutex);
index 57ca8cc383a615344498202384b1b814911bc766..3b4d8a4a23fb760867fc7d59ede2a3459eac2375 100644 (file)
@@ -743,8 +743,6 @@ struct drm_connector {
        uint8_t num_h_tile, num_v_tile;
        uint8_t tile_h_loc, tile_v_loc;
        uint16_t tile_h_size, tile_v_size;
-
-       struct list_head destroy_list;
 };
 
 /**
index 6c78956aa47092440edb3a73e7b9389ac3a57558..d2992bfa17063a052a08a106b9105284ce4bfa4a 100644 (file)
@@ -385,8 +385,6 @@ enum {
        SATA_SSP                = 0x06, /* Software Settings Preservation */
        SATA_DEVSLP             = 0x09, /* Device Sleep */
 
-       SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
-
        /* feature values for SET_MAX */
        ATA_SET_MAX_ADDR        = 0x00,
        ATA_SET_MAX_PASSWD      = 0x01,
@@ -530,8 +528,6 @@ struct ata_bmdma_prd {
 #define ata_id_cdb_intr(id)    (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
 #define ata_id_has_da(id)      ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
 #define ata_id_has_devslp(id)  ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
-                               ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
 
 static inline bool ata_id_has_hipm(const u16 *id)
 {
@@ -720,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
        return false;
 }
 
-static inline bool ata_id_has_sense_reporting(const u16 *id)
-{
-       if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
-               return false;
-       return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
-}
-
-static inline bool ata_id_sense_reporting_enabled(const u16 *id)
-{
-       if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
-               return false;
-       return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
-}
-
 /**
  *     ata_id_major_version    -       get ATA level of drive
  *     @id: Identify data
index c6028fd742c1a18309a323a016e3be076d9b13be..d04aa58280ded5694bbde3638ec602269635a988 100644 (file)
@@ -3,28 +3,43 @@
 
 /* Exponentially weighted moving average (EWMA) */
 
-/* For more documentation see lib/average.c */
-
-struct ewma {
-       unsigned long internal;
-       unsigned long factor;
-       unsigned long weight;
-};
-
-extern void ewma_init(struct ewma *avg, unsigned long factor,
-                     unsigned long weight);
-
-extern struct ewma *ewma_add(struct ewma *avg, unsigned long val);
-
-/**
- * ewma_read() - Get average value
- * @avg: Average structure
- *
- * Returns the average value held in @avg.
- */
-static inline unsigned long ewma_read(const struct ewma *avg)
-{
-       return avg->internal >> avg->factor;
-}
+#define DECLARE_EWMA(name, _factor, _weight)                           \
+       struct ewma_##name {                                            \
+               unsigned long internal;                                 \
+       };                                                              \
+       static inline void ewma_##name##_init(struct ewma_##name *e)    \
+       {                                                               \
+               BUILD_BUG_ON(!__builtin_constant_p(_factor));           \
+               BUILD_BUG_ON(!__builtin_constant_p(_weight));           \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_factor);                   \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_weight);                   \
+               e->internal = 0;                                        \
+       }                                                               \
+       static inline unsigned long                                     \
+       ewma_##name##_read(struct ewma_##name *e)                       \
+       {                                                               \
+               BUILD_BUG_ON(!__builtin_constant_p(_factor));           \
+               BUILD_BUG_ON(!__builtin_constant_p(_weight));           \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_factor);                   \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_weight);                   \
+               return e->internal >> ilog2(_factor);                   \
+       }                                                               \
+       static inline void ewma_##name##_add(struct ewma_##name *e,     \
+                                            unsigned long val)         \
+       {                                                               \
+               unsigned long internal = ACCESS_ONCE(e->internal);      \
+               unsigned long weight = ilog2(_weight);                  \
+               unsigned long factor = ilog2(_factor);                  \
+                                                                       \
+               BUILD_BUG_ON(!__builtin_constant_p(_factor));           \
+               BUILD_BUG_ON(!__builtin_constant_p(_weight));           \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_factor);                   \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_weight);                   \
+                                                                       \
+               ACCESS_ONCE(e->internal) = internal ?                   \
+                       (((internal << weight) - internal) +            \
+                               (val << factor)) >> weight :            \
+                       (val << factor);                                \
+       }
 
 #endif /* _LINUX_AVERAGE_H */
index 6cceedf65ca27d787f995980cf716de2d0a2be47..cf038431a5cc8c22246110c716e1d96e09f1a1ed 100644 (file)
@@ -640,7 +640,6 @@ struct bcma_drv_cc {
        spinlock_t gpio_lock;
 #ifdef CONFIG_BCMA_DRIVER_GPIO
        struct gpio_chip gpio;
-       struct irq_domain *irq_domain;
 #endif
 };
 
index 9012f877520802662fb5f3704c60ef24d09c7136..eb049c622208e3a0815177c528ba9149e6bc8846 100644 (file)
@@ -76,7 +76,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
 
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
-               ((a[2] ^ b[2]) & m)) == 0;
+               (__force int)((a[2] ^ b[2]) & m)) == 0;
 #else
        return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
 #endif
index b9c7897dc5668c3fe59c30ab8505d334a6689168..cfa906f28b7a277b480f5bf154bb8cf76e467ad5 100644 (file)
@@ -2074,8 +2074,8 @@ enum ieee80211_tdls_actioncode {
 #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
 #define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED   BIT(7)
 
+#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED    BIT(5)
 #define WLAN_EXT_CAPA8_OPMODE_NOTIF    BIT(6)
-#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED    BIT(7)
 
 /* TDLS specific payload type in the LLC/SNAP header */
 #define WLAN_TDLS_SNAP_RFTYPE  0x2
index 2039546b0ec6d4ab9ce00bfed40e90db4db533df..8b6d6f2154a4eaab1cce3db487b92d8d1b36d4a9 100644 (file)
@@ -103,6 +103,7 @@ enum {
        MLX5_REG_PMTU            = 0x5003,
        MLX5_REG_PTYS            = 0x5004,
        MLX5_REG_PAOS            = 0x5006,
+       MLX5_REG_PFCC            = 0x5007,
        MLX5_REG_PPCNT           = 0x5008,
        MLX5_REG_PMAOS           = 0x5012,
        MLX5_REG_PUDE            = 0x5009,
@@ -152,8 +153,8 @@ enum mlx5_dev_event {
 };
 
 enum mlx5_port_status {
-       MLX5_PORT_UP        = 1 << 1,
-       MLX5_PORT_DOWN      = 1 << 2,
+       MLX5_PORT_UP        = 1,
+       MLX5_PORT_DOWN      = 2,
 };
 
 struct mlx5_uuar_info {
@@ -761,9 +762,10 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
                               u8 local_port);
 int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
                        int proto_mask);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
-                        enum mlx5_port_status status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+                              enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+                                enum mlx5_port_status *status);
 
 int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
 void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
@@ -773,6 +775,10 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
                              u8 *vl_hw_cap, u8 local_port);
 
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+                         u32 *rx_pause, u32 *tx_pause);
+
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
index f7a6ef2fae3a46a82aba6088b25ed15afd0f6952..6abe0d6f1e1d4b7db86285fd2ff370e1bb67d123 100644 (file)
@@ -1262,6 +1262,8 @@ struct net_device_ops {
  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
  *     change when it's running
  * @IFF_MACVLAN: Macvlan device
+ * @IFF_VRF_MASTER: device is a VRF master
+ * @IFF_NO_QUEUE: device can run without qdisc attached
  */
 enum netdev_priv_flags {
        IFF_802_1Q_VLAN                 = 1<<0,
@@ -1290,6 +1292,7 @@ enum netdev_priv_flags {
        IFF_IPVLAN_MASTER               = 1<<23,
        IFF_IPVLAN_SLAVE                = 1<<24,
        IFF_VRF_MASTER                  = 1<<25,
+       IFF_NO_QUEUE                    = 1<<26,
 };
 
 #define IFF_802_1Q_VLAN                        IFF_802_1Q_VLAN
@@ -1318,6 +1321,7 @@ enum netdev_priv_flags {
 #define IFF_IPVLAN_MASTER              IFF_IPVLAN_MASTER
 #define IFF_IPVLAN_SLAVE               IFF_IPVLAN_SLAVE
 #define IFF_VRF_MASTER                 IFF_VRF_MASTER
+#define IFF_NO_QUEUE                   IFF_NO_QUEUE
 
 /**
  *     struct net_device - The DEVICE structure.
@@ -2307,8 +2311,7 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
 
 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
 {
-       return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
-               skb_gro_offset(skb));
+       return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
 }
 
 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
@@ -2404,37 +2407,58 @@ static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
        grc->delta = 0;
 }
 
-static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
-                                          int start, int offset,
-                                          struct gro_remcsum *grc,
-                                          bool nopartial)
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+                                           unsigned int off, size_t hdrlen,
+                                           int start, int offset,
+                                           struct gro_remcsum *grc,
+                                           bool nopartial)
 {
        __wsum delta;
+       size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
 
        BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
 
        if (!nopartial) {
-               NAPI_GRO_CB(skb)->gro_remcsum_start =
-                   ((unsigned char *)ptr + start) - skb->head;
-               return;
+               NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+               return ptr;
+       }
+
+       ptr = skb_gro_header_fast(skb, off);
+       if (skb_gro_header_hard(skb, off + plen)) {
+               ptr = skb_gro_header_slow(skb, off + plen, off);
+               if (!ptr)
+                       return NULL;
        }
 
-       delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+       delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+                              start, offset);
 
        /* Adjust skb->csum since we changed the packet */
        NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
 
-       grc->offset = (ptr + offset) - (void *)skb->head;
+       grc->offset = off + hdrlen + offset;
        grc->delta = delta;
+
+       return ptr;
 }
 
 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
                                           struct gro_remcsum *grc)
 {
+       void *ptr;
+       size_t plen = grc->offset + sizeof(u16);
+
        if (!grc->delta)
                return;
 
-       remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+       ptr = skb_gro_header_fast(skb, grc->offset);
+       if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
+               ptr = skb_gro_header_slow(skb, plen, grc->offset);
+               if (!ptr)
+                       return;
+       }
+
+       remcsum_unadjust((__sum16 *)ptr, grc->delta);
 }
 
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -3819,12 +3843,22 @@ static inline bool netif_is_vrf(const struct net_device *dev)
 
 static inline bool netif_index_is_vrf(struct net *net, int ifindex)
 {
-       struct net_device *dev = dev_get_by_index_rcu(net, ifindex);
        bool rc = false;
 
+#if IS_ENABLED(CONFIG_NET_VRF)
+       struct net_device *dev;
+
+       if (ifindex == 0)
+               return false;
+
+       rcu_read_lock();
+
+       dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                rc = netif_is_vrf(dev);
 
+       rcu_read_unlock();
+#endif
        return rc;
 }
 
index 6ec975748742793fd51c274314a208ea5cb697db..80ca889b164e3eab5b42c7249d264f55650e44bf 100644 (file)
@@ -2,6 +2,7 @@
 #define _NFNL_ACCT_H_
 
 #include <uapi/linux/netfilter/nfnetlink_acct.h>
+#include <net/net_namespace.h>
 
 enum {
        NFACCT_NO_QUOTA         = -1,
@@ -11,7 +12,7 @@ enum {
 
 struct nf_acct;
 
-struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
 void nfnl_acct_put(struct nf_acct *acct);
 void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
 extern int nfnl_acct_overquota(const struct sk_buff *skb,
index dc03d77ad23bb2588869251a67c5a7143c874b21..a2f59ec98d24a1af1e8de056305b563b2ff94df7 100644 (file)
 #define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
 #define LOWPAN_NHC_UDP_CS_C    0x04 /* checksum elided */
 
+#define LOWPAN_PRIV_SIZE(llpriv_size)  \
+       (sizeof(struct lowpan_priv) + llpriv_size)
+
+enum lowpan_lltypes {
+       LOWPAN_LLTYPE_BTLE,
+       LOWPAN_LLTYPE_IEEE802154,
+};
+
+struct lowpan_priv {
+       enum lowpan_lltypes lltype;
+
+       /* must be last */
+       u8 priv[0] __aligned(sizeof(void *));
+};
+
+static inline
+struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+{
+       return netdev_priv(dev);
+}
+
 #ifdef DEBUG
 /* print data in line */
 static inline void raw_dump_inline(const char *caller, char *msg,
@@ -372,6 +393,8 @@ lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
        return skb->len + uncomp_header - ret;
 }
 
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
+
 int
 lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
                         const u8 *saddr, const u8 saddr_type,
index 2a6b0919e23f71af5f4660fce0a349bfa09b2fd9..9e1a59e01fa2f6316ae950833974aa628761cf51 100644 (file)
@@ -512,9 +512,11 @@ struct hci_conn_params {
                HCI_AUTO_CONN_DIRECT,
                HCI_AUTO_CONN_ALWAYS,
                HCI_AUTO_CONN_LINK_LOSS,
+               HCI_AUTO_CONN_EXPLICIT,
        } auto_connect;
 
        struct hci_conn *conn;
+       bool explicit_connect;
 };
 
 extern struct list_head hci_dev_list;
@@ -639,6 +641,7 @@ enum {
        HCI_CONN_DROP,
        HCI_CONN_PARAM_REMOVAL_PEND,
        HCI_CONN_NEW_LINK_KEY,
+       HCI_CONN_SCANNING,
 };
 
 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -808,6 +811,26 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
        return NULL;
 }
 
+static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
+{
+       struct hci_conn_hash *h = &hdev->conn_hash;
+       struct hci_conn  *c;
+
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(c, &h->list, list) {
+               if (c->type == LE_LINK && c->state == BT_CONNECT &&
+                   !test_bit(HCI_CONN_SCANNING, &c->flags)) {
+                       rcu_read_unlock();
+                       return c;
+               }
+       }
+
+       rcu_read_unlock();
+
+       return NULL;
+}
+
 int hci_disconnect(struct hci_conn *conn, __u8 reason);
 bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
 void hci_sco_setup(struct hci_conn *conn, __u8 status);
@@ -823,6 +846,9 @@ void hci_chan_del(struct hci_chan *chan);
 void hci_chan_list_flush(struct hci_conn *conn);
 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
 
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+                                    u8 dst_type, u8 sec_level,
+                                    u16 conn_timeout, u8 role);
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                u8 dst_type, u8 sec_level, u16 conn_timeout,
                                u8 role);
@@ -988,6 +1014,9 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev);
 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
                                                  bdaddr_t *addr,
                                                  u8 addr_type);
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+                                                   bdaddr_t *addr,
+                                                   u8 addr_type);
 
 void hci_uuids_clear(struct hci_dev *hdev);
 
index 883fe1e7c5a17e982651a68cfb17cbc0bbe5b367..f0889a2476439553f055b31b6c577b6eb03e4d55 100644 (file)
@@ -2369,8 +2369,7 @@ struct cfg80211_qos_map {
  *     method returns 0.)
  *
  * @mgmt_frame_register: Notify driver that a management frame type was
- *     registered. Note that this callback may not sleep, and cannot run
- *     concurrently with itself.
+ *     registered. The callback is allowed to sleep.
  *
  * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
  *     Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
index 382f94b59f2f706eab23f2f6ebe9c2f007c5ed2e..76b1ffaea863600be09280cc4c2d168dc5bae41f 100644 (file)
@@ -63,6 +63,8 @@ struct cfg802154_ops {
                                         s8 max_frame_retries);
        int     (*set_lbt_mode)(struct wpan_phy *wpan_phy,
                                struct wpan_dev *wpan_dev, bool mode);
+       int     (*set_ackreq_default)(struct wpan_phy *wpan_phy,
+                                     struct wpan_dev *wpan_dev, bool ackreq);
 };
 
 static inline bool
@@ -173,6 +175,9 @@ struct wpan_dev {
        struct list_head list;
        struct net_device *netdev;
 
+       /* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */
+       struct net_device *lowpan_dev;
+
        u32 identifier;
 
        /* MAC PIB */
@@ -193,6 +198,9 @@ struct wpan_dev {
        bool lbt;
 
        bool promiscuous_mode;
+
+       /* fallback for acknowledgment bit setting */
+       bool ackreq;
 };
 
 #define to_phy(_dev)   container_of(_dev, struct wpan_phy, dev)
index 2d1d73cb773e9bd160f5c43bb58981ee8597b87c..9fcaedf994ee2ba5db20a0635c78ceed06f57cc1 100644 (file)
@@ -140,14 +140,16 @@ static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
 
 struct sk_buff;
 void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
-                             __be32 from, __be32 to, int pseudohdr);
+                             __be32 from, __be32 to, bool pseudohdr);
 void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
                               const __be32 *from, const __be32 *to,
-                              int pseudohdr);
+                              bool pseudohdr);
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+                                    __wsum diff, bool pseudohdr);
 
 static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
                                            __be16 from, __be16 to,
-                                           int pseudohdr)
+                                           bool pseudohdr)
 {
        inet_proto_csum_replace4(sum, skb, (__force __be32)from,
                                 (__force __be32)to, pseudohdr);
index bd9b76502458fb6d0c1d3aa79aca262487158d80..b34d812bc5d056d47b7d39181668572cde4f53ca 100644 (file)
@@ -171,6 +171,11 @@ static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
        return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
 }
 
+static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
+{
+       return !!((ds->dsa_port_mask) & (1 << p));
+}
+
 static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
 {
        return ds->phys_port_mask & (1 << p) && ds->ports[p];
index 2578811cef5167e94269bb9967f6b025c824084a..ef8f1d43a2033bc67ab3c8f3b08657191463654e 100644 (file)
@@ -84,12 +84,13 @@ struct dst_entry {
        __u32                   __pad2;
 #endif
 
+#ifdef CONFIG_64BIT
+       struct lwtunnel_state   *lwtstate;
        /*
         * Align __refcnt to a 64 bytes alignment
         * (L1_CACHE_SIZE would be too much)
         */
-#ifdef CONFIG_64BIT
-       long                    __pad_to_align_refcnt[2];
+       long                    __pad_to_align_refcnt[1];
 #endif
        /*
         * __refcnt wants to be on a different cache line from
@@ -98,6 +99,9 @@ struct dst_entry {
        atomic_t                __refcnt;       /* client references    */
        int                     __use;
        unsigned long           lastuse;
+#ifndef CONFIG_64BIT
+       struct lwtunnel_state   *lwtstate;
+#endif
        union {
                struct dst_entry        *next;
                struct rtable __rcu     *rt_next;
index 075f523ff23f44f471261b8539a4f7e219b0e92f..2cb52d562272aa395be9186f676c4dbcaf3c1462 100644 (file)
@@ -23,22 +23,17 @@ static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
        return NULL;
 }
 
-static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb,
-                                                    int family)
+static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb)
 {
        struct metadata_dst *md_dst = skb_metadata_dst(skb);
-       struct rtable *rt;
+       struct dst_entry *dst;
 
        if (md_dst)
                return &md_dst->u.tun_info;
 
-       switch (family) {
-       case AF_INET:
-               rt = (struct rtable *)skb_dst(skb);
-               if (rt && rt->rt_lwtstate)
-                       return lwt_tun_info(rt->rt_lwtstate);
-               break;
-       }
+       dst = skb_dst(skb);
+       if (dst && dst->lwtstate)
+               return lwt_tun_info(dst->lwtstate);
 
        return NULL;
 }
index f305588fc16276fc530e165c3bcb7596e7939f0b..9e0297c4c11da8f1a1332f459c428c705c686bf0 100644 (file)
@@ -130,6 +130,7 @@ struct flowi6 {
 #define flowi6_proto           __fl_common.flowic_proto
 #define flowi6_flags           __fl_common.flowic_flags
 #define flowi6_secid           __fl_common.flowic_secid
+#define flowi6_tun_key         __fl_common.flowic_tun_key
        struct in6_addr         daddr;
        struct in6_addr         saddr;
        __be32                  flowlabel;
index 276328e3daa64a0d493a141e5690eb6dfe18c805..063d30474cf66077a7491ac8efbf4d400070c49e 100644 (file)
@@ -133,7 +133,6 @@ struct rt6_info {
        /* more non-fragment space at head required */
        unsigned short                  rt6i_nfheader_len;
        u8                              rt6i_protocol;
-       struct lwtunnel_state           *rt6i_lwtstate;
 };
 
 static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
index 984dbfa15e13f199682825fa10e0f295be9102ad..224e4ecec91b71eb626e8f37e3785e6ded52c92c 100644 (file)
 #define IPTUNNEL_ERR_TIMEO     (30*HZ)
 
 /* Used to memset ip_tunnel padding. */
-#define IP_TUNNEL_KEY_SIZE                                     \
-       (offsetof(struct ip_tunnel_key, tp_dst) +               \
-        FIELD_SIZEOF(struct ip_tunnel_key, tp_dst))
+#define IP_TUNNEL_KEY_SIZE     offsetofend(struct ip_tunnel_key, tp_dst)
+
+/* Used to memset ipv4 address padding. */
+#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
+#define IP_TUNNEL_KEY_IPV4_PAD_LEN                             \
+       (FIELD_SIZEOF(struct ip_tunnel_key, u) -                \
+        FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
 
 struct ip_tunnel_key {
        __be64                  tun_id;
-       __be32                  ipv4_src;
-       __be32                  ipv4_dst;
+       union {
+               struct {
+                       __be32  src;
+                       __be32  dst;
+               } ipv4;
+               struct {
+                       struct in6_addr src;
+                       struct in6_addr dst;
+               } ipv6;
+       } u;
        __be16                  tun_flags;
-       __u8                    ipv4_tos;
-       __u8                    ipv4_ttl;
+       u8                      tos;            /* TOS for IPv4, TC for IPv6 */
+       u8                      ttl;            /* TTL for IPv4, HL for IPv6 */
        __be16                  tp_src;
        __be16                  tp_dst;
-} __packed __aligned(4); /* Minimize padding. */
+};
 
 /* Indicates whether the tunnel info structure represents receive
  * or transmit tunnel parameters.
@@ -64,8 +76,8 @@ struct ip_tunnel_6rd_parm {
 #endif
 
 struct ip_tunnel_encap {
-       __u16                   type;
-       __u16                   flags;
+       u16                     type;
+       u16                     flags;
        __be16                  sport;
        __be16                  dport;
 };
@@ -95,8 +107,8 @@ struct ip_tunnel {
                                         * arrived */
 
        /* These four fields used only by GRE */
-       __u32           i_seqno;        /* The last seen seqno  */
-       __u32           o_seqno;        /* The last output seqno */
+       u32             i_seqno;        /* The last seen seqno  */
+       u32             o_seqno;        /* The last output seqno */
        int             tun_hlen;       /* Precalculated header length */
        int             mlink;
 
@@ -179,10 +191,12 @@ static inline void __ip_tunnel_info_init(struct ip_tunnel_info *tun_info,
                                         const void *opts, u8 opts_len)
 {
        tun_info->key.tun_id = tun_id;
-       tun_info->key.ipv4_src = saddr;
-       tun_info->key.ipv4_dst = daddr;
-       tun_info->key.ipv4_tos = tos;
-       tun_info->key.ipv4_ttl = ttl;
+       tun_info->key.u.ipv4.src = saddr;
+       tun_info->key.u.ipv4.dst = daddr;
+       memset((unsigned char *)&tun_info->key + IP_TUNNEL_KEY_IPV4_PAD,
+              0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
+       tun_info->key.tos = tos;
+       tun_info->key.ttl = ttl;
        tun_info->key.tun_flags = tun_flags;
 
        /* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
@@ -273,8 +287,8 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
 
 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
 int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
-                 __be32 src, __be32 dst, __u8 proto,
-                 __u8 tos, __u8 ttl, __be16 df, bool xnet);
+                 __be32 src, __be32 dst, u8 proto,
+                 u8 tos, u8 ttl, __be16 df, bool xnet);
 
 struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
                                         int gso_type_mask);
index 33bd30963a95a529353da82309361743d30bc8cf..84348988444839280e64026bd9c28a1af1cdfc45 100644 (file)
 #define LWTUNNEL_HASH_SIZE   (1 << LWTUNNEL_HASH_BITS)
 
 /* lw tunnel state flags */
-#define LWTUNNEL_STATE_OUTPUT_REDIRECT 0x1
+#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0)
+#define LWTUNNEL_STATE_INPUT_REDIRECT  BIT(1)
 
 struct lwtunnel_state {
        __u16           type;
        __u16           flags;
        atomic_t        refcnt;
+       int             (*orig_output)(struct sock *sk, struct sk_buff *skb);
+       int             (*orig_input)(struct sk_buff *);
        int             len;
        __u8            data[0];
 };
@@ -25,6 +28,7 @@ struct lwtunnel_encap_ops {
        int (*build_state)(struct net_device *dev, struct nlattr *encap,
                           struct lwtunnel_state **ts);
        int (*output)(struct sock *sk, struct sk_buff *skb);
+       int (*input)(struct sk_buff *skb);
        int (*fill_encap)(struct sk_buff *skb,
                          struct lwtunnel_state *lwtstate);
        int (*get_encap_size)(struct lwtunnel_state *lwtstate);
@@ -32,6 +36,11 @@ struct lwtunnel_encap_ops {
 };
 
 #ifdef CONFIG_LWTUNNEL
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+       kfree(lws);
+}
+
 static inline struct lwtunnel_state *
 lwtstate_get(struct lwtunnel_state *lws)
 {
@@ -47,7 +56,7 @@ static inline void lwtstate_put(struct lwtunnel_state *lws)
                return;
 
        if (atomic_dec_and_test(&lws->refcnt))
-               kfree(lws);
+               lwtstate_free(lws);
 }
 
 static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
@@ -58,6 +67,13 @@ static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
        return false;
 }
 
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+       if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_INPUT_REDIRECT))
+               return true;
+
+       return false;
+}
 int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
                           unsigned int num);
 int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
@@ -71,10 +87,14 @@ int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
 struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
 int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
 int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
-int lwtunnel_output6(struct sock *sk, struct sk_buff *skb);
+int lwtunnel_input(struct sk_buff *skb);
 
 #else
 
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+}
+
 static inline struct lwtunnel_state *
 lwtstate_get(struct lwtunnel_state *lws)
 {
@@ -90,6 +110,11 @@ static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
        return false;
 }
 
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+       return false;
+}
+
 static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
                                         unsigned int num)
 {
@@ -137,7 +162,7 @@ static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
        return -EOPNOTSUPP;
 }
 
-static inline int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
+static inline int lwtunnel_input(struct sk_buff *skb)
 {
        return -EOPNOTSUPP;
 }
index 6b1077c2a63faaaafe60a7d080024662daeb8f54..e3314e516681ed0733ec212d7464c393222428ef 100644 (file)
@@ -973,6 +973,10 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame.
  *     If this flag is set, the stack cannot do any replay detection
  *     hence the driver or hardware will have to do that.
+ * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
+ *     flag indicates that the PN was verified for replay protection.
+ *     Note that this flag is also currently only supported when a frame
+ *     is also decrypted (ie. @RX_FLAG_DECRYPTED must be set)
  * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
  *     the frame.
  * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
@@ -997,9 +1001,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
  *     number (@ampdu_reference) must be populated and be a distinct number for
  *     each A-MPDU
- * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
- * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
- *     monitoring purposes only
  * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
  *     subframes of a single A-MPDU
  * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
@@ -1039,8 +1040,8 @@ enum mac80211_rx_flags {
        RX_FLAG_NO_SIGNAL_VAL           = BIT(12),
        RX_FLAG_HT_GF                   = BIT(13),
        RX_FLAG_AMPDU_DETAILS           = BIT(14),
-       RX_FLAG_AMPDU_REPORT_ZEROLEN    = BIT(15),
-       RX_FLAG_AMPDU_IS_ZEROLEN        = BIT(16),
+       RX_FLAG_PN_VALIDATED            = BIT(15),
+       /* bit 16 free */
        RX_FLAG_AMPDU_LAST_KNOWN        = BIT(17),
        RX_FLAG_AMPDU_IS_LAST           = BIT(18),
        RX_FLAG_AMPDU_DELIM_CRC_ERROR   = BIT(19),
@@ -1491,8 +1492,10 @@ enum ieee80211_key_flags {
  *     - Temporal Authenticator Rx MIC Key (64 bits)
  * @icv_len: The ICV length for this key type
  * @iv_len: The IV length for this key type
+ * @drv_priv: pointer for driver use
  */
 struct ieee80211_key_conf {
+       void *drv_priv;
        atomic64_t tx_pn;
        u32 cipher;
        u8 icv_len;
@@ -1675,7 +1678,6 @@ struct ieee80211_sta_rates {
  * @tdls: indicates whether the STA is a TDLS peer
  * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
  *     valid if the STA is a TDLS peer in the first place.
- * @mfp: indicates whether the STA uses management frame protection or not.
  * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
  */
 struct ieee80211_sta {
@@ -1693,7 +1695,6 @@ struct ieee80211_sta {
        struct ieee80211_sta_rates __rcu *rates;
        bool tdls;
        bool tdls_initiator;
-       bool mfp;
 
        struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
 
@@ -1888,6 +1889,9 @@ struct ieee80211_txq {
  * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
  *     in one command, mac80211 doesn't have to run separate scans per band.
  *
+ * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
+ *     than then BSS bandwidth for a TDLS link on the base channel.
+ *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
@@ -1920,6 +1924,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_CHANCTX_STA_CSA,
        IEEE80211_HW_SUPPORTS_CLONED_SKBS,
        IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+       IEEE80211_HW_TDLS_WIDER_BW,
 
        /* keep last, obviously */
        NUM_IEEE80211_HW_FLAGS
@@ -3696,20 +3701,28 @@ void ieee80211_free_hw(struct ieee80211_hw *hw);
 void ieee80211_restart_hw(struct ieee80211_hw *hw);
 
 /**
- * ieee80211_napi_add - initialize mac80211 NAPI context
- * @hw: the hardware to initialize the NAPI context on
- * @napi: the NAPI context to initialize
- * @napi_dev: dummy NAPI netdevice, here to not waste the space if the
- *     driver doesn't use NAPI
- * @poll: poll function
- * @weight: default weight
+ * ieee80211_rx_napi - receive frame from NAPI context
+ *
+ * Use this function to hand received frames to mac80211. The receive
+ * buffer in @skb must start with an IEEE 802.11 header. In case of a
+ * paged @skb is used, the driver is recommended to put the ieee80211
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * This function must be called with BHs disabled.
  *
- * See also netif_napi_add().
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ * @napi: the NAPI context
  */
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
-                       struct net_device *napi_dev,
-                       int (*poll)(struct napi_struct *, int),
-                       int weight);
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+                      struct napi_struct *napi);
 
 /**
  * ieee80211_rx - receive frame
@@ -3731,7 +3744,10 @@ void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
  * @hw: the hardware this frame came in on
  * @skb: the buffer to receive, owned by mac80211 after this call
  */
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb);
+static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       ieee80211_rx_napi(hw, skb, NULL);
+}
 
 /**
  * ieee80211_rx_irqsafe - receive frame
@@ -4314,19 +4330,6 @@ void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf,
 void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
                            struct sk_buff *skb, u8 *p2k);
 
-/**
- * ieee80211_aes_cmac_calculate_k1_k2 - calculate the AES-CMAC sub keys
- *
- * This function computes the two AES-CMAC sub-keys, based on the
- * previously installed master key.
- *
- * @keyconf: the parameter passed with the set key
- * @k1: a buffer to be filled with the 1st sub-key
- * @k2: a buffer to be filled with the 2nd sub-key
- */
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
-                                       u8 *k1, u8 *k2);
-
 /**
  * ieee80211_get_key_tx_seq - get key TX sequence counter
  *
index b3a7751251b4cb9ce1a7fcb1d3999a63f4ff5074..aba5695fadb00df5fb83ff2439474c1f7ecda65a 100644 (file)
@@ -182,7 +182,8 @@ int ndisc_rcv(struct sk_buff *skb);
 
 void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
                   const struct in6_addr *solicit,
-                  const struct in6_addr *daddr, const struct in6_addr *saddr);
+                  const struct in6_addr *daddr, const struct in6_addr *saddr,
+                  struct sk_buff *oskb);
 
 void ndisc_send_rs(struct net_device *dev,
                   const struct in6_addr *saddr, const struct in6_addr *daddr);
index e951453e0a2378caf405f62910dd91d7c768ea3b..2dcea635ecce3ead337ffa5fd2ba68096db9f997 100644 (file)
@@ -118,6 +118,9 @@ struct net {
 #endif
        struct sock             *nfnl;
        struct sock             *nfnl_stash;
+#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
+       struct list_head        nfnl_acct_list;
+#endif
 #endif
 #ifdef CONFIG_WEXT_CORE
        struct sk_buff_head     wext_nlevents;
diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h
new file mode 100644 (file)
index 0000000..42008f1
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef _NF_DUP_IPV4_H_
+#define _NF_DUP_IPV4_H_
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+                const struct in_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h
new file mode 100644 (file)
index 0000000..ed6bd66
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef _NF_DUP_IPV6_H_
+#define _NF_DUP_IPV6_H_
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+                const struct in6_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV6_H_ */
index 37cd3911d5c59e97fe6328a2852ea17040f4dbc3..f5e23c6dee8bcbcc66705a4d5cefdaef311eb98b 100644 (file)
@@ -250,8 +250,12 @@ void nf_ct_untracked_status_or(unsigned long bits);
 void nf_ct_iterate_cleanup(struct net *net,
                           int (*iter)(struct nf_conn *i, void *data),
                           void *data, u32 portid, int report);
+
+struct nf_conntrack_zone;
+
 void nf_conntrack_free(struct nf_conn *ct);
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+                                  const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp);
@@ -291,7 +295,9 @@ extern unsigned int nf_conntrack_max;
 extern unsigned int nf_conntrack_hash_rnd;
 void init_nf_conntrack_hash_rnd(void);
 
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+                                const struct nf_conntrack_zone *zone,
+                                gfp_t flags);
 
 #define NF_CT_STAT_INC(net, count)       __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
index f2f0fa3bb15073edeb0af54087087539842e17f1..c03f9c42b3cd32be938e282e47cf66e63536a8c3 100644 (file)
@@ -52,7 +52,8 @@ bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
 
 /* Find a connection corresponding to a tuple. */
 struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net,
+                     const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple);
 
 int __nf_conntrack_confirm(struct sk_buff *skb);
index 3f3aecbc8632e935e16e6517bc23eb8e07a9b16d..dce56f09ac9aed9c0f7d4a1d99acc9602e33ea5f 100644 (file)
@@ -4,7 +4,9 @@
 
 #ifndef _NF_CONNTRACK_EXPECT_H
 #define _NF_CONNTRACK_EXPECT_H
+
 #include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 
 extern unsigned int nf_ct_expect_hsize;
 extern unsigned int nf_ct_expect_max;
@@ -76,15 +78,18 @@ int nf_conntrack_expect_init(void);
 void nf_conntrack_expect_fini(void);
 
 struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+                   const struct nf_conntrack_zone *zone,
                    const struct nf_conntrack_tuple *tuple);
 
 struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+                     const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple);
 
 struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+                      const struct nf_conntrack_zone *zone,
                       const struct nf_conntrack_tuple *tuple);
 
 void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
index 034efe8d45a544ac9557724b4e40359398dd1279..5316c7b3a374db1aa0b724e4a133a8f656bbb8da 100644 (file)
 #ifndef _NF_CONNTRACK_ZONES_H
 #define _NF_CONNTRACK_ZONES_H
 
-#define NF_CT_DEFAULT_ZONE     0
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-#include <net/netfilter/nf_conntrack_extend.h>
+#define NF_CT_DEFAULT_ZONE_ID  0
+
+#define NF_CT_ZONE_DIR_ORIG    (1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL    (1 << IP_CT_DIR_REPLY)
+
+#define NF_CT_DEFAULT_ZONE_DIR (NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL)
+
+#define NF_CT_FLAG_MARK                1
 
 struct nf_conntrack_zone {
        u16     id;
+       u8      flags;
+       u8      dir;
 };
 
-static inline u16 nf_ct_zone(const struct nf_conn *ct)
+extern const struct nf_conntrack_zone nf_ct_zone_dflt;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack_extend.h>
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone(const struct nf_conn *ct)
 {
+       const struct nf_conntrack_zone *nf_ct_zone = NULL;
+
 #ifdef CONFIG_NF_CONNTRACK_ZONES
-       struct nf_conntrack_zone *nf_ct_zone;
        nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
-       if (nf_ct_zone)
-               return nf_ct_zone->id;
 #endif
-       return NF_CT_DEFAULT_ZONE;
+       return nf_ct_zone ? nf_ct_zone : &nf_ct_zone_dflt;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
+{
+       zone->id = id;
+       zone->flags = flags;
+       zone->dir = dir;
+
+       return zone;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
+               struct nf_conntrack_zone *tmp)
+{
+       const struct nf_conntrack_zone *zone;
+
+       if (!tmpl)
+               return &nf_ct_zone_dflt;
+
+       zone = nf_ct_zone(tmpl);
+       if (zone->flags & NF_CT_FLAG_MARK)
+               zone = nf_ct_zone_init(tmp, skb->mark, zone->dir, 0);
+
+       return zone;
+}
+
+static inline int nf_ct_zone_add(struct nf_conn *ct, gfp_t flags,
+                                const struct nf_conntrack_zone *info)
+{
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+       struct nf_conntrack_zone *nf_ct_zone;
+
+       nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, flags);
+       if (!nf_ct_zone)
+               return -ENOMEM;
+
+       nf_ct_zone_init(nf_ct_zone, info->id, info->dir,
+                       info->flags);
+#endif
+       return 0;
 }
 
-#endif /* CONFIG_NF_CONNTRACK || CONFIG_NF_CONNTRACK_MODULE */
+static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
+                                         enum ip_conntrack_dir dir)
+{
+       return zone->dir & (1 << dir);
+}
+
+static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
+                               enum ip_conntrack_dir dir)
+{
+       return nf_ct_zone_matches_dir(zone, dir) ?
+              zone->id : NF_CT_DEFAULT_ZONE_ID;
+}
+
+static inline bool nf_ct_zone_equal(const struct nf_conn *a,
+                                   const struct nf_conntrack_zone *b,
+                                   enum ip_conntrack_dir dir)
+{
+       return nf_ct_zone_id(nf_ct_zone(a), dir) ==
+              nf_ct_zone_id(b, dir);
+}
+
+static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
+                                       const struct nf_conntrack_zone *b)
+{
+       return nf_ct_zone(a)->id == b->id;
+}
+#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */
 #endif /* _NF_CONNTRACK_ZONES_H */
diff --git a/include/net/netfilter/nft_dup.h b/include/net/netfilter/nft_dup.h
new file mode 100644 (file)
index 0000000..6b84cf6
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _NFT_DUP_H_
+#define _NFT_DUP_H_
+
+struct nft_dup_inet {
+       enum nft_registers      sreg_addr:8;
+       enum nft_registers      sreg_dev:8;
+};
+
+#endif /* _NFT_DUP_H_ */
index b0ab530d28cde08478beb19881965c9a24e030cc..cf2713d8b975f11c6f7ba6725af165a53b5e82bb 100644 (file)
@@ -52,6 +52,8 @@ enum nl802154_commands {
 
        NL802154_CMD_SET_LBT_MODE,
 
+       NL802154_CMD_SET_ACKREQ_DEFAULT,
+
        /* add new commands above here */
 
        /* used to define NL802154_CMD_MAX below */
@@ -104,6 +106,8 @@ enum nl802154_attrs {
 
        NL802154_ATTR_SUPPORTED_COMMANDS,
 
+       NL802154_ATTR_ACKREQ_DEFAULT,
+
        /* add attributes here, update the policy in nl802154.c */
 
        __NL802154_ATTR_AFTER_LAST,
index 6dda2c1bf8c6d12951b7e3d81273ca77d16a1720..395d79bb556cf11b62a929cfb03172e30b84d4b1 100644 (file)
@@ -66,7 +66,6 @@ struct rtable {
 
        struct list_head        rt_uncached;
        struct uncached_list    *rt_uncached_list;
-       struct lwtunnel_state   *rt_lwtstate;
 };
 
 static inline bool rt_is_input_route(const struct rtable *rt)
index 0484d29d45896a15024d0fad95ea8c339e7a7d50..5bfb16237fd772cd83c80f4a867c7c4cb5af3bb9 100644 (file)
@@ -24,7 +24,6 @@ struct slave {
 
 struct slave_queue {
        struct list_head        all_slaves;
-       int                     num_slaves;
 };
 
 struct net_vrf {
@@ -44,9 +43,9 @@ static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
        if (!dev)
                return 0;
 
-       if (netif_is_vrf(dev))
+       if (netif_is_vrf(dev)) {
                ifindex = dev->ifindex;
-       else {
+       } else {
                vrf_ptr = rcu_dereference(dev->vrf_ptr);
                if (vrf_ptr)
                        ifindex = vrf_ptr->ifindex;
@@ -55,6 +54,17 @@ static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
        return ifindex;
 }
 
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+       int ifindex;
+
+       rcu_read_lock();
+       ifindex = vrf_master_ifindex_rcu(dev);
+       rcu_read_unlock();
+
+       return ifindex;
+}
+
 /* called with rcu_read_lock */
 static inline int vrf_dev_table_rcu(const struct net_device *dev)
 {
@@ -81,6 +91,25 @@ static inline int vrf_dev_table(const struct net_device *dev)
        return tb_id;
 }
 
+static inline int vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+       struct net_device *dev;
+       int tb_id = 0;
+
+       if (!ifindex)
+               return 0;
+
+       rcu_read_lock();
+
+       dev = dev_get_by_index_rcu(net, ifindex);
+       if (dev)
+               tb_id = vrf_dev_table_rcu(dev);
+
+       rcu_read_unlock();
+
+       return tb_id;
+}
+
 /* called with rtnl */
 static inline int vrf_dev_table_rtnl(const struct net_device *dev)
 {
@@ -115,6 +144,11 @@ static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
        return 0;
 }
 
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+       return 0;
+}
+
 static inline int vrf_dev_table_rcu(const struct net_device *dev)
 {
        return 0;
@@ -125,6 +159,11 @@ static inline int vrf_dev_table(const struct net_device *dev)
        return 0;
 }
 
+static inline int vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+       return 0;
+}
+
 static inline int vrf_dev_table_rtnl(const struct net_device *dev)
 {
        return 0;
index e4534f1b2d8c364ec0a7db9e5444dbc5a4a71179..6b3234599a2c52463d91d985a283db7f5b1d417a 100644 (file)
@@ -161,6 +161,7 @@ struct vxlan_dev {
        struct timer_list age_timer;
        spinlock_t        hash_lock;
        unsigned int      addrcnt;
+       struct gro_cells  gro_cells;
 
        struct vxlan_config     cfg;
 
@@ -241,3 +242,8 @@ static inline void vxlan_get_rx_port(struct net_device *netdev)
 }
 #endif
 #endif
+
+static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
+{
+       return vs->sock->sk->sk_family;
+}
index f0ee97eec24d28625d9c3f714ab18a72b0b8f125..312e3fee9ccfc098f70cd8ec6edb87881e1d1f99 100644 (file)
@@ -285,10 +285,13 @@ struct xfrm_policy_afinfo {
        unsigned short          family;
        struct dst_ops          *dst_ops;
        void                    (*garbage_collect)(struct net *net);
-       struct dst_entry        *(*dst_lookup)(struct net *net, int tos,
+       struct dst_entry        *(*dst_lookup)(struct net *net,
+                                              int tos, int oif,
                                               const xfrm_address_t *saddr,
                                               const xfrm_address_t *daddr);
-       int                     (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
+       int                     (*get_saddr)(struct net *net, int oif,
+                                            xfrm_address_t *saddr,
+                                            xfrm_address_t *daddr);
        void                    (*decode_session)(struct sk_buff *skb,
                                                  struct flowi *fl,
                                                  int reverse);
index 4942710ef720ea5716e8cc6ebf0df941e22500ba..8d1d7fa67ec48bad6872be07258066f9410eec6e 100644 (file)
@@ -28,7 +28,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
                                   u64 * info_out);
 
 extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-extern void scsi_set_sense_information(u8 *buf, u64 info);
 
 extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
 
index 865a141b118b15874e27b0e56473bac8854c4823..427bc41df3aef3a3f931bd94830f027bafea2661 100644 (file)
@@ -141,6 +141,8 @@ struct snd_soc_tplg_ops {
        int io_ops_count;
 };
 
+#ifdef CONFIG_SND_SOC_TOPOLOGY
+
 /* gets a pointer to data from the firmware block header */
 static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
 {
@@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
        const struct snd_soc_tplg_widget_events *events, int num_events,
        u16 event_type);
 
+#else
+
+static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
+                                               u32 index)
+{
+       return 0;
+}
+
+#endif
+
 #endif
index d3d715f8c88f6d57c4318dc5b001e8efad2d074f..9e7edfd8141e5dea69129807f46b89b2b637ead9 100644 (file)
@@ -55,6 +55,7 @@ struct sockaddr_ll {
 #define PACKET_TX_HAS_OFF              19
 #define PACKET_QDISC_BYPASS            20
 #define PACKET_ROLLOVER_STATS          21
+#define PACKET_FANOUT_DATA             22
 
 #define PACKET_FANOUT_HASH             0
 #define PACKET_FANOUT_LB               1
@@ -62,6 +63,8 @@ struct sockaddr_ll {
 #define PACKET_FANOUT_ROLLOVER         3
 #define PACKET_FANOUT_RND              4
 #define PACKET_FANOUT_QM               5
+#define PACKET_FANOUT_CBPF             6
+#define PACKET_FANOUT_EBPF             7
 #define PACKET_FANOUT_FLAG_ROLLOVER    0x1000
 #define PACKET_FANOUT_FLAG_DEFRAG      0x8000
 
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
new file mode 100644 (file)
index 0000000..7ed9e67
--- /dev/null
@@ -0,0 +1,15 @@
+/* ila.h - ILA Interface */
+
+#ifndef _UAPI_LINUX_ILA_H
+#define _UAPI_LINUX_ILA_H
+
+enum {
+       ILA_ATTR_UNSPEC,
+       ILA_ATTR_LOCATOR,                       /* u64 */
+
+       __ILA_ATTR_MAX,
+};
+
+#define ILA_ATTR_MAX           (__ILA_ATTR_MAX - 1)
+
+#endif /* _UAPI_LINUX_ILA_H */
index 31377bbea3f8fc51caa41a8e7f34e7bf283da9ff..34141a5dfe745877d1e3905373a2255e93b888ab 100644 (file)
@@ -7,10 +7,41 @@ enum lwtunnel_encap_types {
        LWTUNNEL_ENCAP_NONE,
        LWTUNNEL_ENCAP_MPLS,
        LWTUNNEL_ENCAP_IP,
+       LWTUNNEL_ENCAP_ILA,
+       LWTUNNEL_ENCAP_IP6,
        __LWTUNNEL_ENCAP_MAX,
 };
 
 #define LWTUNNEL_ENCAP_MAX (__LWTUNNEL_ENCAP_MAX - 1)
 
+enum lwtunnel_ip_t {
+       LWTUNNEL_IP_UNSPEC,
+       LWTUNNEL_IP_ID,
+       LWTUNNEL_IP_DST,
+       LWTUNNEL_IP_SRC,
+       LWTUNNEL_IP_TTL,
+       LWTUNNEL_IP_TOS,
+       LWTUNNEL_IP_SPORT,
+       LWTUNNEL_IP_DPORT,
+       LWTUNNEL_IP_FLAGS,
+       __LWTUNNEL_IP_MAX,
+};
+
+#define LWTUNNEL_IP_MAX (__LWTUNNEL_IP_MAX - 1)
+
+enum lwtunnel_ip6_t {
+       LWTUNNEL_IP6_UNSPEC,
+       LWTUNNEL_IP6_ID,
+       LWTUNNEL_IP6_DST,
+       LWTUNNEL_IP6_SRC,
+       LWTUNNEL_IP6_HOPLIMIT,
+       LWTUNNEL_IP6_TC,
+       LWTUNNEL_IP6_SPORT,
+       LWTUNNEL_IP6_DPORT,
+       LWTUNNEL_IP6_FLAGS,
+       __LWTUNNEL_IP6_MAX,
+};
+
+#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
 
 #endif /* _UAPI_LWTUNNEL_H_ */
index a99e6a9971408014514c21cc338df70ea5421f34..d8c8a7c9d88a7068c00d2b1cebe251c3c3c0dde4 100644 (file)
@@ -756,16 +756,25 @@ enum nft_ct_attributes {
 };
 #define NFTA_CT_MAX            (__NFTA_CT_MAX - 1)
 
+enum nft_limit_type {
+       NFT_LIMIT_PKTS,
+       NFT_LIMIT_PKT_BYTES
+};
+
 /**
  * enum nft_limit_attributes - nf_tables limit expression netlink attributes
  *
  * @NFTA_LIMIT_RATE: refill rate (NLA_U64)
  * @NFTA_LIMIT_UNIT: refill unit (NLA_U64)
+ * @NFTA_LIMIT_BURST: burst (NLA_U32)
+ * @NFTA_LIMIT_TYPE: type of limit (NLA_U32: enum nft_limit_type)
  */
 enum nft_limit_attributes {
        NFTA_LIMIT_UNSPEC,
        NFTA_LIMIT_RATE,
        NFTA_LIMIT_UNIT,
+       NFTA_LIMIT_BURST,
+       NFTA_LIMIT_TYPE,
        __NFTA_LIMIT_MAX
 };
 #define NFTA_LIMIT_MAX         (__NFTA_LIMIT_MAX - 1)
@@ -935,6 +944,20 @@ enum nft_redir_attributes {
 };
 #define NFTA_REDIR_MAX         (__NFTA_REDIR_MAX - 1)
 
+/**
+ * enum nft_dup_attributes - nf_tables dup expression netlink attributes
+ *
+ * @NFTA_DUP_SREG_ADDR: source register of address (NLA_U32: nft_registers)
+ * @NFTA_DUP_SREG_DEV: source register of output interface (NLA_U32: nft_register)
+ */
+enum nft_dup_attributes {
+       NFTA_DUP_UNSPEC,
+       NFTA_DUP_SREG_ADDR,
+       NFTA_DUP_SREG_DEV,
+       __NFTA_DUP_MAX
+};
+#define NFTA_DUP_MAX           (__NFTA_DUP_MAX - 1)
+
 /**
  * enum nft_gen_attributes - nf_tables ruleset generation attributes
  *
index acad6c52a6521d0fe81d1078a95fc7bb032d796c..c1a4e1441a25416e960349414b6a71e2c4409189 100644 (file)
@@ -61,6 +61,7 @@ enum ctattr_tuple {
        CTA_TUPLE_UNSPEC,
        CTA_TUPLE_IP,
        CTA_TUPLE_PROTO,
+       CTA_TUPLE_ZONE,
        __CTA_TUPLE_MAX
 };
 #define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1)
index 5a688c1ca4d78e1cab449b26f8f3d2afdd190161..9e520418b858d5f5a355ee8514f5f1d3ee4a827f 100644 (file)
@@ -6,7 +6,13 @@
 enum {
        XT_CT_NOTRACK           = 1 << 0,
        XT_CT_NOTRACK_ALIAS     = 1 << 1,
-       XT_CT_MASK              = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS,
+       XT_CT_ZONE_DIR_ORIG     = 1 << 2,
+       XT_CT_ZONE_DIR_REPL     = 1 << 3,
+       XT_CT_ZONE_MARK         = 1 << 4,
+
+       XT_CT_MASK              = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS |
+                                 XT_CT_ZONE_DIR_ORIG | XT_CT_ZONE_DIR_REPL |
+                                 XT_CT_ZONE_MARK,
 };
 
 struct xt_ct_target_info {
index 47d24cb3fbc1f8017f715dff06ab5aa5f977f6b0..0d3d3cc43356e128bc618acbde912fe6b5524ff0 100644 (file)
@@ -286,21 +286,6 @@ enum rt_class_t {
 
 /* Routing message attributes */
 
-enum ip_tunnel_t {
-       IP_TUN_UNSPEC,
-       IP_TUN_ID,
-       IP_TUN_DST,
-       IP_TUN_SRC,
-       IP_TUN_TTL,
-       IP_TUN_TOS,
-       IP_TUN_SPORT,
-       IP_TUN_DPORT,
-       IP_TUN_FLAGS,
-       __IP_TUN_MAX,
-};
-
-#define IP_TUN_MAX (__IP_TUN_MAX - 1)
-
 enum rtattr_type_t {
        RTA_UNSPEC,
        RTA_DST,
index 51b8066a223b504ebf14ec287da3e56109a0e674..247c50bd60f0d067ad8884dbe4574adf0bfcf596 100644 (file)
 #include <linux/types.h>
 #include <sound/asound.h>
 
+#ifndef __KERNEL__
+#error This API is an early revision and not enabled in the current
+#error kernel release, it will be enabled in a future kernel version
+#error with incompatible changes to what is here.
+#endif
+
 /*
  * Maximum number of channels topology kcontrol can represent.
  */
index bc3d530cb23efacb2e5695ad85a9bd3898524fa2..b471e5a3863ddbca70f2bf4dee22f40df0345fbe 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -252,6 +252,16 @@ static void sem_rcu_free(struct rcu_head *head)
        ipc_rcu_free(head);
 }
 
+/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()      smp_rmb()
+
 /*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
                sem = sma->sem_base + i;
                spin_unlock_wait(&sem->lock);
        }
+       ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                /* Then check that the global lock is free */
                if (!spin_is_locked(&sma->sem_perm.lock)) {
                        /*
-                        * The ipc object lock check must be visible on all
-                        * cores before rechecking the complex count.  Otherwise
-                        * we can race with  another thread that does:
+                        * We need a memory barrier with acquire semantics,
+                        * otherwise we can race with another thread that does:
                         *      complex_count++;
                         *      spin_unlock(sem_perm.lock);
                         */
-                       smp_rmb();
+                       ipc_smp_acquire__after_spin_is_unlocked();
 
                        /*
                         * Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
                rcu_read_lock();
                un = list_entry_rcu(ulp->list_proc.next,
                                    struct sem_undo, list_proc);
-               if (&un->list_proc == &ulp->list_proc)
-                       semid = -1;
-                else
-                       semid = un->semid;
+               if (&un->list_proc == &ulp->list_proc) {
+                       /*
+                        * We must wait for freeary() before freeing this ulp,
+                        * in case we raced with last sem_undo. There is a small
+                        * possibility where we exit while freeary() didn't
+                        * finish unlocking sem_undo_list.
+                        */
+                       spin_unlock_wait(&ulp->lock);
+                       rcu_read_unlock();
+                       break;
+               }
+               spin_lock(&ulp->lock);
+               semid = un->semid;
+               spin_unlock(&ulp->lock);
 
+               /* exit_sem raced with IPC_RMID, nothing to do */
                if (semid == -1) {
                        rcu_read_unlock();
-                       break;
+                       continue;
                }
 
-               sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+               sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
                /* exit_sem raced with IPC_RMID, nothing to do */
                if (IS_ERR(sma)) {
                        rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
                ipc_assert_locked_object(&sma->sem_perm);
                list_del(&un->list_id);
 
-               spin_lock(&ulp->lock);
+               /* we are the last process using this ulp, acquiring ulp->lock
+                * isn't required. Besides that, we are also protected against
+                * IPC_RMID as we hold sma->sem_perm lock now
+                */
                list_del_rcu(&un->list_proc);
-               spin_unlock(&ulp->lock);
 
                /* perform adjustments registered in un */
                for (i = 0; i < sma->sem_nsems; i++) {
index ee14e3a35a2994399edf176e7775e778c395e592..f0acff0f66c91380412dcbc1c899c94b1d3236b0 100644 (file)
@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
        spin_unlock_irq(&callback_lock);
 
        /* use trialcs->mems_allowed as a temp variable */
-       update_nodemasks_hier(cs, &cs->mems_allowed);
+       update_nodemasks_hier(cs, &trialcs->mems_allowed);
 done:
        return retval;
 }
index e2c6a8886d4d376450b61f8a3347427484ab392d..a1339b13c578516c64b5a49f39bec55ba8eda989 100644 (file)
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
 
        perf_pmu_disable(event->pmu);
 
-       event->tstamp_running += tstamp - event->tstamp_stopped;
-
        perf_set_shadow_time(event, ctx, tstamp);
 
        perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
                goto out;
        }
 
+       event->tstamp_running += tstamp - event->tstamp_stopped;
+
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        if (!ctx->nr_active++)
@@ -4011,28 +4011,21 @@ static void perf_event_for_each(struct perf_event *event,
                perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-       struct perf_event_context *ctx = event->ctx;
-       int ret = 0, active;
+struct period_event {
+       struct perf_event *event;
        u64 value;
+};
 
-       if (!is_sampling_event(event))
-               return -EINVAL;
-
-       if (copy_from_user(&value, arg, sizeof(value)))
-               return -EFAULT;
-
-       if (!value)
-               return -EINVAL;
+static int __perf_event_period(void *info)
+{
+       struct period_event *pe = info;
+       struct perf_event *event = pe->event;
+       struct perf_event_context *ctx = event->ctx;
+       u64 value = pe->value;
+       bool active;
 
-       raw_spin_lock_irq(&ctx->lock);
+       raw_spin_lock(&ctx->lock);
        if (event->attr.freq) {
-               if (value > sysctl_perf_event_sample_rate) {
-                       ret = -EINVAL;
-                       goto unlock;
-               }
-
                event->attr.sample_freq = value;
        } else {
                event->attr.sample_period = value;
@@ -4051,11 +4044,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
                event->pmu->start(event, PERF_EF_RELOAD);
                perf_pmu_enable(ctx->pmu);
        }
+       raw_spin_unlock(&ctx->lock);
 
-unlock:
+       return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+       struct period_event pe = { .event = event, };
+       struct perf_event_context *ctx = event->ctx;
+       struct task_struct *task;
+       u64 value;
+
+       if (!is_sampling_event(event))
+               return -EINVAL;
+
+       if (copy_from_user(&value, arg, sizeof(value)))
+               return -EFAULT;
+
+       if (!value)
+               return -EINVAL;
+
+       if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+               return -EINVAL;
+
+       task = ctx->task;
+       pe.value = value;
+
+       if (!task) {
+               cpu_function_call(event->cpu, __perf_event_period, &pe);
+               return 0;
+       }
+
+retry:
+       if (!task_function_call(task, __perf_event_period, &pe))
+               return 0;
+
+       raw_spin_lock_irq(&ctx->lock);
+       if (ctx->is_active) {
+               raw_spin_unlock_irq(&ctx->lock);
+               task = ctx->task;
+               goto retry;
+       }
+
+       __perf_event_period(&pe);
        raw_spin_unlock_irq(&ctx->lock);
 
-       return ret;
+       return 0;
 }
 
 static const struct file_operations perf_fops;
@@ -4793,12 +4828,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+       /* only the parent has fasync state */
+       if (event->parent)
+               event = event->parent;
+       return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
        ring_buffer_wakeup(event);
 
        if (event->pending_kill) {
-               kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+               kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
                event->pending_kill = 0;
        }
 }
@@ -6177,7 +6220,7 @@ static int __perf_event_overflow(struct perf_event *event,
        else
                perf_event_output(event, data, regs);
 
-       if (event->fasync && event->pending_kill) {
+       if (*perf_event_fasync(event) && event->pending_kill) {
                event->pending_wakeup = 1;
                irq_work_queue(&event->pending);
        }
index b2be01b1aa9dcb7a70792fa381c264b229a106d0..c8aa3f75bc4db8ad7a2242aae6406bfd6f86f8c5 100644 (file)
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
                rb->aux_priv = NULL;
        }
 
-       for (pg = 0; pg < rb->aux_nr_pages; pg++)
-               rb_free_aux_page(rb, pg);
+       if (rb->aux_nr_pages) {
+               for (pg = 0; pg < rb->aux_nr_pages; pg++)
+                       rb_free_aux_page(rb, pg);
 
-       kfree(rb->aux_pages);
-       rb->aux_nr_pages = 0;
+               kfree(rb->aux_pages);
+               rb->aux_nr_pages = 0;
+       }
 }
 
 void rb_free_aux(struct ring_buffer *rb)
index 04ab18151cc8fa174a5859124ee07c144f33d505..df19ae4debd09c134d438b57e4ead7c71462c2b6 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/hash.h>
 #include <linux/bootmem.h>
+#include <linux/debug_locks.h>
 
 /*
  * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
        struct __qspinlock *l = (void *)lock;
        struct pv_node *node;
+       u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
 
        /*
         * We must not unlock if SLOW, because in that case we must first
         * unhash. Otherwise it would be possible to have multiple @lock
         * entries, which would be BAD.
         */
-       if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+       if (likely(lockval == _Q_LOCKED_VAL))
                return;
 
+       if (unlikely(lockval != _Q_SLOW_VAL)) {
+               if (debug_locks_silent)
+                       return;
+               WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+               return;
+       }
+
        /*
         * Since the above failed to release, this must be the SLOW path.
         * Therefore start by looking up the blocked node and unhashing it.
index 3a2ef67db6c724f12b6d4ac0550ebb5fd58c5c58..278890dd104980514a7ba379f8be338a178100aa 100644 (file)
@@ -460,16 +460,6 @@ config ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 config LRU_CACHE
        tristate
 
-config AVERAGE
-       bool "Averaging functions"
-       help
-         This option is provided for the case where no in-kernel-tree
-         modules require averaging functions, but a module built outside
-         the kernel tree does. Such modules that use library averaging
-         functions require Y here.
-
-         If unsure, say N.
-
 config CLZ_TAB
        bool
 
diff --git a/lib/average.c b/lib/average.c
deleted file mode 100644 (file)
index 114d1be..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * lib/average.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2.  See the file COPYING for more details.
- */
-
-#include <linux/export.h>
-#include <linux/average.h>
-#include <linux/kernel.h>
-#include <linux/bug.h>
-#include <linux/log2.h>
-
-/**
- * DOC: Exponentially Weighted Moving Average (EWMA)
- *
- * These are generic functions for calculating Exponentially Weighted Moving
- * Averages (EWMA). We keep a structure with the EWMA parameters and a scaled
- * up internal representation of the average value to prevent rounding errors.
- * The factor for scaling up and the exponential weight (or decay rate) have to
- * be specified thru the init fuction. The structure should not be accessed
- * directly but only thru the helper functions.
- */
-
-/**
- * ewma_init() - Initialize EWMA parameters
- * @avg: Average structure
- * @factor: Factor to use for the scaled up internal value. The maximum value
- *     of averages can be ULONG_MAX/(factor*weight). For performance reasons
- *     factor has to be a power of 2.
- * @weight: Exponential weight, or decay rate. This defines how fast the
- *     influence of older values decreases. For performance reasons weight has
- *     to be a power of 2.
- *
- * Initialize the EWMA parameters for a given struct ewma @avg.
- */
-void ewma_init(struct ewma *avg, unsigned long factor, unsigned long weight)
-{
-       WARN_ON(!is_power_of_2(weight) || !is_power_of_2(factor));
-
-       avg->weight = ilog2(weight);
-       avg->factor = ilog2(factor);
-       avg->internal = 0;
-}
-EXPORT_SYMBOL(ewma_init);
-
-/**
- * ewma_add() - Exponentially weighted moving average (EWMA)
- * @avg: Average structure
- * @val: Current value
- *
- * Add a sample to the average.
- */
-struct ewma *ewma_add(struct ewma *avg, unsigned long val)
-{
-       unsigned long internal = ACCESS_ONCE(avg->internal);
-
-       ACCESS_ONCE(avg->internal) = internal ?
-               (((internal << avg->weight) - internal) +
-                       (val << avg->factor)) >> avg->weight :
-               (val << avg->factor);
-       return avg;
-}
-EXPORT_SYMBOL(ewma_add);
index 9af7cefb195d3d3537366396d0af31180292ece9..8c1ad1ced72cc1deaed9439a789ec5e672e65ccf 100644 (file)
 #include <linux/init.h>
 #include <linux/jhash.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/rhashtable.h>
+#include <linux/semaphore.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #define MAX_ENTRIES    1000000
 #define TEST_INSERT_FAIL INT_MAX
@@ -45,11 +48,21 @@ static int size = 8;
 module_param(size, int, 0);
 MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
 
+static int tcount = 10;
+module_param(tcount, int, 0);
+MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");
+
 struct test_obj {
        int                     value;
        struct rhash_head       node;
 };
 
+struct thread_data {
+       int id;
+       struct task_struct *task;
+       struct test_obj *objs;
+};
+
 static struct test_obj array[MAX_ENTRIES];
 
 static struct rhashtable_params test_rht_params = {
@@ -60,6 +73,9 @@ static struct rhashtable_params test_rht_params = {
        .nulls_base = (3U << RHT_BASE_SHIFT),
 };
 
+static struct semaphore prestart_sem;
+static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
+
 static int __init test_rht_lookup(struct rhashtable *ht)
 {
        unsigned int i;
@@ -200,10 +216,97 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 
 static struct rhashtable ht;
 
+static int thread_lookup_test(struct thread_data *tdata)
+{
+       int i, err = 0;
+
+       for (i = 0; i < entries; i++) {
+               struct test_obj *obj;
+               int key = (tdata->id << 16) | i;
+
+               obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
+               if (obj && (tdata->objs[i].value == TEST_INSERT_FAIL)) {
+                       pr_err("  found unexpected object %d\n", key);
+                       err++;
+               } else if (!obj && (tdata->objs[i].value != TEST_INSERT_FAIL)) {
+                       pr_err("  object %d not found!\n", key);
+                       err++;
+               } else if (obj && (obj->value != key)) {
+                       pr_err("  wrong object returned (got %d, expected %d)\n",
+                              obj->value, key);
+                       err++;
+               }
+       }
+       return err;
+}
+
+static int threadfunc(void *data)
+{
+       int i, step, err = 0, insert_fails = 0;
+       struct thread_data *tdata = data;
+
+       up(&prestart_sem);
+       if (down_interruptible(&startup_sem))
+               pr_err("  thread[%d]: down_interruptible failed\n", tdata->id);
+
+       for (i = 0; i < entries; i++) {
+               tdata->objs[i].value = (tdata->id << 16) | i;
+               err = rhashtable_insert_fast(&ht, &tdata->objs[i].node,
+                                            test_rht_params);
+               if (err == -ENOMEM || err == -EBUSY) {
+                       tdata->objs[i].value = TEST_INSERT_FAIL;
+                       insert_fails++;
+               } else if (err) {
+                       pr_err("  thread[%d]: rhashtable_insert_fast failed\n",
+                              tdata->id);
+                       goto out;
+               }
+       }
+       if (insert_fails)
+               pr_info("  thread[%d]: %d insert failures\n",
+                       tdata->id, insert_fails);
+
+       err = thread_lookup_test(tdata);
+       if (err) {
+               pr_err("  thread[%d]: rhashtable_lookup_test failed\n",
+                      tdata->id);
+               goto out;
+       }
+
+       for (step = 10; step > 0; step--) {
+               for (i = 0; i < entries; i += step) {
+                       if (tdata->objs[i].value == TEST_INSERT_FAIL)
+                               continue;
+                       err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
+                                                    test_rht_params);
+                       if (err) {
+                               pr_err("  thread[%d]: rhashtable_remove_fast failed\n",
+                                      tdata->id);
+                               goto out;
+                       }
+                       tdata->objs[i].value = TEST_INSERT_FAIL;
+               }
+               err = thread_lookup_test(tdata);
+               if (err) {
+                       pr_err("  thread[%d]: rhashtable_lookup_test (2) failed\n",
+                              tdata->id);
+                       goto out;
+               }
+       }
+out:
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule();
+       }
+       return err;
+}
+
 static int __init test_rht_init(void)
 {
-       int i, err;
+       int i, err, started_threads = 0, failed_threads = 0;
        u64 total_time = 0;
+       struct thread_data *tdata;
+       struct test_obj *objs;
 
        entries = min(entries, MAX_ENTRIES);
 
@@ -239,6 +342,57 @@ static int __init test_rht_init(void)
        do_div(total_time, runs);
        pr_info("Average test time: %llu\n", total_time);
 
+       if (!tcount)
+               return 0;
+
+       pr_info("Testing concurrent rhashtable access from %d threads\n",
+               tcount);
+       sema_init(&prestart_sem, 1 - tcount);
+       tdata = vzalloc(tcount * sizeof(struct thread_data));
+       if (!tdata)
+               return -ENOMEM;
+       objs  = vzalloc(tcount * entries * sizeof(struct test_obj));
+       if (!objs) {
+               vfree(tdata);
+               return -ENOMEM;
+       }
+
+       err = rhashtable_init(&ht, &test_rht_params);
+       if (err < 0) {
+               pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+                       err);
+               vfree(tdata);
+               vfree(objs);
+               return -EINVAL;
+       }
+       for (i = 0; i < tcount; i++) {
+               tdata[i].id = i;
+               tdata[i].objs = objs + i * entries;
+               tdata[i].task = kthread_run(threadfunc, &tdata[i],
+                                           "rhashtable_thrad[%d]", i);
+               if (IS_ERR(tdata[i].task))
+                       pr_err(" kthread_run failed for thread %d\n", i);
+               else
+                       started_threads++;
+       }
+       if (down_interruptible(&prestart_sem))
+               pr_err("  down interruptible failed\n");
+       for (i = 0; i < tcount; i++)
+               up(&startup_sem);
+       for (i = 0; i < tcount; i++) {
+               if (IS_ERR(tdata[i].task))
+                       continue;
+               if ((err = kthread_stop(tdata[i].task))) {
+                       pr_warn("Test failed: thread %d returned: %d\n",
+                               i, err);
+                       failed_threads++;
+               }
+       }
+       pr_info("Started %d threads, %d failed\n",
+               started_threads, failed_threads);
+       rhashtable_destroy(&ht);
+       vfree(tdata);
+       vfree(objs);
        return 0;
 }
 
index 1132d733556dbc330d32eda5460f55e6e067b627..17c75a4246c8bbab8b56fe4d562cd85ea670a21f 100644 (file)
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,7 +16,7 @@ struct cma {
 extern struct cma cma_areas[MAX_CMA_AREAS];
 extern unsigned cma_area_count;
 
-static unsigned long cma_bitmap_maxno(struct cma *cma)
+static inline unsigned long cma_bitmap_maxno(struct cma *cma)
 {
        return cma->count >> cma->order_per_bit;
 }
index 6c513a63ea84c3c7ffd41201b7a419ff7b6dfd5d..7b28e9cdf1c7686428fe49802fced44088043555 100644 (file)
@@ -2,7 +2,7 @@
  * This file contains shadow memory manipulation code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
  * Some of code borrowed from https://github.com/xairy/linux by
  *        Andrey Konovalov <adech.fo@gmail.com>
index 680ceedf810ab4c9cd08c9929f5d445de9f5aa6a..e07c94fbd0ac5a141ecf95ab7d39d046fea13e67 100644 (file)
@@ -2,7 +2,7 @@
  * This file contains error reporting code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
  * Some of code borrowed from https://github.com/xairy/linux by
  *        Andrey Konovalov <adech.fo@gmail.com>
index ea5a936594887c43506fefe82a61a4ad6fcf4274..1f4446a90cef07c67ee1082b83f0ca87ebfefea1 100644 (file)
@@ -1146,8 +1146,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        }
 
        if (!PageHuge(p) && PageTransHuge(hpage)) {
-               if (unlikely(split_huge_page(hpage))) {
-                       pr_err("MCE: %#lx: thp split failed\n", pfn);
+               if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
+                       if (!PageAnon(hpage))
+                               pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+                       else
+                               pr_err("MCE: %#lx: thp split failed\n", pfn);
                        if (TestClearPageHWPoison(p))
                                atomic_long_sub(nr_pages, &num_poisoned_pages);
                        put_page(p);
@@ -1538,6 +1541,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
                 */
                ret = __get_any_page(page, pfn, 0);
                if (!PageLRU(page)) {
+                       /* Drop page reference which is from __get_any_page() */
+                       put_page(page);
                        pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
                                pfn, page->flags);
                        return -EIO;
@@ -1567,13 +1572,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
        unlock_page(hpage);
 
        ret = isolate_huge_page(hpage, &pagelist);
-       if (ret) {
-               /*
-                * get_any_page() and isolate_huge_page() takes a refcount each,
-                * so need to drop one here.
-                */
-               put_page(hpage);
-       } else {
+       /*
+        * get_any_page() and isolate_huge_page() takes a refcount each,
+        * so need to drop one here.
+        */
+       put_page(hpage);
+       if (!ret) {
                pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
                return -EBUSY;
        }
index 003dbe4b060d914dae4e93997c372ddd3e1a8e92..6da82bcb0a8b66b7326c1a021a7eac3b476cd85e 100644 (file)
@@ -1277,6 +1277,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
 
        /* create new memmap entry */
        firmware_map_add_hotplug(start, start + size, "System RAM");
+       memblock_add_node(start, size, nid);
 
        goto out;
 
@@ -2013,6 +2014,8 @@ void __ref remove_memory(int nid, u64 start, u64 size)
 
        /* remove memmap entry */
        firmware_map_remove(start, start + size, "System RAM");
+       memblock_free(start, size);
+       memblock_remove(start, size);
 
        arch_remove_memory(start, size);
 
index beda4171080232f37e779a3658045e1f813a1cff..df959b7d608518edf9ab5c577e6b19afa8d88ed9 100644 (file)
@@ -5060,6 +5060,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
 {
        unsigned long zone_start_pfn, zone_end_pfn;
 
+       /* When hotadd a new node, the node should be empty */
+       if (!node_start_pfn && !node_end_pfn)
+               return 0;
+
        /* Get the start and end of the zone */
        zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
        zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5123,6 +5127,10 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
        unsigned long zone_start_pfn, zone_end_pfn;
 
+       /* When hotadd a new node, the node should be empty */
+       if (!node_start_pfn && !node_end_pfn)
+               return 0;
+
        zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
        zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
 
index eb8baa72adc8ab2c84c390a54bcfa4db429d4c0d..c6ffc55ee0d7adda3d0dfb0975061d72fe7af6ac 100644 (file)
@@ -1,6 +1,6 @@
 obj-$(CONFIG_6LOWPAN) += 6lowpan.o
 
-6lowpan-y := iphc.o nhc.o
+6lowpan-y := core.o iphc.o nhc.o
 
 #rfc6282 nhcs
 obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c
new file mode 100644 (file)
index 0000000..ae1896f
--- /dev/null
@@ -0,0 +1,40 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Authors:
+ * (C) 2015 Pengutronix, Alexander Aring <aar@pengutronix.de>
+ */
+
+#include <linux/module.h>
+
+#include <net/6lowpan.h>
+
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype)
+{
+       lowpan_priv(dev)->lltype = lltype;
+}
+EXPORT_SYMBOL(lowpan_netdev_setup);
+
+static int __init lowpan_module_init(void)
+{
+       request_module_nowait("ipv6");
+
+       request_module_nowait("nhc_dest");
+       request_module_nowait("nhc_fragment");
+       request_module_nowait("nhc_hop");
+       request_module_nowait("nhc_ipv6");
+       request_module_nowait("nhc_mobility");
+       request_module_nowait("nhc_routing");
+       request_module_nowait("nhc_udp");
+
+       return 0;
+}
+module_init(lowpan_module_init);
+
+MODULE_LICENSE("GPL");
index 9055d7b9d1129d69e34f8ed255922b2e340fcbb1..1e0071fdcf72a036d6697f2e453ba303b69abbe0 100644 (file)
@@ -48,7 +48,6 @@
 
 #include <linux/bitops.h>
 #include <linux/if_arp.h>
-#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <net/6lowpan.h>
 #include <net/ipv6.h>
@@ -284,7 +283,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
                if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
                        return -EINVAL;
 
-               hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
+               hdr.flow_lbl[0] = (tmp & 0x0F) | ((tmp >> 2) & 0x30);
                memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
                skb_pull(skb, 2);
                break;
@@ -610,21 +609,3 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
        return 0;
 }
 EXPORT_SYMBOL_GPL(lowpan_header_compress);
-
-static int __init lowpan_module_init(void)
-{
-       request_module_nowait("ipv6");
-
-       request_module_nowait("nhc_dest");
-       request_module_nowait("nhc_fragment");
-       request_module_nowait("nhc_hop");
-       request_module_nowait("nhc_ipv6");
-       request_module_nowait("nhc_mobility");
-       request_module_nowait("nhc_routing");
-       request_module_nowait("nhc_udp");
-
-       return 0;
-}
-module_init(lowpan_module_init);
-
-MODULE_LICENSE("GPL");
index 01d7ba840df8dbf48b07e3c8697bb7c11f424a8d..fded86508117dad4d81aad327b287aee991a55be 100644 (file)
@@ -791,10 +791,9 @@ void vlan_setup(struct net_device *dev)
 {
        ether_setup(dev);
 
-       dev->priv_flags         |= IFF_802_1Q_VLAN;
+       dev->priv_flags         |= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
        dev->priv_flags         &= ~IFF_TX_SKB_SHARING;
        netif_keep_dst(dev);
-       dev->tx_queue_len       = 0;
 
        dev->netdev_ops         = &vlan_netdev_ops;
        dev->destructor         = vlan_dev_free;
index 6d0b471eede8639f55b33c5c4d434c8fddef6ee5..cc7d87d6498785535017ddcdbbce6da8e3f8679c 100644 (file)
@@ -19,6 +19,7 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
@@ -453,7 +454,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
        int j;
 
        /* check if orig node candidate is running DAT */
-       if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT))
+       if (!test_bit(BATADV_ORIG_CAPA_HAS_DAT, &candidate->capabilities))
                goto out;
 
        /* Check if this node has already been selected... */
@@ -713,9 +714,9 @@ static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
                                           uint16_t tvlv_value_len)
 {
        if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
-               orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
+               clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
        else
-               orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
+               set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
 }
 
 /**
index cffa92dd98778bf2ffdf11107243fcc0f60cab6b..6012e2b4af4f5d400385fe8c6f5ca00524933a65 100644 (file)
@@ -153,15 +153,11 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
        struct batadv_neigh_node *router;
        struct batadv_neigh_ifinfo *router_ifinfo;
        struct batadv_gw_node *gw_node, *curr_gw = NULL;
-       uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
-       uint32_t gw_divisor;
+       uint64_t max_gw_factor = 0, tmp_gw_factor = 0;
        uint8_t max_tq = 0;
        uint8_t tq_avg;
        struct batadv_orig_node *orig_node;
 
-       gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
-       gw_divisor *= 64;
-
        rcu_read_lock();
        hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
                if (gw_node->deleted)
@@ -187,7 +183,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                        tmp_gw_factor = tq_avg * tq_avg;
                        tmp_gw_factor *= gw_node->bandwidth_down;
                        tmp_gw_factor *= 100 * 100;
-                       tmp_gw_factor /= gw_divisor;
+                       tmp_gw_factor >>= 18;
 
                        if ((tmp_gw_factor > max_gw_factor) ||
                            ((tmp_gw_factor == max_gw_factor) &&
index 7aa480b7edd0d5fa56a7d88aa09c9db5da48068a..68a9554961eb41544c6da01b38cb65821b9e6ef5 100644 (file)
@@ -19,6 +19,8 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
@@ -588,19 +590,26 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
  *
  * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
  * orig, has toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
  */
 static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
                                             struct batadv_orig_node *orig,
                                             uint8_t mcast_flags)
 {
+       struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
+       struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
+
        /* switched from flag unset to set */
        if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
            !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
                atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
 
                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-               hlist_add_head_rcu(&orig->mcast_want_all_unsnoopables_node,
-                                  &bat_priv->mcast.want_all_unsnoopables_list);
+               /* flag checks above + mcast_handler_lock prevents this */
+               WARN_ON(!hlist_unhashed(node));
+
+               hlist_add_head_rcu(node, head);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        /* switched from flag set to unset */
        } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
@@ -608,7 +617,10 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
                atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
 
                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-               hlist_del_rcu(&orig->mcast_want_all_unsnoopables_node);
+               /* flag checks above + mcast_handler_lock prevents this */
+               WARN_ON(hlist_unhashed(node));
+
+               hlist_del_init_rcu(node);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        }
 }
@@ -621,19 +633,26 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
  *
  * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
  * toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
  */
 static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig,
                                          uint8_t mcast_flags)
 {
+       struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
+       struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
+
        /* switched from flag unset to set */
        if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
            !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
                atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
 
                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-               hlist_add_head_rcu(&orig->mcast_want_all_ipv4_node,
-                                  &bat_priv->mcast.want_all_ipv4_list);
+               /* flag checks above + mcast_handler_lock prevents this */
+               WARN_ON(!hlist_unhashed(node));
+
+               hlist_add_head_rcu(node, head);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        /* switched from flag set to unset */
        } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
@@ -641,7 +660,10 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
                atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
 
                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-               hlist_del_rcu(&orig->mcast_want_all_ipv4_node);
+               /* flag checks above + mcast_handler_lock prevents this */
+               WARN_ON(hlist_unhashed(node));
+
+               hlist_del_init_rcu(node);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        }
 }
@@ -654,19 +676,26 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
  *
  * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
  * toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
  */
 static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig,
                                          uint8_t mcast_flags)
 {
+       struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
+       struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
+
        /* switched from flag unset to set */
        if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
            !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
                atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
 
                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-               hlist_add_head_rcu(&orig->mcast_want_all_ipv6_node,
-                                  &bat_priv->mcast.want_all_ipv6_list);
+               /* flag checks above + mcast_handler_lock prevents this */
+               WARN_ON(!hlist_unhashed(node));
+
+               hlist_add_head_rcu(node, head);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        /* switched from flag set to unset */
        } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
@@ -674,7 +703,10 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
                atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
 
                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-               hlist_del_rcu(&orig->mcast_want_all_ipv6_node);
+               /* flag checks above + mcast_handler_lock prevents this */
+               WARN_ON(hlist_unhashed(node));
+
+               hlist_del_init_rcu(node);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        }
 }
@@ -697,39 +729,42 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
        uint8_t mcast_flags = BATADV_NO_FLAGS;
        bool orig_initialized;
 
-       orig_initialized = orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST;
+       if (orig_mcast_enabled && tvlv_value &&
+           (tvlv_value_len >= sizeof(mcast_flags)))
+               mcast_flags = *(uint8_t *)tvlv_value;
+
+       spin_lock_bh(&orig->mcast_handler_lock);
+       orig_initialized = test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
+                                   &orig->capa_initialized);
 
        /* If mcast support is turned on decrease the disabled mcast node
         * counter only if we had increased it for this node before. If this
         * is a completely new orig_node no need to decrease the counter.
         */
        if (orig_mcast_enabled &&
-           !(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) {
+           !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
                if (orig_initialized)
                        atomic_dec(&bat_priv->mcast.num_disabled);
-               orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
+               set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
        /* If mcast support is being switched off or if this is an initial
         * OGM without mcast support then increase the disabled mcast
         * node counter.
         */
        } else if (!orig_mcast_enabled &&
-                  (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
+                  (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) ||
                    !orig_initialized)) {
                atomic_inc(&bat_priv->mcast.num_disabled);
-               orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
+               clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
        }
 
-       orig->capa_initialized |= BATADV_ORIG_CAPA_HAS_MCAST;
-
-       if (orig_mcast_enabled && tvlv_value &&
-           (tvlv_value_len >= sizeof(mcast_flags)))
-               mcast_flags = *(uint8_t *)tvlv_value;
+       set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
 
        batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
        batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
        batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
 
        orig->mcast_flags = mcast_flags;
+       spin_unlock_bh(&orig->mcast_handler_lock);
 }
 
 /**
@@ -763,11 +798,15 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
 {
        struct batadv_priv *bat_priv = orig->bat_priv;
 
-       if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
-           orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
+       spin_lock_bh(&orig->mcast_handler_lock);
+
+       if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) &&
+           test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized))
                atomic_dec(&bat_priv->mcast.num_disabled);
 
        batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
        batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
        batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
+
+       spin_unlock_bh(&orig->mcast_handler_lock);
 }
index f0a50f31d822e8a9f90477249b0727a668f70a28..46604010dcd42aa83ee2a0801dbaf77005e03d63 100644 (file)
@@ -19,6 +19,7 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/byteorder/generic.h>
 #include <linux/compiler.h>
 #include <linux/debugfs.h>
@@ -134,9 +135,9 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
                                          uint16_t tvlv_value_len)
 {
        if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
-               orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC;
+               clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
        else
-               orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC;
+               set_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
 }
 
 /**
@@ -894,7 +895,7 @@ void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
                goto out;
 
        /* check if orig node is network coding enabled */
-       if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC))
+       if (!test_bit(BATADV_ORIG_CAPA_HAS_NC, &orig_node->capabilities))
                goto out;
 
        /* accept ogms from 'good' neighbors and single hop neighbors */
index 018b7495ad844cdf4f97f1590455bc205cdf3d71..32a0fcfab36d918b9359633a326191745b409fb4 100644 (file)
@@ -696,8 +696,13 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
        orig_node->last_seen = jiffies;
        reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_node->bcast_seqno_reset = reset_time;
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
        orig_node->mcast_flags = BATADV_NO_FLAGS;
+       INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
+       INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
+       INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
+       spin_lock_init(&orig_node->mcast_handler_lock);
 #endif
 
        /* create a vlan object for the "untagged" LAN */
index 0a01992e65ab06b898e40a8097a29e5162c53b08..191076ef1eca1d0a6a2833d4437f925da1b9e71b 100644 (file)
@@ -616,7 +616,8 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
-                   (forw_packet->if_incoming != hard_iface))
+                   (forw_packet->if_incoming != hard_iface) &&
+                   (forw_packet->if_outgoing != hard_iface))
                        continue;
 
                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
index a2fc843c22432e790980fa15653cf95e6c60b384..49d3d3aa59cba2777ed1697f5b5bd7cbc8ffd217 100644 (file)
@@ -202,6 +202,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
        int gw_mode;
        enum batadv_forw_mode forw_mode;
        struct batadv_orig_node *mcast_single_orig = NULL;
+       int network_offset = ETH_HLEN;
 
        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto dropped;
@@ -214,14 +215,18 @@ static int batadv_interface_tx(struct sk_buff *skb,
        case ETH_P_8021Q:
                vhdr = vlan_eth_hdr(skb);
 
-               if (vhdr->h_vlan_encapsulated_proto != ethertype)
+               if (vhdr->h_vlan_encapsulated_proto != ethertype) {
+                       network_offset += VLAN_HLEN;
                        break;
+               }
 
                /* fall through */
        case ETH_P_BATMAN:
                goto dropped;
        }
 
+       skb_set_network_header(skb, network_offset);
+
        if (batadv_bla_tx(bat_priv, skb, vid))
                goto dropped;
 
@@ -936,7 +941,7 @@ static void batadv_softif_init_early(struct net_device *dev)
        dev->netdev_ops = &batadv_netdev_ops;
        dev->destructor = batadv_softif_free;
        dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
 
        /* can't call min_mtu, because the needed variables
         * have not been initialized yet
index 5e953297d3b2bda513cd9ad90e7e634928f34a20..c1eb7b72ab15fb3a09bf3dc236dc7c83043d51d9 100644 (file)
@@ -19,6 +19,7 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/compiler.h>
@@ -595,8 +596,11 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        /* increase the refcounter of the related vlan */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
        if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
-                addr, BATADV_PRINT_VID(vid)))
+                addr, BATADV_PRINT_VID(vid))) {
+               kfree(tt_local);
+               tt_local = NULL;
                goto out;
+       }
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1879,7 +1883,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                }
                spin_unlock_bh(list_lock);
        }
-       orig_node->capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT;
+       clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
 }
 
 static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
@@ -2212,7 +2216,7 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
        spin_lock_bh(&bat_priv->tt.req_list_lock);
 
        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
-               list_del(&node->list);
+               list_del_init(&node->list);
                kfree(node);
        }
 
@@ -2248,7 +2252,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                if (batadv_has_timed_out(node->issued_at,
                                         BATADV_TT_REQUEST_TIMEOUT)) {
-                       list_del(&node->list);
+                       list_del_init(&node->list);
                        kfree(node);
                }
        }
@@ -2530,7 +2534,8 @@ out:
                batadv_hardif_free_ref(primary_if);
        if (ret && tt_req_node) {
                spin_lock_bh(&bat_priv->tt.req_list_lock);
-               list_del(&tt_req_node->list);
+               /* list_del_init() verifies tt_req_node still is in the list */
+               list_del_init(&tt_req_node->list);
                spin_unlock_bh(&bat_priv->tt.req_list_lock);
                kfree(tt_req_node);
        }
@@ -2838,7 +2843,7 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
                                return;
                }
        }
-       orig_node->capa_initialized |= BATADV_ORIG_CAPA_HAS_TT;
+       set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
 }
 
 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
@@ -2967,7 +2972,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                if (!batadv_compare_eth(node->addr, resp_src))
                        continue;
-               list_del(&node->list);
+               list_del_init(&node->list);
                kfree(node);
        }
 
@@ -3340,7 +3345,8 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
        bool has_tt_init;
 
        tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
-       has_tt_init = orig_node->capa_initialized & BATADV_ORIG_CAPA_HAS_TT;
+       has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT,
+                              &orig_node->capa_initialized);
 
        /* orig table not initialised AND first diff is in the OGM OR the ttvn
         * increased by one -> we can apply the attached changes
index 67d63483618eba4791ee4fffb5654e9c85d82482..55610a805b533bd227332d88a4b5f4b29c5deebd 100644 (file)
@@ -221,6 +221,7 @@ struct batadv_orig_bat_iv {
  * @batadv_dat_addr_t:  address of the orig node in the distributed hash
  * @last_seen: time when last packet from this node was received
  * @bcast_seqno_reset: time when the broadcast seqno window was reset
+ * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
  * @mcast_flags: multicast flags announced by the orig node
  * @mcast_want_all_unsnoop_node: a list node for the
  *  mcast.want_all_unsnoopables list
@@ -268,13 +269,15 @@ struct batadv_orig_node {
        unsigned long last_seen;
        unsigned long bcast_seqno_reset;
 #ifdef CONFIG_BATMAN_ADV_MCAST
+       /* synchronizes mcast tvlv specific orig changes */
+       spinlock_t mcast_handler_lock;
        uint8_t mcast_flags;
        struct hlist_node mcast_want_all_unsnoopables_node;
        struct hlist_node mcast_want_all_ipv4_node;
        struct hlist_node mcast_want_all_ipv6_node;
 #endif
-       uint8_t capabilities;
-       uint8_t capa_initialized;
+       unsigned long capabilities;
+       unsigned long capa_initialized;
        atomic_t last_ttvn;
        unsigned char *tt_buff;
        int16_t tt_buff_len;
@@ -313,10 +316,10 @@ struct batadv_orig_node {
  *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
  */
 enum batadv_orig_capabilities {
-       BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
-       BATADV_ORIG_CAPA_HAS_NC = BIT(1),
-       BATADV_ORIG_CAPA_HAS_TT = BIT(2),
-       BATADV_ORIG_CAPA_HAS_MCAST = BIT(3),
+       BATADV_ORIG_CAPA_HAS_DAT,
+       BATADV_ORIG_CAPA_HAS_NC,
+       BATADV_ORIG_CAPA_HAS_TT,
+       BATADV_ORIG_CAPA_HAS_MCAST,
 };
 
 /**
index 0ffe2e24020aa86b80115221811f324511cc1385..131e79cde3504f161e38f7bd5797f2de06eb63d1 100644 (file)
@@ -85,7 +85,7 @@ struct lowpan_dev {
 
 static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
 {
-       return netdev_priv(netdev);
+       return (struct lowpan_dev *)lowpan_priv(netdev)->priv;
 }
 
 static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
@@ -848,8 +848,9 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
        struct net_device *netdev;
        int err = 0;
 
-       netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
-                             NET_NAME_UNKNOWN, netdev_setup);
+       netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
+                             IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
+                             netdev_setup);
        if (!netdev)
                return -ENOMEM;
 
@@ -859,7 +860,7 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
        SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
        SET_NETDEV_DEVTYPE(netdev, &bt_type);
 
-       *dev = netdev_priv(netdev);
+       *dev = lowpan_dev(netdev);
        (*dev)->netdev = netdev;
        (*dev)->hdev = chan->conn->hcon->hdev;
        INIT_LIST_HEAD(&(*dev)->peers);
@@ -869,6 +870,8 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
        list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
        spin_unlock(&devices_lock);
 
+       lowpan_netdev_setup(netdev, LOWPAN_LLTYPE_BTLE);
+
        err = register_netdev(netdev);
        if (err < 0) {
                BT_INFO("register_netdev failed %d", err);
index 238ddd3cf95fb660d41f751821a09550f977f067..e32f34189007967e7674a501e2c944029623cfab 100644 (file)
@@ -379,7 +379,7 @@ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
        amp_ctrl_put(ctrl);
 
        hci_req_init(&req, hdev);
-       hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, sizeof(cp), &cp);
+       hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
        hci_req_run_skb(&req, write_remote_amp_assoc_complete);
 
        kfree(cp);
index 2c48bf0b5afbd00d4b0308e8bb74a92fc9b72a7a..b4548c739a6475446d643bd5b01ab8627ef1f08e 100644 (file)
@@ -64,6 +64,48 @@ static void hci_le_create_connection_cancel(struct hci_conn *conn)
        hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
 }
 
+/* This function requires the caller holds hdev->lock */
+static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
+{
+       struct hci_conn_params *params;
+       struct smp_irk *irk;
+       bdaddr_t *bdaddr;
+       u8 bdaddr_type;
+
+       bdaddr = &conn->dst;
+       bdaddr_type = conn->dst_type;
+
+       /* Check if we need to convert to identity address */
+       irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
+       if (irk) {
+               bdaddr = &irk->bdaddr;
+               bdaddr_type = irk->addr_type;
+       }
+
+       params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
+       if (!params)
+               return;
+
+       /* The connection attempt was doing scan for new RPA, and is
+        * in scan phase. If params are not associated with any other
+        * autoconnect action, remove them completely. If they are, just unmark
+        * them as waiting for connection, by clearing explicit_connect field.
+        */
+       if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+               hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
+       else
+               params->explicit_connect = false;
+}
+
+/* This function requires the caller holds hdev->lock */
+static void hci_connect_le_scan_remove(struct hci_conn *conn)
+{
+       hci_connect_le_scan_cleanup(conn);
+
+       hci_conn_hash_del(conn->hdev, conn);
+       hci_update_background_scan(conn->hdev);
+}
+
 static void hci_acl_create_connection(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
@@ -340,8 +382,12 @@ static void hci_conn_timeout(struct work_struct *work)
                if (conn->out) {
                        if (conn->type == ACL_LINK)
                                hci_acl_create_connection_cancel(conn);
-                       else if (conn->type == LE_LINK)
-                               hci_le_create_connection_cancel(conn);
+                       else if (conn->type == LE_LINK) {
+                               if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+                                       hci_connect_le_scan_remove(conn);
+                               else
+                                       hci_le_create_connection_cancel(conn);
+                       }
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        hci_reject_sco(conn);
                }
@@ -637,15 +683,18 @@ static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct hci_conn *conn;
 
-       if (status == 0)
-               return;
+       hci_dev_lock(hdev);
+
+       conn = hci_lookup_le_connect(hdev);
+
+       if (!status) {
+               hci_connect_le_scan_cleanup(conn);
+               goto done;
+       }
 
        BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
               status);
 
-       hci_dev_lock(hdev);
-
-       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (!conn)
                goto done;
 
@@ -685,6 +734,7 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
        hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
 
        conn->state = BT_CONNECT;
+       clear_bit(HCI_CONN_SCANNING, &conn->flags);
 }
 
 static void hci_req_directed_advertising(struct hci_request *req,
@@ -728,7 +778,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                u8 role)
 {
        struct hci_conn_params *params;
-       struct hci_conn *conn;
+       struct hci_conn *conn, *conn_unfinished;
        struct smp_irk *irk;
        struct hci_request req;
        int err;
@@ -751,26 +801,29 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
         * and return the object found.
         */
        conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+       conn_unfinished = NULL;
        if (conn) {
-               conn->pending_sec_level = sec_level;
-               goto done;
+               if (conn->state == BT_CONNECT &&
+                   test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+                       BT_DBG("will continue unfinished conn %pMR", dst);
+                       conn_unfinished = conn;
+               } else {
+                       if (conn->pending_sec_level < sec_level)
+                               conn->pending_sec_level = sec_level;
+                       goto done;
+               }
        }
 
        /* Since the controller supports only one LE connection attempt at a
         * time, we return -EBUSY if there is any connection attempt running.
         */
-       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-       if (conn)
+       if (hci_lookup_le_connect(hdev))
                return ERR_PTR(-EBUSY);
 
        /* When given an identity address with existing identity
         * resolving key, the connection needs to be established
         * to a resolvable random address.
         *
-        * This uses the cached random resolvable address from
-        * a previous scan. When no cached address is available,
-        * try connecting to the identity address instead.
-        *
         * Storing the resolvable random address is required here
         * to handle connection failures. The address will later
         * be resolved back into the original identity address
@@ -782,15 +835,23 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                dst_type = ADDR_LE_DEV_RANDOM;
        }
 
-       conn = hci_conn_add(hdev, LE_LINK, dst, role);
+       if (conn_unfinished) {
+               conn = conn_unfinished;
+               bacpy(&conn->dst, dst);
+       } else {
+               conn = hci_conn_add(hdev, LE_LINK, dst, role);
+       }
+
        if (!conn)
                return ERR_PTR(-ENOMEM);
 
        conn->dst_type = dst_type;
        conn->sec_level = BT_SECURITY_LOW;
-       conn->pending_sec_level = sec_level;
        conn->conn_timeout = conn_timeout;
 
+       if (!conn_unfinished)
+               conn->pending_sec_level = sec_level;
+
        hci_req_init(&req, hdev);
 
        /* Disable advertising if we're active. For master role
@@ -854,6 +915,144 @@ create_conn:
                return ERR_PTR(err);
        }
 
+done:
+       /* If this is continuation of connect started by hci_connect_le_scan,
+        * it already called hci_conn_hold and calling it again would mess the
+        * counter.
+        */
+       if (!conn_unfinished)
+               hci_conn_hold(conn);
+
+       return conn;
+}
+
+static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
+                                        u16 opcode)
+{
+       struct hci_conn *conn;
+
+       if (!status)
+               return;
+
+       BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
+              status);
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+       if (conn)
+               hci_le_conn_failed(conn, status);
+
+       hci_dev_unlock(hdev);
+}
+
+static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
+{
+       struct hci_conn *conn;
+
+       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+       if (!conn)
+               return false;
+
+       if (conn->dst_type != type)
+               return false;
+
+       if (conn->state != BT_CONNECTED)
+               return false;
+
+       return true;
+}
+
+/* This function requires the caller holds hdev->lock */
+static int hci_explicit_conn_params_set(struct hci_request *req,
+                                       bdaddr_t *addr, u8 addr_type)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_conn_params *params;
+
+       if (is_connected(hdev, addr, addr_type))
+               return -EISCONN;
+
+       params = hci_conn_params_add(hdev, addr, addr_type);
+       if (!params)
+               return -EIO;
+
+       /* If we created new params, or existing params were marked as disabled,
+        * mark them to be used just once to connect.
+        */
+       if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+               params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+               list_del_init(&params->action);
+               list_add(&params->action, &hdev->pend_le_conns);
+       }
+
+       params->explicit_connect = true;
+       __hci_update_background_scan(req);
+
+       BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
+              params->auto_connect);
+
+       return 0;
+}
+
+/* This function requires the caller holds hdev->lock */
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+                                    u8 dst_type, u8 sec_level,
+                                    u16 conn_timeout, u8 role)
+{
+       struct hci_conn *conn;
+       struct hci_request req;
+       int err;
+
+       /* Let's make sure that le is enabled.*/
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+               if (lmp_le_capable(hdev))
+                       return ERR_PTR(-ECONNREFUSED);
+
+               return ERR_PTR(-EOPNOTSUPP);
+       }
+
+       /* Some devices send ATT messages as soon as the physical link is
+        * established. To be able to handle these ATT messages, the user-
+        * space first establishes the connection and then starts the pairing
+        * process.
+        *
+        * So if a hci_conn object already exists for the following connection
+        * attempt, we simply update pending_sec_level and auth_type fields
+        * and return the object found.
+        */
+       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+       if (conn) {
+               if (conn->pending_sec_level < sec_level)
+                       conn->pending_sec_level = sec_level;
+               goto done;
+       }
+
+       BT_DBG("requesting refresh of dst_addr");
+
+       conn = hci_conn_add(hdev, LE_LINK, dst, role);
+       if (!conn)
+               return ERR_PTR(-ENOMEM);
+
+       hci_req_init(&req, hdev);
+
+       if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0)
+               return ERR_PTR(-EBUSY);
+
+       conn->state = BT_CONNECT;
+       set_bit(HCI_CONN_SCANNING, &conn->flags);
+
+       err = hci_req_run(&req, hci_connect_le_scan_complete);
+       if (err && err != -ENODATA) {
+               hci_conn_del(conn);
+               return ERR_PTR(err);
+       }
+
+       conn->dst_type = dst_type;
+       conn->sec_level = BT_SECURITY_LOW;
+       conn->pending_sec_level = sec_level;
+       conn->conn_timeout = conn_timeout;
+
 done:
        hci_conn_hold(conn);
        return conn;
index bc43b6490555c7d75dae8ac452a7b012c8f1ffa1..adcbc74c243268e8330bf760c39fa792a1c2a3a8 100644 (file)
@@ -2847,6 +2847,30 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
        return NULL;
 }
 
+/* This function requires the caller holds hdev->lock */
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+                                                   bdaddr_t *addr,
+                                                   u8 addr_type)
+{
+       struct hci_conn_params *param;
+
+       list_for_each_entry(param, &hdev->pend_le_conns, action) {
+               if (bacmp(&param->addr, addr) == 0 &&
+                   param->addr_type == addr_type &&
+                   param->explicit_connect)
+                       return param;
+       }
+
+       list_for_each_entry(param, &hdev->pend_le_reports, action) {
+               if (bacmp(&param->addr, addr) == 0 &&
+                   param->addr_type == addr_type &&
+                   param->explicit_connect)
+                       return param;
+       }
+
+       return NULL;
+}
+
 /* This function requires the caller holds hdev->lock */
 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
                                            bdaddr_t *addr, u8 addr_type)
@@ -2916,6 +2940,15 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev)
        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
                if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
                        continue;
+
+               /* If trying to estabilish one time connection to disabled
+                * device, leave the params, but mark them as just once.
+                */
+               if (params->explicit_connect) {
+                       params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+                       continue;
+               }
+
                list_del(&params->list);
                kfree(params);
        }
index 218d7dfc342f484b0b9b18c4208a2ccc5efc0cb8..7ba35a9ba6b77db152db5931530d8d0c5dcec6b5 100644 (file)
@@ -1059,7 +1059,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
 
                hci_dev_set_flag(hdev, HCI_LE_ADV);
 
-               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+               conn = hci_lookup_le_connect(hdev);
                if (conn)
                        queue_delayed_work(hdev->workqueue,
                                           &conn->le_conn_timeout,
@@ -4447,7 +4447,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);
 
-       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+       conn = hci_lookup_le_connect(hdev);
        if (!conn) {
                conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
                if (!conn) {
@@ -4640,42 +4640,49 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
        /* If we're not connectable only connect devices that we have in
         * our pend_le_conns list.
         */
-       params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
-                                          addr, addr_type);
+       params = hci_explicit_connect_lookup(hdev, addr, addr_type);
+
        if (!params)
                return NULL;
 
-       switch (params->auto_connect) {
-       case HCI_AUTO_CONN_DIRECT:
-               /* Only devices advertising with ADV_DIRECT_IND are
-                * triggering a connection attempt. This is allowing
-                * incoming connections from slave devices.
-                */
-               if (adv_type != LE_ADV_DIRECT_IND)
+       if (!params->explicit_connect) {
+               switch (params->auto_connect) {
+               case HCI_AUTO_CONN_DIRECT:
+                       /* Only devices advertising with ADV_DIRECT_IND are
+                        * triggering a connection attempt. This is allowing
+                        * incoming connections from slave devices.
+                        */
+                       if (adv_type != LE_ADV_DIRECT_IND)
+                               return NULL;
+                       break;
+               case HCI_AUTO_CONN_ALWAYS:
+                       /* Devices advertising with ADV_IND or ADV_DIRECT_IND
+                        * are triggering a connection attempt. This means
+                        * that incoming connectioms from slave device are
+                        * accepted and also outgoing connections to slave
+                        * devices are established when found.
+                        */
+                       break;
+               default:
                        return NULL;
-               break;
-       case HCI_AUTO_CONN_ALWAYS:
-               /* Devices advertising with ADV_IND or ADV_DIRECT_IND
-                * are triggering a connection attempt. This means
-                * that incoming connectioms from slave device are
-                * accepted and also outgoing connections to slave
-                * devices are established when found.
-                */
-               break;
-       default:
-               return NULL;
+               }
        }
 
        conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
                              HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
        if (!IS_ERR(conn)) {
-               /* Store the pointer since we don't really have any
+               /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
+                * by higher layer that tried to connect, if no then
+                * store the pointer since we don't really have any
                 * other owner of the object besides the params that
                 * triggered it. This way we can abort the connection if
                 * the parameters get removed and keep the reference
                 * count consistent once the connection is established.
                 */
-               params->conn = hci_conn_get(conn);
+
+               if (!params->explicit_connect)
+                       params->conn = hci_conn_get(conn);
+
                return conn;
        }
 
index d6025d6e6d59f957c612a1e7ff455f770734eb6a..b7369220c9efff616d13f3ad3aeec44c809bd4e0 100644 (file)
@@ -317,7 +317,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
-           hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+           hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
@@ -479,7 +479,6 @@ void hci_update_page_scan(struct hci_dev *hdev)
 void __hci_update_background_scan(struct hci_request *req)
 {
        struct hci_dev *hdev = req->hdev;
-       struct hci_conn *conn;
 
        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
@@ -529,8 +528,7 @@ void __hci_update_background_scan(struct hci_request *req)
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
-               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-               if (conn)
+               if (hci_lookup_le_connect(hdev))
                        return;
 
                /* If controller is currently scanning, we stop it to ensure we
index 45fffa4136421b8dd5ab301cf0a4ec529b36ff8e..7c65ee200c29215c6b3f050cfbb881873be4946a 100644 (file)
@@ -7113,8 +7113,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                else
                        role = HCI_ROLE_MASTER;
 
-               hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
-                                     HCI_LE_CONN_TIMEOUT, role);
+               hcon = hci_connect_le_scan(hdev, dst, dst_type,
+                                          chan->sec_level,
+                                          HCI_LE_CONN_TIMEOUT,
+                                          role);
        } else {
                u8 auth_type = l2cap_get_auth_type(chan);
                hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
index e435438f95f030ce3703660b9c816d63b8d03ffb..ccaf5a436d8f7a70799729a04ffc17583d11913f 100644 (file)
@@ -3564,9 +3564,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                 */
                hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
 
-               conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
-                                     sec_level, HCI_LE_CONN_TIMEOUT,
-                                     HCI_ROLE_MASTER);
+               conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
+                                          addr_type, sec_level,
+                                          HCI_LE_CONN_TIMEOUT,
+                                          HCI_ROLE_MASTER);
        }
 
        if (IS_ERR(conn)) {
@@ -4210,7 +4211,7 @@ static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
                /* Don't let discovery abort an outgoing connection attempt
                 * that's using directed advertising.
                 */
-               if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+               if (hci_lookup_le_connect(hdev)) {
                        *status = MGMT_STATUS_REJECTED;
                        return false;
                }
@@ -6107,6 +6108,12 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
        switch (auto_connect) {
        case HCI_AUTO_CONN_DISABLED:
        case HCI_AUTO_CONN_LINK_LOSS:
+               /* If auto connect is being disabled when we're trying to
+                * connect to device, keep connecting.
+                */
+               if (params->explicit_connect)
+                       list_add(&params->action, &hdev->pend_le_conns);
+
                __hci_update_background_scan(req);
                break;
        case HCI_AUTO_CONN_REPORT:
index 0aa8f5cf46a17171c627e6949c51e684f28a58ed..6ed2feb51e3c7ae3d0f0fd629d6e147549b5fa54 100644 (file)
@@ -365,8 +365,7 @@ void br_dev_setup(struct net_device *dev)
        dev->destructor = br_dev_free;
        dev->ethtool_ops = &br_ethtool_ops;
        SET_NETDEV_DEVTYPE(dev, &br_type);
-       dev->tx_queue_len = 0;
-       dev->priv_flags = IFF_EBRIDGE;
+       dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
 
        dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
                        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
index 0752796fe0ba4443036a94a385ef4d1666cd3adc..66efdc21f548524a19f3abc3ea5268b245f88dd2 100644 (file)
@@ -1608,7 +1608,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
                break;
        }
 
-       if (skb_trimmed)
+       if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);
 
        return err;
@@ -1653,7 +1653,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
                break;
        }
 
-       if (skb_trimmed)
+       if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);
 
        return err;
index 0f2408f6cdfe4336efdc281d7317c01d766b9e38..dbcb1949ea58c2a4ca26135134367b90eb55f8e7 100644 (file)
@@ -849,7 +849,7 @@ struct rtnl_link_ops br_link_ops __read_mostly = {
        .kind                   = "bridge",
        .priv_size              = sizeof(struct net_bridge),
        .setup                  = br_dev_setup,
-       .maxtype                = IFLA_BRPORT_MAX,
+       .maxtype                = IFLA_BR_MAX,
        .policy                 = br_policy,
        .validate               = br_validate,
        .newlink                = br_dev_newlink,
index edbca468fa73cc29b31703bd4fe4d70925f21bd5..d730a0f68f46b43b3e8dd51cb3bb029f04cde93a 100644 (file)
@@ -177,7 +177,7 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
        skb->protocol = htons(ETH_P_CAIF);
 
        /* Check if we need to handle xoff */
-       if (likely(caifd->netdev->tx_queue_len == 0))
+       if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
                goto noxoff;
 
        if (unlikely(caifd->xoff))
index 4870c3556a5a68be94cf28b65d527331810e7187..b1f3f4844e60c21248f97ef4c2a3c0b0dddc1bca 100644 (file)
@@ -6997,6 +6997,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
        dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
        setup(dev);
 
+       if (!dev->tx_queue_len)
+               printk(KERN_WARNING "%s uses DEPRECATED zero tx_queue_len - convert driver to use IFF_NO_QUEUE instead.\n", name);
+
        dev->num_tx_queues = txqs;
        dev->real_num_tx_queues = txqs;
        if (netif_alloc_netdev_queues(dev))
index f8694d1b8702e70db45a0eee94b5361ecb4214e0..50dcdbb0ee46edc40c4fc105dc91bc8dc55492b8 100644 (file)
@@ -20,6 +20,7 @@
 #include <net/net_namespace.h>
 #include <linux/sched.h>
 #include <linux/prefetch.h>
+#include <net/lwtunnel.h>
 
 #include <net/dst.h>
 #include <net/dst_metadata.h>
@@ -184,6 +185,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
 #ifdef CONFIG_IP_ROUTE_CLASSID
        dst->tclassid = 0;
 #endif
+       dst->lwtstate = NULL;
        atomic_set(&dst->__refcnt, initial_ref);
        dst->__use = 0;
        dst->lastuse = jiffies;
@@ -264,6 +266,7 @@ again:
                kfree(dst);
        else
                kmem_cache_free(dst->ops->kmem_cachep, dst);
+       lwtstate_put(dst->lwtstate);
 
        dst = child;
        if (dst) {
index a50dbfa83ad9c4459dde60bc07908170f607d3c9..b4adc961413ffa7b3b139f156ccec88c21ac7954 100644 (file)
@@ -1124,6 +1124,7 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
        *pfp = fp;
        return 0;
 }
+EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
 
 void bpf_prog_destroy(struct bpf_prog *fp)
 {
@@ -1348,7 +1349,7 @@ const struct bpf_func_proto bpf_l3_csum_replace_proto = {
 static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
-       u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
+       bool is_pseudo = !!BPF_IS_PSEUDO_HEADER(flags);
        int offset = (int) r2;
        __sum16 sum, *ptr;
 
@@ -1488,13 +1489,13 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
-       struct ip_tunnel_info *info = skb_tunnel_info(skb, AF_INET);
+       struct ip_tunnel_info *info = skb_tunnel_info(skb);
 
        if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info))
                return -EINVAL;
 
        to->tunnel_id = be64_to_cpu(info->key.tun_id);
-       to->remote_ipv4 = be32_to_cpu(info->key.ipv4_src);
+       to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
 
        return 0;
 }
@@ -1528,7 +1529,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
        info = &md->u.tun_info;
        info->mode = IP_TUNNEL_INFO_TX;
        info->key.tun_id = cpu_to_be64(from->tunnel_id);
-       info->key.ipv4_dst = cpu_to_be32(from->remote_ipv4);
+       info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
 
        return 0;
 }
index 5d6d8e3d450aeeaabe50608feba3156b9c9b9963..e924c2e08554094e824449e3829d42f148906e4a 100644 (file)
@@ -179,14 +179,16 @@ int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
 }
 EXPORT_SYMBOL(lwtunnel_cmp_encap);
 
-int __lwtunnel_output(struct sock *sk, struct sk_buff *skb,
-                     struct lwtunnel_state *lwtstate)
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
 {
+       struct dst_entry *dst = skb_dst(skb);
        const struct lwtunnel_encap_ops *ops;
+       struct lwtunnel_state *lwtstate;
        int ret = -EINVAL;
 
-       if (!lwtstate)
+       if (!dst)
                goto drop;
+       lwtstate = dst->lwtstate;
 
        if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
            lwtstate->type > LWTUNNEL_ENCAP_MAX)
@@ -209,35 +211,38 @@ drop:
 
        return ret;
 }
+EXPORT_SYMBOL(lwtunnel_output);
 
-int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
+int lwtunnel_input(struct sk_buff *skb)
 {
-       struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
-       struct lwtunnel_state *lwtstate = NULL;
+       struct dst_entry *dst = skb_dst(skb);
+       const struct lwtunnel_encap_ops *ops;
+       struct lwtunnel_state *lwtstate;
+       int ret = -EINVAL;
 
-       if (rt) {
-               lwtstate = rt->rt6i_lwtstate;
-               skb->dev = rt->dst.dev;
-       }
+       if (!dst)
+               goto drop;
+       lwtstate = dst->lwtstate;
 
-       skb->protocol = htons(ETH_P_IPV6);
+       if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+           lwtstate->type > LWTUNNEL_ENCAP_MAX)
+               return 0;
 
-       return __lwtunnel_output(sk, skb, lwtstate);
-}
-EXPORT_SYMBOL(lwtunnel_output6);
+       ret = -EOPNOTSUPP;
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+       if (likely(ops && ops->input))
+               ret = ops->input(skb);
+       rcu_read_unlock();
 
-int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
-{
-       struct rtable *rt = (struct rtable *)skb_dst(skb);
-       struct lwtunnel_state *lwtstate = NULL;
+       if (ret == -EOPNOTSUPP)
+               goto drop;
 
-       if (rt) {
-               lwtstate = rt->rt_lwtstate;
-               skb->dev = rt->dst.dev;
-       }
+       return ret;
 
-       skb->protocol = htons(ETH_P_IP);
+drop:
+       kfree_skb(skb);
 
-       return __lwtunnel_output(sk, skb, lwtstate);
+       return ret;
 }
-EXPORT_SYMBOL(lwtunnel_output);
+EXPORT_SYMBOL(lwtunnel_input);
index b6a19ca0f99e49c7406f1fedb6c59433bbd7fd38..bf9a5d93c2d10bbb9c0bfc82d5e7c5df82ef2b5c 100644 (file)
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
  * Otherwise returns the provided skb. Returns NULL in error cases
  * (e.g. transport_len exceeds skb length or out-of-memory).
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
                                               unsigned int transport_len)
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
        unsigned int len = skb_transport_offset(skb) + transport_len;
        int ret;
 
-       if (skb->len < len) {
-               kfree_skb(skb);
+       if (skb->len < len)
                return NULL;
-       } else if (skb->len == len) {
+       else if (skb->len == len)
                return skb;
-       }
 
        skb_chk = skb_clone(skb, GFP_ATOMIC);
-       kfree_skb(skb);
-
        if (!skb_chk)
                return NULL;
 
@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
  * If the skb has data beyond the given transport length, then a
  * trimmed & cloned skb is checked and returned.
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
                                     unsigned int transport_len,
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
 
        skb_chk = skb_checksum_maybe_trim(skb, transport_len);
        if (!skb_chk)
-               return NULL;
+               goto err;
 
-       if (!pskb_may_pull(skb_chk, offset)) {
-               kfree_skb(skb_chk);
-               return NULL;
-       }
+       if (!pskb_may_pull(skb_chk, offset))
+               goto err;
 
        __skb_pull(skb_chk, offset);
        ret = skb_chkf(skb_chk);
        __skb_push(skb_chk, offset);
 
-       if (ret) {
-               kfree_skb(skb_chk);
-               return NULL;
-       }
+       if (ret)
+               goto err;
 
        return skb_chk;
+
+err:
+       if (skb_chk && skb_chk != skb)
+               kfree_skb(skb_chk);
+
+       return NULL;
+
 }
 EXPORT_SYMBOL(skb_checksum_trimmed);
 
index a7732a06804376aa321c9982faaf981ad3c51c87..3dffce953c39fc0209e8d0f553817e972f854303 100644 (file)
@@ -301,7 +301,7 @@ out:
 EXPORT_SYMBOL(in6_pton);
 
 void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
-                             __be32 from, __be32 to, int pseudohdr)
+                             __be32 from, __be32 to, bool pseudohdr)
 {
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                csum_replace4(sum, from, to);
@@ -318,7 +318,7 @@ EXPORT_SYMBOL(inet_proto_csum_replace4);
 
 void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
                               const __be32 *from, const __be32 *to,
-                              int pseudohdr)
+                              bool pseudohdr)
 {
        __be32 diff[] = {
                ~from[0], ~from[1], ~from[2], ~from[3],
@@ -336,6 +336,19 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(inet_proto_csum_replace16);
 
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+                                    __wsum diff, bool pseudohdr)
+{
+       if (skb->ip_summed != CHECKSUM_PARTIAL) {
+               *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+               if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
+                       skb->csum = ~csum_add(diff, ~skb->csum);
+       } else if (pseudohdr) {
+               *sum = ~csum_fold(csum_add(diff, csum_unfold(*sum)));
+       }
+}
+EXPORT_SYMBOL(inet_proto_csum_replace_by_diff);
+
 struct __net_random_once_work {
        struct work_struct work;
        struct static_key *key;
index 78d4ac97aae36af0eff4698bb8cf8c124be2fc69..053eb2b8e68281e778f8f605bb3d5ad9b83ae473 100644 (file)
@@ -554,6 +554,31 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
        return 0;
 }
 
+static int dsa_of_probe_links(struct dsa_platform_data *pd,
+                             struct dsa_chip_data *cd,
+                             int chip_index, int port_index,
+                             struct device_node *port,
+                             const char *port_name)
+{
+       struct device_node *link;
+       int link_index;
+       int ret;
+
+       for (link_index = 0;; link_index++) {
+               link = of_parse_phandle(port, "link", link_index);
+               if (!link)
+                       break;
+
+               if (!strcmp(port_name, "dsa") && pd->nr_chips > 1) {
+                       ret = dsa_of_setup_routing_table(pd, cd, chip_index,
+                                                        port_index, link);
+                       if (ret)
+                               return ret;
+               }
+       }
+       return 0;
+}
+
 static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
 {
        int i;
@@ -573,7 +598,7 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
 static int dsa_of_probe(struct device *dev)
 {
        struct device_node *np = dev->of_node;
-       struct device_node *child, *mdio, *ethernet, *port, *link;
+       struct device_node *child, *mdio, *ethernet, *port;
        struct mii_bus *mdio_bus, *mdio_bus_switch;
        struct net_device *ethernet_dev;
        struct dsa_platform_data *pd;
@@ -668,15 +693,10 @@ static int dsa_of_probe(struct device *dev)
                                goto out_free_chip;
                        }
 
-                       link = of_parse_phandle(port, "link", 0);
-
-                       if (!strcmp(port_name, "dsa") && link &&
-                                       pd->nr_chips > 1) {
-                               ret = dsa_of_setup_routing_table(pd, cd,
-                                               chip_index, port_index, link);
-                               if (ret)
-                                       goto out_free_chip;
-                       }
+                       ret = dsa_of_probe_links(pd, cd, chip_index,
+                                                port_index, port, port_name);
+                       if (ret)
+                               goto out_free_chip;
 
                }
        }
index 373ff315030da0081b02eccd36015ed50b4fc3bc..cce97385f7436445f22c17605b5ee4da48c80cac 100644 (file)
@@ -1147,7 +1147,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
        slave_dev->features = master->vlan_features;
        slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
        eth_hw_addr_inherit(slave_dev, master);
-       slave_dev->tx_queue_len = 0;
+       slave_dev->priv_flags |= IFF_NO_QUEUE;
        slave_dev->netdev_ops = &dsa_slave_netdev_ops;
        slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
 
index 44d27469ae55982d1895021b79ba76a85c1324a8..35a9788bb3ae734d8e5b2f5199901a6c47f7a587 100644 (file)
@@ -392,7 +392,7 @@ void hsr_dev_setup(struct net_device *dev)
        dev->header_ops = &hsr_header_ops;
        dev->netdev_ops = &hsr_device_ops;
        SET_NETDEV_DEVTYPE(dev, &hsr_type);
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
 
        dev->destructor = hsr_dev_destroy;
 
index e50f69da78eb50f86b8dee51fcfa6730f37dfad3..ea339fa94c27c5006ca8121073c5d4b69a9b8e23 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <net/ieee802154_netdev.h>
 #include <net/inet_frag.h>
+#include <net/6lowpan.h>
 
 struct lowpan_create_arg {
        u16 tag;
@@ -37,26 +38,18 @@ static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
        }
 }
 
-struct lowpan_dev_record {
-       struct net_device *ldev;
-       struct list_head list;
-};
-
 /* private device info */
 struct lowpan_dev_info {
        struct net_device       *real_dev; /* real WPAN device ptr */
-       struct mutex            dev_list_mtx; /* mutex for list ops */
        u16                     fragment_tag;
 };
 
 static inline struct
 lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
 {
-       return netdev_priv(dev);
+       return (struct lowpan_dev_info *)lowpan_priv(dev)->priv;
 }
 
-extern struct list_head lowpan_devices;
-
 int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
 void lowpan_net_frag_exit(void);
 int lowpan_net_frag_init(void);
index f20a387a1011021347af182060f3b8f4ceda7183..953b1c49f5d1e42019752d86021fd7690272232e 100644 (file)
@@ -52,8 +52,7 @@
 
 #include "6lowpan_i.h"
 
-LIST_HEAD(lowpan_devices);
-static int lowpan_open_count;
+static int open_count;
 
 static struct header_ops lowpan_header_ops = {
        .create = lowpan_header_create,
@@ -91,7 +90,7 @@ static void lowpan_setup(struct net_device *dev)
        dev->hard_header_len    = 2 + 1 + 20 + 14;
        dev->needed_tailroom    = 2; /* FCS */
        dev->mtu                = IPV6_MIN_MTU;
-       dev->tx_queue_len       = 0;
+       dev->priv_flags         |= IFF_NO_QUEUE;
        dev->flags              = IFF_BROADCAST | IFF_MULTICAST;
        dev->watchdog_timeo     = 0;
 
@@ -114,7 +113,6 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
                          struct nlattr *tb[], struct nlattr *data[])
 {
        struct net_device *real_dev;
-       struct lowpan_dev_record *entry;
        int ret;
 
        ASSERT_RTNL();
@@ -133,67 +131,52 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
                return -EINVAL;
        }
 
-       lowpan_dev_info(dev)->real_dev = real_dev;
-       mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
-
-       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-       if (!entry) {
+       if (real_dev->ieee802154_ptr->lowpan_dev) {
                dev_put(real_dev);
-               lowpan_dev_info(dev)->real_dev = NULL;
-               return -ENOMEM;
+               return -EBUSY;
        }
 
-       entry->ldev = dev;
-
+       lowpan_dev_info(dev)->real_dev = real_dev;
        /* Set the lowpan hardware address to the wpan hardware address. */
        memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
 
-       mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-       INIT_LIST_HEAD(&entry->list);
-       list_add_tail(&entry->list, &lowpan_devices);
-       mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+       lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
 
        ret = register_netdevice(dev);
-       if (ret >= 0) {
-               if (!lowpan_open_count)
-                       lowpan_rx_init();
-               lowpan_open_count++;
+       if (ret < 0) {
+               dev_put(real_dev);
+               return ret;
        }
 
-       return ret;
+       real_dev->ieee802154_ptr->lowpan_dev = dev;
+       if (!open_count)
+               lowpan_rx_init();
+
+       open_count++;
+
+       return 0;
 }
 
 static void lowpan_dellink(struct net_device *dev, struct list_head *head)
 {
        struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
        struct net_device *real_dev = lowpan_dev->real_dev;
-       struct lowpan_dev_record *entry, *tmp;
 
        ASSERT_RTNL();
 
-       lowpan_open_count--;
-       if (!lowpan_open_count)
-               lowpan_rx_exit();
-
-       mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-       list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
-               if (entry->ldev == dev) {
-                       list_del(&entry->list);
-                       kfree(entry);
-               }
-       }
-       mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+       open_count--;
 
-       mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
-
-       unregister_netdevice_queue(dev, head);
+       if (!open_count)
+               lowpan_rx_exit();
 
+       real_dev->ieee802154_ptr->lowpan_dev = NULL;
+       unregister_netdevice(dev);
        dev_put(real_dev);
 }
 
 static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
        .kind           = "lowpan",
-       .priv_size      = sizeof(struct lowpan_dev_info),
+       .priv_size      = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)),
        .setup          = lowpan_setup,
        .newlink        = lowpan_newlink,
        .dellink        = lowpan_dellink,
@@ -214,19 +197,21 @@ static int lowpan_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       LIST_HEAD(del_list);
-       struct lowpan_dev_record *entry, *tmp;
 
        if (dev->type != ARPHRD_IEEE802154)
                goto out;
 
-       if (event == NETDEV_UNREGISTER) {
-               list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
-                       if (lowpan_dev_info(entry->ldev)->real_dev == dev)
-                               lowpan_dellink(entry->ldev, &del_list);
-               }
-
-               unregister_netdevice_many(&del_list);
+       switch (event) {
+       case NETDEV_UNREGISTER:
+               /* Check if wpan interface is unregistered that we
+                * also delete possible lowpan interfaces which belongs
+                * to the wpan interface.
+                */
+               if (dev->ieee802154_ptr && dev->ieee802154_ptr->lowpan_dev)
+                       lowpan_dellink(dev->ieee802154_ptr->lowpan_dev, NULL);
+               break;
+       default:
+               break;
        }
 
 out:
index 4be1d289ab2df7581d93702d46bf3793abeba1a7..12e10201d263860d64cc7835bd947459e4d95873 100644 (file)
 
 #include "6lowpan_i.h"
 
-static int lowpan_give_skb_to_devices(struct sk_buff *skb,
-                                     struct net_device *dev)
+static int lowpan_give_skb_to_device(struct sk_buff *skb,
+                                    struct net_device *dev)
 {
-       struct lowpan_dev_record *entry;
-       struct sk_buff *skb_cp;
-       int stat = NET_RX_SUCCESS;
-
+       skb->dev = dev->ieee802154_ptr->lowpan_dev;
        skb->protocol = htons(ETH_P_IPV6);
        skb->pkt_type = PACKET_HOST;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(entry, &lowpan_devices, list)
-               if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
-                       skb_cp = skb_copy(skb, GFP_ATOMIC);
-                       if (!skb_cp) {
-                               kfree_skb(skb);
-                               rcu_read_unlock();
-                               return NET_RX_DROP;
-                       }
-
-                       skb_cp->dev = entry->ldev;
-                       stat = netif_rx(skb_cp);
-                       if (stat == NET_RX_DROP)
-                               break;
-               }
-       rcu_read_unlock();
-
-       consume_skb(skb);
-
-       return stat;
+       return netif_rx(skb);
 }
 
 static int
@@ -89,6 +67,10 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
        struct ieee802154_hdr hdr;
        int ret;
 
+       if (dev->type != ARPHRD_IEEE802154 ||
+           !dev->ieee802154_ptr->lowpan_dev)
+               goto drop;
+
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                goto drop;
@@ -99,9 +81,6 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto drop_skb;
 
-       if (dev->type != ARPHRD_IEEE802154)
-               goto drop_skb;
-
        if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
                goto drop_skb;
 
@@ -109,7 +88,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
        if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
                /* Pull off the 1-byte of 6lowpan header. */
                skb_pull(skb, 1);
-               return lowpan_give_skb_to_devices(skb, NULL);
+               return lowpan_give_skb_to_device(skb, dev);
        } else {
                switch (skb->data[0] & 0xe0) {
                case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
@@ -117,7 +96,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
                        if (ret < 0)
                                goto drop_skb;
 
-                       return lowpan_give_skb_to_devices(skb, NULL);
+                       return lowpan_give_skb_to_device(skb, dev);
                case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
                        ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
                        if (ret == 1) {
@@ -125,7 +104,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
                                if (ret < 0)
                                        goto drop_skb;
 
-                               return lowpan_give_skb_to_devices(skb, NULL);
+                               return lowpan_give_skb_to_device(skb, dev);
                        } else if (ret == -1) {
                                return NET_RX_DROP;
                        } else {
@@ -138,7 +117,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
                                if (ret < 0)
                                        goto drop_skb;
 
-                               return lowpan_give_skb_to_devices(skb, NULL);
+                               return lowpan_give_skb_to_device(skb, dev);
                        } else if (ret == -1) {
                                return NET_RX_DROP;
                        } else {
index 2597abbf7f4bbbd121486306994f5e568de4d37e..f6263fc1234056e89cd2c06046699c4fa1d75b39 100644 (file)
@@ -112,7 +112,7 @@ lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
 
        frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
        if (IS_ERR(frag))
-               return -PTR_ERR(frag);
+               return PTR_ERR(frag);
 
        memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
        memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
@@ -224,7 +224,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
        } else {
                da.mode = IEEE802154_ADDR_LONG;
                da.extended_addr = ieee802154_devaddr_from_raw(daddr);
-               cb->ackreq = wpan_dev->frame_retries >= 0;
+               cb->ackreq = wpan_dev->ackreq;
        }
 
        return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
index 68f24016860c30c7800b36d45046cf3e7f23b8c9..1b00a14850cb5f097b557079225c55c1abadab40 100644 (file)
@@ -230,6 +230,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
        [NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },
 
        [NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED },
+
+       [NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 },
 };
 
 /* message building helper */
@@ -458,6 +460,7 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
        CMD(set_max_csma_backoffs, SET_MAX_CSMA_BACKOFFS);
        CMD(set_max_frame_retries, SET_MAX_FRAME_RETRIES);
        CMD(set_lbt_mode, SET_LBT_MODE);
+       CMD(set_ackreq_default, SET_ACKREQ_DEFAULT);
 
        if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER)
                CMD(set_tx_power, SET_TX_POWER);
@@ -656,6 +659,10 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
        if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt))
                goto nla_put_failure;
 
+       /* ackreq default behaviour */
+       if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq))
+               goto nla_put_failure;
+
        genlmsg_end(msg, hdr);
        return 0;
 
@@ -1042,6 +1049,24 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
        return rdev_set_lbt_mode(rdev, wpan_dev, mode);
 }
 
+static int
+nl802154_set_ackreq_default(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg802154_registered_device *rdev = info->user_ptr[0];
+       struct net_device *dev = info->user_ptr[1];
+       struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+       bool ackreq;
+
+       if (netif_running(dev))
+               return -EBUSY;
+
+       if (!info->attrs[NL802154_ATTR_ACKREQ_DEFAULT])
+               return -EINVAL;
+
+       ackreq = !!nla_get_u8(info->attrs[NL802154_ATTR_ACKREQ_DEFAULT]);
+       return rdev_set_ackreq_default(rdev, wpan_dev, ackreq);
+}
+
 #define NL802154_FLAG_NEED_WPAN_PHY    0x01
 #define NL802154_FLAG_NEED_NETDEV      0x02
 #define NL802154_FLAG_NEED_RTNL                0x04
@@ -1248,6 +1273,14 @@ static const struct genl_ops nl802154_ops[] = {
                .internal_flags = NL802154_FLAG_NEED_NETDEV |
                                  NL802154_FLAG_NEED_RTNL,
        },
+       {
+               .cmd = NL802154_CMD_SET_ACKREQ_DEFAULT,
+               .doit = nl802154_set_ackreq_default,
+               .policy = nl802154_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL802154_FLAG_NEED_NETDEV |
+                                 NL802154_FLAG_NEED_RTNL,
+       },
 };
 
 /* initialisation/exit functions */
index 8d5960a37195136380032644b65a8b741a03ab00..03b357501cc55dc99c29a642d89975cbd2da87a0 100644 (file)
@@ -195,4 +195,17 @@ rdev_set_lbt_mode(struct cfg802154_registered_device *rdev,
        return ret;
 }
 
+static inline int
+rdev_set_ackreq_default(struct cfg802154_registered_device *rdev,
+                       struct wpan_dev *wpan_dev, bool ackreq)
+{
+       int ret;
+
+       trace_802154_rdev_set_ackreq_default(&rdev->wpan_phy, wpan_dev,
+                                            ackreq);
+       ret = rdev->ops->set_ackreq_default(&rdev->wpan_phy, wpan_dev, ackreq);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
+}
+
 #endif /* __CFG802154_RDEV_OPS */
index 4399b7fbaa31481c402079680e3509ed05fb9479..9a471e41ec737270a2d9f364564187f2736aaf19 100644 (file)
@@ -275,6 +275,25 @@ TRACE_EVENT(802154_rdev_set_lbt_mode,
                WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode))
 );
 
+TRACE_EVENT(802154_rdev_set_ackreq_default,
+       TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+                bool ackreq),
+       TP_ARGS(wpan_phy, wpan_dev, ackreq),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               WPAN_DEV_ENTRY
+               __field(bool, ackreq)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               WPAN_DEV_ASSIGN;
+               __entry->ackreq = ackreq;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+               ", ackreq default: %s", WPAN_PHY_PR_ARG,
+               WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->ackreq))
+);
+
 TRACE_EVENT(802154_rdev_return_int,
        TP_PROTO(struct wpan_phy *wpan_phy, int ret),
        TP_ARGS(wpan_phy, ret),
index c8b855882fa522011405cf43209a7671bd3272c7..675e88cac2b469707ea49cd8e3c695e1b91776fc 100644 (file)
@@ -450,15 +450,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                        goto out;
        }
 
-       if (sk->sk_bound_dev_if) {
-               struct net_device *dev;
-
-               rcu_read_lock();
-               dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
-               if (dev)
-                       tb_id = vrf_dev_table_rcu(dev) ? : tb_id;
-               rcu_read_unlock();
-       }
+       tb_id = vrf_dev_table_ifindex(net, sk->sk_bound_dev_if) ? : tb_id;
        chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
 
        /* Not specified by any standard per-se, however it breaks too
index b7f1d20a96154b0295ece902def96d637622affe..01f1c7dcd329ca6ec6f281328e52db7bb256ae9b 100644 (file)
@@ -533,13 +533,13 @@ errout:
 
 #endif
 
-int fib_encap_match(struct net *net, u16 encap_type,
-                   struct nlattr *encap,
-                   int oif, const struct fib_nh *nh)
+static int fib_encap_match(struct net *net, u16 encap_type,
+                          struct nlattr *encap,
+                          int oif, const struct fib_nh *nh)
 {
        struct lwtunnel_state *lwtstate;
        struct net_device *dev = NULL;
-       int ret;
+       int ret, result = 0;
 
        if (encap_type == LWTUNNEL_ENCAP_NONE)
                return 0;
@@ -548,10 +548,12 @@ int fib_encap_match(struct net *net, u16 encap_type,
                dev = __dev_get_by_index(net, oif);
        ret = lwtunnel_build_state(dev, encap_type,
                                   encap, &lwtstate);
-       if (!ret)
-               return lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+       if (!ret) {
+               result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+               lwtstate_free(lwtstate);
+       }
 
-       return 0;
+       return result;
 }
 
 int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
@@ -708,10 +710,18 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
 
                        if (tbl)
                                err = fib_table_lookup(tbl, &fl4, &res,
-                                                  FIB_LOOKUP_IGNORE_LINKSTATE);
-                       else
+                                                      FIB_LOOKUP_IGNORE_LINKSTATE |
+                                                      FIB_LOOKUP_NOREF);
+
+                       /* on error or if no table given do full lookup. This
+                        * is needed for example when nexthops are in the local
+                        * table rather than the given table
+                        */
+                       if (!tbl || err) {
                                err = fib_lookup(net, &fl4, &res,
                                                 FIB_LOOKUP_IGNORE_LINKSTATE);
+                       }
+
                        if (err) {
                                rcu_read_unlock();
                                return err;
index 1243c79cb5b0178052cae82e7a53728950f302e3..5154f81c53266841ae52913bda220dcf99244a8c 100644 (file)
@@ -2468,7 +2468,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
                key = l->key + 1;
                iter->pos++;
 
-               if (pos-- <= 0)
+               if (--pos <= 0)
                        break;
 
                l = NULL;
index 34968cd5c1464bf896ba1a2130fc223c50973499..2d1646cff0572054cc2982b764d817094c002146 100644 (file)
@@ -79,7 +79,11 @@ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
-       size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+       size_t plen = sizeof(struct udphdr) + hdrlen +
+           max_t(size_t, offset + sizeof(u16), start);
+
+       if (skb->remcsum_offload)
+               return guehdr;
 
        if (!pskb_may_pull(skb, plen))
                return NULL;
@@ -221,29 +225,21 @@ out_unlock:
 
 static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                                      struct guehdr *guehdr, void *data,
-                                     size_t hdrlen, u8 ipproto,
-                                     struct gro_remcsum *grc, bool nopartial)
+                                     size_t hdrlen, struct gro_remcsum *grc,
+                                     bool nopartial)
 {
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
-       size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
 
        if (skb->remcsum_offload)
-               return NULL;
+               return guehdr;
 
        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;
 
-       /* Pull checksum that will be written */
-       if (skb_gro_header_hard(skb, off + plen)) {
-               guehdr = skb_gro_header_slow(skb, off + plen, off);
-               if (!guehdr)
-                       return NULL;
-       }
-
-       skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen,
-                               start, offset, grc, nopartial);
+       guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
+                                        start, offset, grc, nopartial);
 
        skb->remcsum_offload = 1;
 
@@ -307,10 +303,10 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 
                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_gro_remcsum(skb, off, guehdr,
-                                                data + doffset, hdrlen,
-                                                guehdr->proto_ctype, &grc,
+                                                data + doffset, hdrlen, &grc,
                                                 !!(fou->flags &
                                                    FOU_F_REMCSUM_NOPARTIAL));
+
                        if (!guehdr)
                                goto out;
 
@@ -351,7 +347,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[guehdr->proto_ctype]);
-       if (WARN_ON(!ops || !ops->callbacks.gro_receive))
+       if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
                goto out_unlock;
 
        pp = ops->callbacks.gro_receive(head, skb);
index c6f1ce149ffb868af6c31cfc86207f13dad16238..f16488efa1c8930c23aea42a770e935fa41578b2 100644 (file)
@@ -426,7 +426,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        fl4.flowi4_mark = mark;
        fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
        fl4.flowi4_proto = IPPROTO_ICMP;
-       fl4.flowi4_oif = vrf_master_ifindex_rcu(skb->dev) ? : skb->dev->ifindex;
+       fl4.flowi4_oif = vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex;
        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
        rt = ip_route_output_key(net, &fl4);
        if (IS_ERR(rt))
@@ -460,7 +460,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
        fl4->flowi4_proto = IPPROTO_ICMP;
        fl4->fl4_icmp_type = type;
        fl4->fl4_icmp_code = code;
-       fl4->flowi4_oif = vrf_master_ifindex_rcu(skb_in->dev) ? : skb_in->dev->ifindex;
+       fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev) ? : skb_in->dev->ifindex;
 
        security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
        rt = __ip_route_output_key(net, fl4);
index 651cdf648ec4728bff6e709b0324b7d52ffd65ed..9fdfd9deac11dde85bc62803068fbe50e45837b8 100644 (file)
@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
        struct sk_buff *skb_chk;
        unsigned int transport_len;
        unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
-       int ret;
+       int ret = -EINVAL;
 
        transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
 
-       skb_get(skb);
        skb_chk = skb_checksum_trimmed(skb, transport_len,
                                       ip_mc_validate_checksum);
        if (!skb_chk)
-               return -EINVAL;
+               goto err;
 
-       if (!pskb_may_pull(skb_chk, len)) {
-               kfree_skb(skb_chk);
-               return -EINVAL;
-       }
+       if (!pskb_may_pull(skb_chk, len))
+               goto err;
 
        ret = ip_mc_check_igmp_msg(skb_chk);
-       if (ret) {
-               kfree_skb(skb_chk);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        if (skb_trimmed)
                *skb_trimmed = skb_chk;
-       else
+       /* free now unneeded clone */
+       else if (skb_chk != skb)
                kfree_skb(skb_chk);
 
-       return 0;
+       ret = 0;
+
+err:
+       if (ret && skb_chk && skb_chk != skb)
+               kfree_skb(skb_chk);
+
+       return ret;
 }
 
 /**
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
  *
  * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  *  standard
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
index 05e3145f7dc346af56a50b945dc75d6f1bdb27c7..134957159c27eb9180e08b73360fe891574b4742 100644 (file)
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
        }
 
        spin_unlock(&queue->syn_wait_lock);
-       if (del_timer_sync(&req->rsk_timer))
+       if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
                reqsk_put(req);
        return found;
 }
index fb44d693796efc04535dd2c51cebc9081dd9bbfb..1bf328182697bdaeb5bace4a8ac9e7c0d726a69d 100644 (file)
@@ -407,10 +407,10 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
                                return PACKET_REJECT;
 
                        info = &tun_dst->u.tun_info;
-                       info->key.ipv4_src = iph->saddr;
-                       info->key.ipv4_dst = iph->daddr;
-                       info->key.ipv4_tos = iph->tos;
-                       info->key.ipv4_ttl = iph->ttl;
+                       info->key.u.ipv4.src = iph->saddr;
+                       info->key.u.ipv4.dst = iph->daddr;
+                       info->key.tos = iph->tos;
+                       info->key.ttl = iph->ttl;
 
                        info->mode = IP_TUNNEL_INFO_RX;
                        info->key.tun_flags = tpi->flags &
@@ -521,15 +521,15 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
        __be16 df, flags;
        int err;
 
-       tun_info = skb_tunnel_info(skb, AF_INET);
+       tun_info = skb_tunnel_info(skb);
        if (unlikely(!tun_info || tun_info->mode != IP_TUNNEL_INFO_TX))
                goto err_free_skb;
 
        key = &tun_info->key;
        memset(&fl, 0, sizeof(fl));
-       fl.daddr = key->ipv4_dst;
-       fl.saddr = key->ipv4_src;
-       fl.flowi4_tos = RT_TOS(key->ipv4_tos);
+       fl.daddr = key->u.ipv4.dst;
+       fl.saddr = key->u.ipv4.src;
+       fl.flowi4_tos = RT_TOS(key->tos);
        fl.flowi4_mark = skb->mark;
        fl.flowi4_proto = IPPROTO_GRE;
 
@@ -564,8 +564,8 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 
        df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
        err = iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
-                           key->ipv4_dst, IPPROTO_GRE,
-                           key->ipv4_tos, key->ipv4_ttl, df, false);
+                           key->u.ipv4.dst, IPPROTO_GRE,
+                           key->tos, key->ttl, df, false);
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
        return;
 
index 5512f4e4ec1b1b629a9c1fe1c7ac2ac4ffced248..289b6c26ce377d2dc094b9ae028a7f2cebc00e4c 100644 (file)
@@ -192,15 +192,15 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
 
-static const struct nla_policy ip_tun_policy[IP_TUN_MAX + 1] = {
-       [IP_TUN_ID]             = { .type = NLA_U64 },
-       [IP_TUN_DST]            = { .type = NLA_U32 },
-       [IP_TUN_SRC]            = { .type = NLA_U32 },
-       [IP_TUN_TTL]            = { .type = NLA_U8 },
-       [IP_TUN_TOS]            = { .type = NLA_U8 },
-       [IP_TUN_SPORT]          = { .type = NLA_U16 },
-       [IP_TUN_DPORT]          = { .type = NLA_U16 },
-       [IP_TUN_FLAGS]          = { .type = NLA_U16 },
+static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
+       [LWTUNNEL_IP_ID]        = { .type = NLA_U64 },
+       [LWTUNNEL_IP_DST]       = { .type = NLA_U32 },
+       [LWTUNNEL_IP_SRC]       = { .type = NLA_U32 },
+       [LWTUNNEL_IP_TTL]       = { .type = NLA_U8 },
+       [LWTUNNEL_IP_TOS]       = { .type = NLA_U8 },
+       [LWTUNNEL_IP_SPORT]     = { .type = NLA_U16 },
+       [LWTUNNEL_IP_DPORT]     = { .type = NLA_U16 },
+       [LWTUNNEL_IP_FLAGS]     = { .type = NLA_U16 },
 };
 
 static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
@@ -208,10 +208,10 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
 {
        struct ip_tunnel_info *tun_info;
        struct lwtunnel_state *new_state;
-       struct nlattr *tb[IP_TUN_MAX + 1];
+       struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
        int err;
 
-       err = nla_parse_nested(tb, IP_TUN_MAX, attr, ip_tun_policy);
+       err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy);
        if (err < 0)
                return err;
 
@@ -223,29 +223,29 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
 
        tun_info = lwt_tun_info(new_state);
 
-       if (tb[IP_TUN_ID])
-               tun_info->key.tun_id = nla_get_u64(tb[IP_TUN_ID]);
+       if (tb[LWTUNNEL_IP_ID])
+               tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP_ID]);
 
-       if (tb[IP_TUN_DST])
-               tun_info->key.ipv4_dst = nla_get_be32(tb[IP_TUN_DST]);
+       if (tb[LWTUNNEL_IP_DST])
+               tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);
 
-       if (tb[IP_TUN_SRC])
-               tun_info->key.ipv4_src = nla_get_be32(tb[IP_TUN_SRC]);
+       if (tb[LWTUNNEL_IP_SRC])
+               tun_info->key.u.ipv4.src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);
 
-       if (tb[IP_TUN_TTL])
-               tun_info->key.ipv4_ttl = nla_get_u8(tb[IP_TUN_TTL]);
+       if (tb[LWTUNNEL_IP_TTL])
+               tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);
 
-       if (tb[IP_TUN_TOS])
-               tun_info->key.ipv4_tos = nla_get_u8(tb[IP_TUN_TOS]);
+       if (tb[LWTUNNEL_IP_TOS])
+               tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
 
-       if (tb[IP_TUN_SPORT])
-               tun_info->key.tp_src = nla_get_be16(tb[IP_TUN_SPORT]);
+       if (tb[LWTUNNEL_IP_SPORT])
+               tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]);
 
-       if (tb[IP_TUN_DPORT])
-               tun_info->key.tp_dst = nla_get_be16(tb[IP_TUN_DPORT]);
+       if (tb[LWTUNNEL_IP_DPORT])
+               tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]);
 
-       if (tb[IP_TUN_FLAGS])
-               tun_info->key.tun_flags = nla_get_u16(tb[IP_TUN_FLAGS]);
+       if (tb[LWTUNNEL_IP_FLAGS])
+               tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);
 
        tun_info->mode = IP_TUNNEL_INFO_TX;
        tun_info->options = NULL;
@@ -261,14 +261,14 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
 {
        struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
 
-       if (nla_put_u64(skb, IP_TUN_ID, tun_info->key.tun_id) ||
-           nla_put_be32(skb, IP_TUN_DST, tun_info->key.ipv4_dst) ||
-           nla_put_be32(skb, IP_TUN_SRC, tun_info->key.ipv4_src) ||
-           nla_put_u8(skb, IP_TUN_TOS, tun_info->key.ipv4_tos) ||
-           nla_put_u8(skb, IP_TUN_TTL, tun_info->key.ipv4_ttl) ||
-           nla_put_u16(skb, IP_TUN_SPORT, tun_info->key.tp_src) ||
-           nla_put_u16(skb, IP_TUN_DPORT, tun_info->key.tp_dst) ||
-           nla_put_u16(skb, IP_TUN_FLAGS, tun_info->key.tun_flags))
+       if (nla_put_u64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
+           nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
+           nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
+           nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
+           nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
+           nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) ||
+           nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) ||
+           nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
                return -ENOMEM;
 
        return 0;
@@ -276,25 +276,134 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
 
 static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
-       return nla_total_size(8)        /* IP_TUN_ID */
-               + nla_total_size(4)     /* IP_TUN_DST */
-               + nla_total_size(4)     /* IP_TUN_SRC */
-               + nla_total_size(1)     /* IP_TUN_TOS */
-               + nla_total_size(1)     /* IP_TUN_TTL */
-               + nla_total_size(2)     /* IP_TUN_SPORT */
-               + nla_total_size(2)     /* IP_TUN_DPORT */
-               + nla_total_size(2);    /* IP_TUN_FLAGS */
+       return nla_total_size(8)        /* LWTUNNEL_IP_ID */
+               + nla_total_size(4)     /* LWTUNNEL_IP_DST */
+               + nla_total_size(4)     /* LWTUNNEL_IP_SRC */
+               + nla_total_size(1)     /* LWTUNNEL_IP_TOS */
+               + nla_total_size(1)     /* LWTUNNEL_IP_TTL */
+               + nla_total_size(2)     /* LWTUNNEL_IP_SPORT */
+               + nla_total_size(2)     /* LWTUNNEL_IP_DPORT */
+               + nla_total_size(2);    /* LWTUNNEL_IP_FLAGS */
+}
+
+static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+       return memcmp(lwt_tun_info(a), lwt_tun_info(b),
+                     sizeof(struct ip_tunnel_info));
 }
 
 static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
        .build_state = ip_tun_build_state,
        .fill_encap = ip_tun_fill_encap_info,
        .get_encap_size = ip_tun_encap_nlsize,
+       .cmp_encap = ip_tun_cmp_encap,
+};
+
+static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
+       [LWTUNNEL_IP6_ID]               = { .type = NLA_U64 },
+       [LWTUNNEL_IP6_DST]              = { .len = sizeof(struct in6_addr) },
+       [LWTUNNEL_IP6_SRC]              = { .len = sizeof(struct in6_addr) },
+       [LWTUNNEL_IP6_HOPLIMIT]         = { .type = NLA_U8 },
+       [LWTUNNEL_IP6_TC]               = { .type = NLA_U8 },
+       [LWTUNNEL_IP6_SPORT]            = { .type = NLA_U16 },
+       [LWTUNNEL_IP6_DPORT]            = { .type = NLA_U16 },
+       [LWTUNNEL_IP6_FLAGS]            = { .type = NLA_U16 },
+};
+
+static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
+                              struct lwtunnel_state **ts)
+{
+       struct ip_tunnel_info *tun_info;
+       struct lwtunnel_state *new_state;
+       struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy);
+       if (err < 0)
+               return err;
+
+       new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+       if (!new_state)
+               return -ENOMEM;
+
+       new_state->type = LWTUNNEL_ENCAP_IP6;
+
+       tun_info = lwt_tun_info(new_state);
+
+       if (tb[LWTUNNEL_IP6_ID])
+               tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP6_ID]);
+
+       if (tb[LWTUNNEL_IP6_DST])
+               tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);
+
+       if (tb[LWTUNNEL_IP6_SRC])
+               tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);
+
+       if (tb[LWTUNNEL_IP6_HOPLIMIT])
+               tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);
+
+       if (tb[LWTUNNEL_IP6_TC])
+               tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
+
+       if (tb[LWTUNNEL_IP6_SPORT])
+               tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP6_SPORT]);
+
+       if (tb[LWTUNNEL_IP6_DPORT])
+               tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP6_DPORT]);
+
+       if (tb[LWTUNNEL_IP6_FLAGS])
+               tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]);
+
+       tun_info->mode = IP_TUNNEL_INFO_TX;
+       tun_info->options = NULL;
+       tun_info->options_len = 0;
+
+       *ts = new_state;
+
+       return 0;
+}
+
+static int ip6_tun_fill_encap_info(struct sk_buff *skb,
+                                  struct lwtunnel_state *lwtstate)
+{
+       struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+       if (nla_put_u64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
+           nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
+           nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
+           nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
+           nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
+           nla_put_u16(skb, LWTUNNEL_IP6_SPORT, tun_info->key.tp_src) ||
+           nla_put_u16(skb, LWTUNNEL_IP6_DPORT, tun_info->key.tp_dst) ||
+           nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+       return nla_total_size(8)        /* LWTUNNEL_IP6_ID */
+               + nla_total_size(16)    /* LWTUNNEL_IP6_DST */
+               + nla_total_size(16)    /* LWTUNNEL_IP6_SRC */
+               + nla_total_size(1)     /* LWTUNNEL_IP6_HOPLIMIT */
+               + nla_total_size(1)     /* LWTUNNEL_IP6_TC */
+               + nla_total_size(2)     /* LWTUNNEL_IP6_SPORT */
+               + nla_total_size(2)     /* LWTUNNEL_IP6_DPORT */
+               + nla_total_size(2);    /* LWTUNNEL_IP6_FLAGS */
+}
+
+static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
+       .build_state = ip6_tun_build_state,
+       .fill_encap = ip6_tun_fill_encap_info,
+       .get_encap_size = ip6_tun_encap_nlsize,
+       .cmp_encap = ip_tun_cmp_encap,
 };
 
 void __init ip_tunnel_core_init(void)
 {
        lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
+       lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
 }
 
 struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
index 2199a5db25e60412389d861a105d86c62100b396..690d27d3f2f90d99612de8ed4a32dec0596a680a 100644 (file)
@@ -58,6 +58,12 @@ config NFT_REJECT_IPV4
        default NFT_REJECT
        tristate
 
+config NFT_DUP_IPV4
+       tristate "IPv4 nf_tables packet duplication support"
+       select NF_DUP_IPV4
+       help
+         This module enables IPv4 packet duplication support for nf_tables.
+
 endif # NF_TABLES_IPV4
 
 config NF_TABLES_ARP
@@ -67,6 +73,12 @@ config NF_TABLES_ARP
 
 endif # NF_TABLES
 
+config NF_DUP_IPV4
+       tristate "Netfilter IPv4 packet duplication to alternate destination"
+       help
+         This option enables the nf_dup_ipv4 core, which duplicates an IPv4
+         packet to be rerouted to another destination.
+
 config NF_LOG_ARP
        tristate "ARP packet logging"
        default m if NETFILTER_ADVANCED=n
index 7fe6c703528f79f3ba6d355724c26f32e20a21c5..87b073da14c928df176e7f4163e5301edcafdd89 100644 (file)
@@ -41,6 +41,7 @@ obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
 obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
 obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o
 obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o
+obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
 obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
 
 # generic IP tables 
@@ -70,3 +71,5 @@ obj-$(CONFIG_IP_NF_ARP_MANGLE) += arpt_mangle.o
 
 # just filtering instance of ARP tables for now
 obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
+
+obj-$(CONFIG_NF_DUP_IPV4) += nf_dup_ipv4.o
index 4bf3dc49ad1ea84d59815cb9a46aa3cd3fd374b5..270765236f5e8cc9e39c02f9b6fa0836f853f96d 100644 (file)
@@ -72,7 +72,7 @@ set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
                tcph->cwr = einfo->proto.tcp.cwr;
 
        inet_proto_csum_replace2(&tcph->check, skb,
-                                oldval, ((__be16 *)tcph)[6], 0);
+                                oldval, ((__be16 *)tcph)[6], false);
        return true;
 }
 
index 30ad9554b5e9931ad37329f0ffda6a8aacdf55d1..8a2caaf3940bedaa9abba13352594a29341d287e 100644 (file)
@@ -280,7 +280,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
                return -EINVAL;
        }
 
-       h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+       h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
        if (h) {
                struct sockaddr_in sin;
                struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
index 80d5554b9a88da301a69db2df7d98f8a3a0a0348..cdde3ec496e94321c424d3dd37b31cb305e05451 100644 (file)
@@ -134,9 +134,11 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
        struct nf_conntrack_tuple innertuple, origtuple;
        const struct nf_conntrack_l4proto *innerproto;
        const struct nf_conntrack_tuple_hash *h;
-       u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+       const struct nf_conntrack_zone *zone;
+       struct nf_conntrack_zone tmp;
 
        NF_CT_ASSERT(skb->nfct == NULL);
+       zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
 
        /* Are they talking about one of our connections? */
        if (!nf_ct_get_tuplepr(skb,
index b69e82bda2159464b2eb0b0fd7c184c605947cc8..9306ec4fab41e9fa0c3c99fd6be78bcf8adfb397 100644 (file)
@@ -43,19 +43,22 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
                                              struct sk_buff *skb)
 {
-       u16 zone = NF_CT_DEFAULT_ZONE;
-
+       u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       if (skb->nfct)
-               zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+       if (skb->nfct) {
+               enum ip_conntrack_info ctinfo;
+               const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+               zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+       }
 #endif
        if (nf_bridge_in_prerouting(skb))
-               return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+               return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
 
        if (hooknum == NF_INET_PRE_ROUTING)
-               return IP_DEFRAG_CONNTRACK_IN + zone;
+               return IP_DEFRAG_CONNTRACK_IN + zone_id;
        else
-               return IP_DEFRAG_CONNTRACK_OUT + zone;
+               return IP_DEFRAG_CONNTRACK_OUT + zone_id;
 }
 
 static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c
new file mode 100644 (file)
index 0000000..b5bb375
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * (C) 2007 by Sebastian Claßen <sebastian.classen@freenet.ag>
+ * (C) 2007-2010 by Jan Engelhardt <jengelh@medozas.de>
+ *
+ * Extracted from xt_TEE.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/route.h>
+#include <linux/skbuff.h>
+#include <net/checksum.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+       const struct dst_entry *dst;
+
+       if (skb->dev != NULL)
+               return dev_net(skb->dev);
+       dst = skb_dst(skb);
+       if (dst != NULL && dst->dev != NULL)
+               return dev_net(dst->dev);
+#endif
+       return &init_net;
+}
+
+static bool nf_dup_ipv4_route(struct sk_buff *skb, const struct in_addr *gw,
+                             int oif)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       struct net *net = pick_net(skb);
+       struct rtable *rt;
+       struct flowi4 fl4;
+
+       memset(&fl4, 0, sizeof(fl4));
+       if (oif != -1)
+               fl4.flowi4_oif = oif;
+
+       fl4.daddr = gw->s_addr;
+       fl4.flowi4_tos = RT_TOS(iph->tos);
+       fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+       fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
+       rt = ip_route_output_key(net, &fl4);
+       if (IS_ERR(rt))
+               return false;
+
+       skb_dst_drop(skb);
+       skb_dst_set(skb, &rt->dst);
+       skb->dev      = rt->dst.dev;
+       skb->protocol = htons(ETH_P_IP);
+
+       return true;
+}
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+                const struct in_addr *gw, int oif)
+{
+       struct iphdr *iph;
+
+       if (this_cpu_read(nf_skb_duplicated))
+               return;
+       /*
+        * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
+        * the original skb, which should continue on its way as if nothing has
+        * happened. The copy should be independently delivered to the gateway.
+        */
+       skb = pskb_copy(skb, GFP_ATOMIC);
+       if (skb == NULL)
+               return;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+       /* Avoid counting cloned packets towards the original connection. */
+       nf_conntrack_put(skb->nfct);
+       skb->nfct     = &nf_ct_untracked_get()->ct_general;
+       skb->nfctinfo = IP_CT_NEW;
+       nf_conntrack_get(skb->nfct);
+#endif
+       /*
+        * If we are in PREROUTING/INPUT, the checksum must be recalculated
+        * since the length could have changed as a result of defragmentation.
+        *
+        * We also decrease the TTL to mitigate potential loops between two
+        * hosts.
+        *
+        * Set %IP_DF so that the original source is notified of a potentially
+        * decreased MTU on the clone route. IPv6 does this too.
+        */
+       iph = ip_hdr(skb);
+       iph->frag_off |= htons(IP_DF);
+       if (hooknum == NF_INET_PRE_ROUTING ||
+           hooknum == NF_INET_LOCAL_IN)
+               --iph->ttl;
+       ip_send_check(iph);
+
+       if (nf_dup_ipv4_route(skb, gw, oif)) {
+               __this_cpu_write(nf_skb_duplicated, true);
+               ip_local_out(skb);
+               __this_cpu_write(nf_skb_duplicated, false);
+       } else {
+               kfree_skb(skb);
+       }
+}
+EXPORT_SYMBOL_GPL(nf_dup_ipv4);
+
+MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("nf_dup_ipv4: Duplicate IPv4 packet");
+MODULE_LICENSE("GPL");
index e59cc05c09e96c8f6996e5e0063c4d138d0dee11..22f4579b0c2aeba3ad637e4cff756042e86e4244 100644 (file)
@@ -120,7 +120,7 @@ static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
                oldip = iph->daddr;
                newip = t->dst.u3.ip;
        }
-       inet_proto_csum_replace4(check, skb, oldip, newip, 1);
+       inet_proto_csum_replace4(check, skb, oldip, newip, true);
 }
 
 static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
@@ -151,7 +151,7 @@ static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
                }
        } else
                inet_proto_csum_replace2(check, skb,
-                                        htons(oldlen), htons(datalen), 1);
+                                        htons(oldlen), htons(datalen), true);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
index 4557b4ab8342740696b5fa4d3de8c6218ed70186..7b98baa13edeb1e9b944df54cffaa9ba669cef77 100644 (file)
@@ -67,7 +67,7 @@ icmp_manip_pkt(struct sk_buff *skb,
 
        hdr = (struct icmphdr *)(skb->data + hdroff);
        inet_proto_csum_replace2(&hdr->checksum, skb,
-                                hdr->un.echo.id, tuple->src.u.icmp.id, 0);
+                                hdr->un.echo.id, tuple->src.u.icmp.id, false);
        hdr->un.echo.id = tuple->src.u.icmp.id;
        return true;
 }
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
new file mode 100644 (file)
index 0000000..25419fb
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+
+struct nft_dup_ipv4 {
+       enum nft_registers      sreg_addr:8;
+       enum nft_registers      sreg_dev:8;
+};
+
+static void nft_dup_ipv4_eval(const struct nft_expr *expr,
+                             struct nft_regs *regs,
+                             const struct nft_pktinfo *pkt)
+{
+       struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+       struct in_addr gw = {
+               .s_addr = regs->data[priv->sreg_addr],
+       };
+       int oif = regs->data[priv->sreg_dev];
+
+       nf_dup_ipv4(pkt->skb, pkt->ops->hooknum, &gw, oif);
+}
+
+static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
+                            const struct nft_expr *expr,
+                            const struct nlattr * const tb[])
+{
+       struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+               return -EINVAL;
+
+       priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+       err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in_addr));
+       if (err < 0)
+               return err;
+
+       if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+               priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+               return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+       }
+       return 0;
+}
+
+static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+
+       if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+           nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_dup_ipv4_type;
+static const struct nft_expr_ops nft_dup_ipv4_ops = {
+       .type           = &nft_dup_ipv4_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv4)),
+       .eval           = nft_dup_ipv4_eval,
+       .init           = nft_dup_ipv4_init,
+       .dump           = nft_dup_ipv4_dump,
+};
+
+static const struct nla_policy nft_dup_ipv4_policy[NFTA_DUP_MAX + 1] = {
+       [NFTA_DUP_SREG_ADDR]    = { .type = NLA_U32 },
+       [NFTA_DUP_SREG_DEV]     = { .type = NLA_U32 },
+};
+
+static struct nft_expr_type nft_dup_ipv4_type __read_mostly = {
+       .family         = NFPROTO_IPV4,
+       .name           = "dup",
+       .ops            = &nft_dup_ipv4_ops,
+       .policy         = nft_dup_ipv4_policy,
+       .maxattr        = NFTA_DUP_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_dup_ipv4_module_init(void)
+{
+       return nft_register_expr(&nft_dup_ipv4_type);
+}
+
+static void __exit nft_dup_ipv4_module_exit(void)
+{
+       nft_unregister_expr(&nft_dup_ipv4_type);
+}
+
+module_init(nft_dup_ipv4_module_init);
+module_exit(nft_dup_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup");
index 2c89d294b669803323d2b48f32aeb7726d00b235..f3087aaa6dd86c7aad92814b53aa6cc6494292a1 100644 (file)
@@ -1359,7 +1359,6 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
                list_del(&rt->rt_uncached);
                spin_unlock_bh(&ul->lock);
        }
-       lwtstate_put(rt->rt_lwtstate);
 }
 
 void rt_flush_dev(struct net_device *dev)
@@ -1408,7 +1407,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 #ifdef CONFIG_IP_ROUTE_CLASSID
                rt->dst.tclassid = nh->nh_tclassid;
 #endif
-               rt->rt_lwtstate = lwtstate_get(nh->nh_lwtstate);
+               rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
                if (unlikely(fnhe))
                        cached = rt_bind_exception(rt, fnhe, daddr);
                else if (!(rt->dst.flags & DST_NOCACHE))
@@ -1494,7 +1493,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
-       rth->rt_lwtstate = NULL;
        if (our) {
                rth->dst.input= ip_local_deliver;
                rth->rt_flags |= RTCF_LOCAL;
@@ -1624,15 +1622,20 @@ static int __mkroute_input(struct sk_buff *skb,
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
-       rth->rt_lwtstate = NULL;
        RT_CACHE_STAT_INC(in_slow_tot);
 
        rth->dst.input = ip_forward;
        rth->dst.output = ip_output;
 
        rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
-       if (lwtunnel_output_redirect(rth->rt_lwtstate))
+       if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
+               rth->dst.lwtstate->orig_output = rth->dst.output;
                rth->dst.output = lwtunnel_output;
+       }
+       if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
+               rth->dst.lwtstate->orig_input = rth->dst.input;
+               rth->dst.input = lwtunnel_input;
+       }
        skb_dst_set(skb, &rth->dst);
 out:
        err = 0;
@@ -1689,7 +1692,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
           by fib_lookup.
         */
 
-       tun_info = skb_tunnel_info(skb, AF_INET);
+       tun_info = skb_tunnel_info(skb);
        if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
                fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
        else
@@ -1809,7 +1812,6 @@ local_input:
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
-       rth->rt_lwtstate = NULL;
 
        RT_CACHE_STAT_INC(in_slow_tot);
        if (res.type == RTN_UNREACHABLE) {
@@ -2000,7 +2002,6 @@ add:
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
-       rth->rt_lwtstate = NULL;
        RT_CACHE_STAT_INC(out_slow_tot);
 
        if (flags & RTCF_LOCAL)
@@ -2023,7 +2024,7 @@ add:
        }
 
        rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
-       if (lwtunnel_output_redirect(rth->rt_lwtstate))
+       if (lwtunnel_output_redirect(rth->dst.lwtstate))
                rth->dst.output = lwtunnel_output;
 
        return rth;
@@ -2287,7 +2288,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
                rt->rt_uses_gateway = ort->rt_uses_gateway;
 
                INIT_LIST_HEAD(&rt->rt_uncached);
-               rt->rt_lwtstate = NULL;
                dst_free(new);
        }
 
index 433231ccfb17fc6d01179247d1d81226803d18df..0330ab2e2b6329ced120cd9b7100a5a34f50e82b 100644 (file)
@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1;
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
-static int min_sndbuf = SOCK_MIN_SNDBUF;
-static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_tcp_wmem),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_sndbuf,
+               .extra1         = &one,
        },
        {
                .procname       = "tcp_notsent_lowat",
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_tcp_rmem),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_rcvbuf,
+               .extra1         = &one,
        },
        {
                .procname       = "tcp_app_win",
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_udp_rmem_min),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_rcvbuf,
+               .extra1         = &one
        },
        {
                .procname       = "udp_wmem_min",
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_udp_wmem_min),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_sndbuf,
+               .extra1         = &one
        },
        { }
 };
index bff69746e05f05d936ec8f7a62c34d3f87a55d10..55b3c0f4dde5ba45c0840542f1308613b6f06b8f 100644 (file)
@@ -19,7 +19,7 @@
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
 
 static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
-                                           int tos,
+                                           int tos, int oif,
                                            const xfrm_address_t *saddr,
                                            const xfrm_address_t *daddr)
 {
@@ -28,6 +28,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
        memset(fl4, 0, sizeof(*fl4));
        fl4->daddr = daddr->a4;
        fl4->flowi4_tos = tos;
+       fl4->flowi4_oif = oif;
        if (saddr)
                fl4->saddr = saddr->a4;
 
@@ -38,22 +39,22 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
        return ERR_CAST(rt);
 }
 
-static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
                                          const xfrm_address_t *saddr,
                                          const xfrm_address_t *daddr)
 {
        struct flowi4 fl4;
 
-       return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
+       return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr);
 }
 
-static int xfrm4_get_saddr(struct net *net,
+static int xfrm4_get_saddr(struct net *net, int oif,
                           xfrm_address_t *saddr, xfrm_address_t *daddr)
 {
        struct dst_entry *dst;
        struct flowi4 fl4;
 
-       dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
+       dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr);
        if (IS_ERR(dst))
                return -EHOSTUNREACH;
 
index 643f61339e7b4fc9d4dcba75c4bb772c99d39292..983bb999738c4dbe1c6b084f5cc75da7d14b1543 100644 (file)
@@ -92,6 +92,25 @@ config IPV6_MIP6
 
          If unsure, say N.
 
+config IPV6_ILA
+       tristate "IPv6: Identifier Locator Addressing (ILA)"
+       select LWTUNNEL
+       ---help---
+         Support for IPv6 Identifier Locator Addressing (ILA).
+
+         ILA is a mechanism to do network virtualization without
+         encapsulation. The basic concept of ILA is that we split an
+         IPv6 address into a 64 bit locator and 64 bit identifier. The
+         identifier is the identity of an entity in communication
+         ("who") and the locator expresses the location of the
+         entity ("where").
+
+         ILA can be configured using the "encap ila" option with
+         "ip -6 route" command. ILA is described in
+         https://tools.ietf.org/html/draft-herbert-nvo3-ila-00.
+
+         If unsure, say N.
+
 config INET6_XFRM_TUNNEL
        tristate
        select INET6_TUNNEL
index 0f3f1999719ac72617b14e68c13f2662f66054a7..2c900c7b7eb1c45b1619a42275552861bdfcda44 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_INET6_XFRM_MODE_TUNNEL) += xfrm6_mode_tunnel.o
 obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o
 obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
 obj-$(CONFIG_IPV6_MIP6) += mip6.o
+obj-$(CONFIG_IPV6_ILA) += ila.o
 obj-$(CONFIG_NETFILTER)        += netfilter/
 
 obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
index 59242399b0b531d88326bdb71a9184cae355532e..0f08d3b9e23826914c057efa7db4e7e946883bd8 100644 (file)
@@ -3656,7 +3656,7 @@ static void addrconf_dad_work(struct work_struct *w)
 
        /* send a neighbour solicitation for our addr */
        addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
-       ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
+       ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any, NULL);
 out:
        in6_ifa_put(ifp);
        rtnl_unlock();
diff --git a/net/ipv6/ila.c b/net/ipv6/ila.c
new file mode 100644 (file)
index 0000000..f011c3d
--- /dev/null
@@ -0,0 +1,210 @@
+#include <linux/errno.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/types.h>
+#include <net/checksum.h>
+#include <net/ip.h>
+#include <net/ip6_fib.h>
+#include <net/lwtunnel.h>
+#include <net/protocol.h>
+#include <uapi/linux/ila.h>
+
+struct ila_params {
+       __be64 locator;
+};
+
+static inline struct ila_params *ila_params_lwtunnel(
+       struct lwtunnel_state *lwstate)
+{
+       return (struct ila_params *)lwstate->data;
+}
+
+static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to)
+{
+       __be32 diff[] = {
+               ~from[0], ~from[1], to[0], to[1],
+       };
+
+       return csum_partial(diff, sizeof(diff), 0);
+}
+
+static inline __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p)
+{
+               return compute_csum_diff8((__be32 *)&ip6h->daddr,
+                                         (__be32 *)&p->locator);
+}
+
+static void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
+{
+       __wsum diff;
+       struct ipv6hdr *ip6h = ipv6_hdr(skb);
+       size_t nhoff = sizeof(struct ipv6hdr);
+
+       /* First update checksum */
+       switch (ip6h->nexthdr) {
+       case NEXTHDR_TCP:
+               if (likely(pskb_may_pull(skb, nhoff + sizeof(struct tcphdr)))) {
+                       struct tcphdr *th = (struct tcphdr *)
+                                       (skb_network_header(skb) + nhoff);
+
+                       diff = get_csum_diff(ip6h, p);
+                       inet_proto_csum_replace_by_diff(&th->check, skb,
+                                                       diff, true);
+               }
+               break;
+       case NEXTHDR_UDP:
+               if (likely(pskb_may_pull(skb, nhoff + sizeof(struct udphdr)))) {
+                       struct udphdr *uh = (struct udphdr *)
+                                       (skb_network_header(skb) + nhoff);
+
+                       if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+                               diff = get_csum_diff(ip6h, p);
+                               inet_proto_csum_replace_by_diff(&uh->check, skb,
+                                                               diff, true);
+                               if (!uh->check)
+                                       uh->check = CSUM_MANGLED_0;
+                       }
+               }
+               break;
+       case NEXTHDR_ICMP:
+               if (likely(pskb_may_pull(skb,
+                                        nhoff + sizeof(struct icmp6hdr)))) {
+                       struct icmp6hdr *ih = (struct icmp6hdr *)
+                                       (skb_network_header(skb) + nhoff);
+
+                       diff = get_csum_diff(ip6h, p);
+                       inet_proto_csum_replace_by_diff(&ih->icmp6_cksum, skb,
+                                                       diff, true);
+               }
+               break;
+       }
+
+       /* Now change destination address */
+       *(__be64 *)&ip6h->daddr = p->locator;
+}
+
+static int ila_output(struct sock *sk, struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+
+       if (skb->protocol != htons(ETH_P_IPV6))
+               goto drop;
+
+       update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+       return dst->lwtstate->orig_output(sk, skb);
+
+drop:
+       kfree_skb(skb);
+       return -EINVAL;
+}
+
+static int ila_input(struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+
+       if (skb->protocol != htons(ETH_P_IPV6))
+               goto drop;
+
+       update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+       return dst->lwtstate->orig_input(skb);
+
+drop:
+       kfree_skb(skb);
+       return -EINVAL;
+}
+
+static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
+       [ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
+};
+
+static int ila_build_state(struct net_device *dev, struct nlattr *nla,
+                          struct lwtunnel_state **ts)
+{
+       struct ila_params *p;
+       struct nlattr *tb[ILA_ATTR_MAX + 1];
+       size_t encap_len = sizeof(*p);
+       struct lwtunnel_state *newts;
+       int ret;
+
+       ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla,
+                              ila_nl_policy);
+       if (ret < 0)
+               return ret;
+
+       if (!tb[ILA_ATTR_LOCATOR])
+               return -EINVAL;
+
+       newts = lwtunnel_state_alloc(encap_len);
+       if (!newts)
+               return -ENOMEM;
+
+       newts->len = encap_len;
+       p = ila_params_lwtunnel(newts);
+
+       p->locator = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);
+
+       newts->type = LWTUNNEL_ENCAP_ILA;
+       newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT |
+                       LWTUNNEL_STATE_INPUT_REDIRECT;
+
+       *ts = newts;
+
+       return 0;
+}
+
+static int ila_fill_encap_info(struct sk_buff *skb,
+                              struct lwtunnel_state *lwtstate)
+{
+       struct ila_params *p = ila_params_lwtunnel(lwtstate);
+
+       if (nla_put_u64(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+       /* No encapsulation overhead */
+       return 0;
+}
+
+static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+       struct ila_params *a_p = ila_params_lwtunnel(a);
+       struct ila_params *b_p = ila_params_lwtunnel(b);
+
+       return (a_p->locator != b_p->locator);
+}
+
+static const struct lwtunnel_encap_ops ila_encap_ops = {
+       .build_state = ila_build_state,
+       .output = ila_output,
+       .input = ila_input,
+       .fill_encap = ila_fill_encap_info,
+       .get_encap_size = ila_encap_nlsize,
+       .cmp_encap = ila_encap_cmp,
+};
+
+static int __init ila_init(void)
+{
+       return lwtunnel_encap_add_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
+}
+
+static void __exit ila_fini(void)
+{
+       lwtunnel_encap_del_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
+}
+
+module_init(ila_init);
+module_exit(ila_fini);
+MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
+MODULE_LICENSE("GPL");
index 5693b5eb84820fceb7feb2f87345cd38b2613c6e..418d9823692b6e78077d44c1ed8b15e998e2316b 100644 (file)
@@ -173,12 +173,13 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
                        *ppcpu_rt = NULL;
                }
        }
+
+       non_pcpu_rt->rt6i_pcpu = NULL;
 }
 
 static void rt6_release(struct rt6_info *rt)
 {
        if (atomic_dec_and_test(&rt->rt6i_ref)) {
-               lwtstate_put(rt->rt6i_lwtstate);
                rt6_free_pcpu(rt);
                dst_free(&rt->dst);
        }
index df8afe5ab31e4b8e75bf2fbf844f8b3e798edbba..9405b04eecc64f478960329da93f6e01d437954e 100644 (file)
@@ -143,34 +143,36 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
        struct sk_buff *skb_chk = NULL;
        unsigned int transport_len;
        unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
-       int ret;
+       int ret = -EINVAL;
 
        transport_len = ntohs(ipv6_hdr(skb)->payload_len);
        transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
 
-       skb_get(skb);
        skb_chk = skb_checksum_trimmed(skb, transport_len,
                                       ipv6_mc_validate_checksum);
        if (!skb_chk)
-               return -EINVAL;
+               goto err;
 
-       if (!pskb_may_pull(skb_chk, len)) {
-               kfree_skb(skb_chk);
-               return -EINVAL;
-       }
+       if (!pskb_may_pull(skb_chk, len))
+               goto err;
 
        ret = ipv6_mc_check_mld_msg(skb_chk);
-       if (ret) {
-               kfree_skb(skb_chk);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        if (skb_trimmed)
                *skb_trimmed = skb_chk;
-       else
+       /* free now unneeded clone */
+       else if (skb_chk != skb)
                kfree_skb(skb_chk);
 
-       return 0;
+       ret = 0;
+
+err:
+       if (ret && skb_chk && skb_chk != skb)
+               kfree_skb(skb_chk);
+
+       return ret;
 }
 
 /**
@@ -179,7 +181,7 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
  *
  * Checks whether an IPv6 packet is a valid MLD packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  *  standard
@@ -194,7 +196,8 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
index b3054611f88a5f69503e1a44ced1592579dfc4fd..13d3c2beb93ea3e6e7745858ac2ba0520b0d65ba 100644 (file)
@@ -553,7 +553,8 @@ static void ndisc_send_unsol_na(struct net_device *dev)
 
 void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
                   const struct in6_addr *solicit,
-                  const struct in6_addr *daddr, const struct in6_addr *saddr)
+                  const struct in6_addr *daddr, const struct in6_addr *saddr,
+                  struct sk_buff *oskb)
 {
        struct sk_buff *skb;
        struct in6_addr addr_buf;
@@ -589,6 +590,9 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
                ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
                                       dev->dev_addr);
 
+       if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
+               skb_dst_copy(skb, oskb);
+
        ndisc_send_skb(skb, daddr, saddr);
 }
 
@@ -675,12 +679,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
                                  "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
                                  __func__, target);
                }
-               ndisc_send_ns(dev, neigh, target, target, saddr);
+               ndisc_send_ns(dev, neigh, target, target, saddr, skb);
        } else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
                neigh_app_ns(neigh);
        } else {
                addrconf_addr_solict_mult(target, &mcaddr);
-               ndisc_send_ns(dev, NULL, target, &mcaddr, saddr);
+               ndisc_send_ns(dev, NULL, target, &mcaddr, saddr, skb);
        }
 }
 
index b552cf0d6198c50308461d0d83ac09bdd8c21f82..96833e4b31939a191eaf7de297ac438d4aa41fa4 100644 (file)
@@ -47,9 +47,21 @@ config NFT_REJECT_IPV6
        default NFT_REJECT
        tristate
 
+config NFT_DUP_IPV6
+       tristate "IPv6 nf_tables packet duplication support"
+       select NF_DUP_IPV6
+       help
+         This module enables IPv6 packet duplication support for nf_tables.
+
 endif # NF_TABLES_IPV6
 endif # NF_TABLES
 
+config NF_DUP_IPV6
+       tristate "Netfilter IPv6 packet duplication to alternate destination"
+       help
+         This option enables the nf_dup_ipv6 core, which duplicates an IPv6
+         packet to be rerouted to another destination.
+
 config NF_REJECT_IPV6
        tristate "IPv6 packet rejection"
        default m if NETFILTER_ADVANCED=n
index c36e0a5490de10cd64f5c64571efa13628568199..b4f7d0b4e2afc630f7a5be2ae949dff676dc5985 100644 (file)
@@ -30,6 +30,8 @@ obj-$(CONFIG_NF_LOG_IPV6) += nf_log_ipv6.o
 # reject
 obj-$(CONFIG_NF_REJECT_IPV6) += nf_reject_ipv6.o
 
+obj-$(CONFIG_NF_DUP_IPV6) += nf_dup_ipv6.o
+
 # nf_tables
 obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
 obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
@@ -37,6 +39,7 @@ obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
 obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
 obj-$(CONFIG_NFT_MASQ_IPV6) += nft_masq_ipv6.o
 obj-$(CONFIG_NFT_REDIR_IPV6) += nft_redir_ipv6.o
+obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o
 
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
index 4ba0c34c627b0e88d3a06fda6532c83a3936315e..7302900c321aff58fcb7dc21794b50e04b1942d8 100644 (file)
@@ -251,7 +251,7 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
        if (*len < 0 || (unsigned int) *len < sizeof(sin6))
                return -EINVAL;
 
-       h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+       h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
        if (!h) {
                pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n",
                         &tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port),
index 90388d606483cbbd15e421b3e51f6d757cd05883..0e6fae103d33454f70fb5790b71d2529af969636 100644 (file)
@@ -150,7 +150,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
        struct nf_conntrack_tuple intuple, origtuple;
        const struct nf_conntrack_tuple_hash *h;
        const struct nf_conntrack_l4proto *inproto;
-       u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+       struct nf_conntrack_zone tmp;
 
        NF_CT_ASSERT(skb->nfct == NULL);
 
@@ -177,7 +177,8 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
 
        *ctinfo = IP_CT_RELATED;
 
-       h = nf_conntrack_find_get(net, zone, &intuple);
+       h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp),
+                                 &intuple);
        if (!h) {
                pr_debug("icmpv6_error: no match\n");
                return -NF_ACCEPT;
index 267fb8d5876e169f27e0e9a595dc89a20cfbea4e..6d9c0b3d5b8c49d111cca7bd70b9bc5229f0a263 100644 (file)
 static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
                                                struct sk_buff *skb)
 {
-       u16 zone = NF_CT_DEFAULT_ZONE;
-
+       u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       if (skb->nfct)
-               zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+       if (skb->nfct) {
+               enum ip_conntrack_info ctinfo;
+               const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+               zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+       }
 #endif
        if (nf_bridge_in_prerouting(skb))
-               return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+               return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
 
        if (hooknum == NF_INET_PRE_ROUTING)
-               return IP6_DEFRAG_CONNTRACK_IN + zone;
+               return IP6_DEFRAG_CONNTRACK_IN + zone_id;
        else
-               return IP6_DEFRAG_CONNTRACK_OUT + zone;
-
+               return IP6_DEFRAG_CONNTRACK_OUT + zone_id;
 }
 
 static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
new file mode 100644 (file)
index 0000000..d8ab654
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * (C) 2007 by Sebastian Claßen <sebastian.classen@freenet.ag>
+ * (C) 2007-2010 by Jan Engelhardt <jengelh@medozas.de>
+ *
+ * Extracted from xt_TEE.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/skbuff.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+       const struct dst_entry *dst;
+
+       if (skb->dev != NULL)
+               return dev_net(skb->dev);
+       dst = skb_dst(skb);
+       if (dst != NULL && dst->dev != NULL)
+               return dev_net(dst->dev);
+#endif
+       return &init_net;
+}
+
+static bool nf_dup_ipv6_route(struct sk_buff *skb, const struct in6_addr *gw,
+                             int oif)
+{
+       const struct ipv6hdr *iph = ipv6_hdr(skb);
+       struct net *net = pick_net(skb);
+       struct dst_entry *dst;
+       struct flowi6 fl6;
+
+       memset(&fl6, 0, sizeof(fl6));
+       if (oif != -1)
+               fl6.flowi6_oif = oif;
+
+       fl6.daddr = *gw;
+       fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
+                        (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
+       dst = ip6_route_output(net, NULL, &fl6);
+       if (dst->error) {
+               dst_release(dst);
+               return false;
+       }
+       skb_dst_drop(skb);
+       skb_dst_set(skb, dst);
+       skb->dev      = dst->dev;
+       skb->protocol = htons(ETH_P_IPV6);
+
+       return true;
+}
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+                const struct in6_addr *gw, int oif)
+{
+       if (this_cpu_read(nf_skb_duplicated))
+               return;
+       skb = pskb_copy(skb, GFP_ATOMIC);
+       if (skb == NULL)
+               return;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+       nf_conntrack_put(skb->nfct);
+       skb->nfct     = &nf_ct_untracked_get()->ct_general;
+       skb->nfctinfo = IP_CT_NEW;
+       nf_conntrack_get(skb->nfct);
+#endif
+       if (hooknum == NF_INET_PRE_ROUTING ||
+           hooknum == NF_INET_LOCAL_IN) {
+               struct ipv6hdr *iph = ipv6_hdr(skb);
+               --iph->hop_limit;
+       }
+       if (nf_dup_ipv6_route(skb, gw, oif)) {
+               __this_cpu_write(nf_skb_duplicated, true);
+               ip6_local_out(skb);
+               __this_cpu_write(nf_skb_duplicated, false);
+       } else {
+               kfree_skb(skb);
+       }
+}
+EXPORT_SYMBOL_GPL(nf_dup_ipv6);
+
+MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("nf_dup_ipv6: IPv6 packet duplication");
+MODULE_LICENSE("GPL");
index e76900e0aa925a26c226f733f9a44e396ea7cc7f..70fbaed49edbc5511d9327be5c9c0e003dc12e7d 100644 (file)
@@ -124,7 +124,7 @@ static void nf_nat_ipv6_csum_update(struct sk_buff *skb,
                newip = &t->dst.u3.in6;
        }
        inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
-                                 newip->s6_addr32, 1);
+                                 newip->s6_addr32, true);
 }
 
 static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
@@ -155,7 +155,7 @@ static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
                }
        } else
                inet_proto_csum_replace2(check, skb,
-                                        htons(oldlen), htons(datalen), 1);
+                                        htons(oldlen), htons(datalen), true);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
index 2205e8eeeacfa2ff56980cbeb73b6d52c77089fe..57593b00c5b4327164b79567be619f0f7561f5a2 100644 (file)
@@ -73,7 +73,7 @@ icmpv6_manip_pkt(struct sk_buff *skb,
            hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
                inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
                                         hdr->icmp6_identifier,
-                                        tuple->src.u.icmp.id, 0);
+                                        tuple->src.u.icmp.id, false);
                hdr->icmp6_identifier = tuple->src.u.icmp.id;
        }
        return true;
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
new file mode 100644 (file)
index 0000000..0eaa4f6
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
+
+struct nft_dup_ipv6 {
+       enum nft_registers      sreg_addr:8;
+       enum nft_registers      sreg_dev:8;
+};
+
+static void nft_dup_ipv6_eval(const struct nft_expr *expr,
+                             struct nft_regs *regs,
+                             const struct nft_pktinfo *pkt)
+{
+       struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+       struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr];
+       int oif = regs->data[priv->sreg_dev];
+
+       nf_dup_ipv6(pkt->skb, pkt->ops->hooknum, gw, oif);
+}
+
+static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
+                            const struct nft_expr *expr,
+                            const struct nlattr * const tb[])
+{
+       struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+               return -EINVAL;
+
+       priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+       err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in6_addr));
+       if (err < 0)
+               return err;
+
+       if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+               priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+               return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+       }
+       return 0;
+}
+
+static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+
+       if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+           nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_dup_ipv6_type;
+static const struct nft_expr_ops nft_dup_ipv6_ops = {
+       .type           = &nft_dup_ipv6_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv6)),
+       .eval           = nft_dup_ipv6_eval,
+       .init           = nft_dup_ipv6_init,
+       .dump           = nft_dup_ipv6_dump,
+};
+
+static const struct nla_policy nft_dup_ipv6_policy[NFTA_DUP_MAX + 1] = {
+       [NFTA_DUP_SREG_ADDR]    = { .type = NLA_U32 },
+       [NFTA_DUP_SREG_DEV]     = { .type = NLA_U32 },
+};
+
+static struct nft_expr_type nft_dup_ipv6_type __read_mostly = {
+       .family         = NFPROTO_IPV6,
+       .name           = "dup",
+       .ops            = &nft_dup_ipv6_ops,
+       .policy         = nft_dup_ipv6_policy,
+       .maxattr        = NFTA_DUP_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_dup_ipv6_module_init(void)
+{
+       return nft_register_expr(&nft_dup_ipv6_type);
+}
+
+static void __exit nft_dup_ipv6_module_exit(void)
+{
+       nft_unregister_expr(&nft_dup_ipv6_type);
+}
+
+module_init(nft_dup_ipv6_module_init);
+module_exit(nft_dup_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "dup");
index 1c0217e6135797fbb6e0fd794c25f81666e2e220..e476f01add87c62295a52fef641c806caca7d09a 100644 (file)
 #include <net/tcp.h>
 #include <linux/rtnetlink.h>
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 #include <net/xfrm.h>
 #include <net/netevent.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
 #include <net/lwtunnel.h>
+#include <net/ip_tunnels.h>
 
 #include <asm/uaccess.h>
 
@@ -319,8 +321,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
 /* allocate dst with ip6_dst_ops */
 static struct rt6_info *__ip6_dst_alloc(struct net *net,
                                        struct net_device *dev,
-                                       int flags,
-                                       struct fib6_table *table)
+                                       int flags)
 {
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -337,10 +338,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
 
 static struct rt6_info *ip6_dst_alloc(struct net *net,
                                      struct net_device *dev,
-                                     int flags,
-                                     struct fib6_table *table)
+                                     int flags)
 {
-       struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+       struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
        if (rt) {
                rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
@@ -538,7 +538,7 @@ static void rt6_probe_deferred(struct work_struct *w)
                container_of(w, struct __rt6_probe_work, work);
 
        addrconf_addr_solict_mult(&work->target, &mcaddr);
-       ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+       ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
        dev_put(work->dev);
        kfree(work);
 }
@@ -957,8 +957,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
        if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
                ort = (struct rt6_info *)ort->dst.from;
 
-       rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
-                            0, ort->rt6i_table);
+       rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
 
        if (!rt)
                return NULL;
@@ -990,8 +989,7 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
        struct rt6_info *pcpu_rt;
 
        pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
-                                 rt->dst.dev, rt->dst.flags,
-                                 rt->rt6i_table);
+                                 rt->dst.dev, rt->dst.flags);
 
        if (!pcpu_rt)
                return NULL;
@@ -1004,32 +1002,53 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
 /* It should be called with read_lock_bh(&tb6_lock) acquired */
 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
 {
-       struct rt6_info *pcpu_rt, *prev, **p;
+       struct rt6_info *pcpu_rt, **p;
 
        p = this_cpu_ptr(rt->rt6i_pcpu);
        pcpu_rt = *p;
 
-       if (pcpu_rt)
-               goto done;
+       if (pcpu_rt) {
+               dst_hold(&pcpu_rt->dst);
+               rt6_dst_from_metrics_check(pcpu_rt);
+       }
+       return pcpu_rt;
+}
+
+static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
+{
+       struct fib6_table *table = rt->rt6i_table;
+       struct rt6_info *pcpu_rt, *prev, **p;
 
        pcpu_rt = ip6_rt_pcpu_alloc(rt);
        if (!pcpu_rt) {
                struct net *net = dev_net(rt->dst.dev);
 
-               pcpu_rt = net->ipv6.ip6_null_entry;
-               goto done;
+               dst_hold(&net->ipv6.ip6_null_entry->dst);
+               return net->ipv6.ip6_null_entry;
        }
 
-       prev = cmpxchg(p, NULL, pcpu_rt);
-       if (prev) {
-               /* If someone did it before us, return prev instead */
+       read_lock_bh(&table->tb6_lock);
+       if (rt->rt6i_pcpu) {
+               p = this_cpu_ptr(rt->rt6i_pcpu);
+               prev = cmpxchg(p, NULL, pcpu_rt);
+               if (prev) {
+                       /* If someone did it before us, return prev instead */
+                       dst_destroy(&pcpu_rt->dst);
+                       pcpu_rt = prev;
+               }
+       } else {
+               /* rt has been removed from the fib6 tree
+                * before we have a chance to acquire the read_lock.
+                * In this case, don't brother to create a pcpu rt
+                * since rt is going away anyway.  The next
+                * dst_check() will trigger a re-lookup.
+                */
                dst_destroy(&pcpu_rt->dst);
-               pcpu_rt = prev;
+               pcpu_rt = rt;
        }
-
-done:
        dst_hold(&pcpu_rt->dst);
        rt6_dst_from_metrics_check(pcpu_rt);
+       read_unlock_bh(&table->tb6_lock);
        return pcpu_rt;
 }
 
@@ -1104,9 +1123,22 @@ redo_rt6_select:
                rt->dst.lastuse = jiffies;
                rt->dst.__use++;
                pcpu_rt = rt6_get_pcpu_route(rt);
-               read_unlock_bh(&table->tb6_lock);
+
+               if (pcpu_rt) {
+                       read_unlock_bh(&table->tb6_lock);
+               } else {
+                       /* We have to do the read_unlock first
+                        * because rt6_make_pcpu_route() may trigger
+                        * ip6_dst_gc() which will take the write_lock.
+                        */
+                       dst_hold(&rt->dst);
+                       read_unlock_bh(&table->tb6_lock);
+                       pcpu_rt = rt6_make_pcpu_route(rt);
+                       dst_release(&rt->dst);
+               }
 
                return pcpu_rt;
+
        }
 }
 
@@ -1131,6 +1163,7 @@ void ip6_route_input(struct sk_buff *skb)
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct net *net = dev_net(skb->dev);
        int flags = RT6_LOOKUP_F_HAS_SADDR;
+       struct ip_tunnel_info *tun_info;
        struct flowi6 fl6 = {
                .flowi6_iif = skb->dev->ifindex,
                .daddr = iph->daddr,
@@ -1140,6 +1173,10 @@ void ip6_route_input(struct sk_buff *skb)
                .flowi6_proto = iph->nexthdr,
        };
 
+       tun_info = skb_tunnel_info(skb);
+       if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
+               fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
+       skb_dst_drop(skb);
        skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
 }
 
@@ -1562,7 +1599,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        if (unlikely(!idev))
                return ERR_PTR(-ENODEV);
 
-       rt = ip6_dst_alloc(net, dev, 0, NULL);
+       rt = ip6_dst_alloc(net, dev, 0);
        if (unlikely(!rt)) {
                in6_dev_put(idev);
                dst = ERR_PTR(-ENOMEM);
@@ -1749,7 +1786,8 @@ int ip6_route_add(struct fib6_config *cfg)
        if (!table)
                goto out;
 
-       rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+       rt = ip6_dst_alloc(net, NULL,
+                          (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
 
        if (!rt) {
                err = -ENOMEM;
@@ -1784,9 +1822,15 @@ int ip6_route_add(struct fib6_config *cfg)
                                           cfg->fc_encap, &lwtstate);
                if (err)
                        goto out;
-               rt->rt6i_lwtstate = lwtstate_get(lwtstate);
-               if (lwtunnel_output_redirect(rt->rt6i_lwtstate))
-                       rt->dst.output = lwtunnel_output6;
+               rt->dst.lwtstate = lwtstate_get(lwtstate);
+               if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
+                       rt->dst.lwtstate->orig_output = rt->dst.output;
+                       rt->dst.output = lwtunnel_output;
+               }
+               if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
+                       rt->dst.lwtstate->orig_input = rt->dst.input;
+                       rt->dst.input = lwtunnel_input;
+               }
        }
 
        ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
@@ -2168,7 +2212,7 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
 #endif
        rt->rt6i_prefsrc = ort->rt6i_prefsrc;
        rt->rt6i_table = ort->rt6i_table;
-       rt->rt6i_lwtstate = lwtstate_get(ort->rt6i_lwtstate);
+       rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
@@ -2419,7 +2463,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 {
        struct net *net = dev_net(idev->dev);
        struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
-                                           DST_NOCOUNT, NULL);
+                                           DST_NOCOUNT);
        if (!rt)
                return ERR_PTR(-ENOMEM);
 
@@ -2832,7 +2876,7 @@ static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
               + nla_total_size(sizeof(struct rta_cacheinfo))
               + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
               + nla_total_size(1) /* RTA_PREF */
-              + lwtunnel_get_encap_size(rt->rt6i_lwtstate);
+              + lwtunnel_get_encap_size(rt->dst.lwtstate);
 }
 
 static int rt6_fill_node(struct net *net,
@@ -2985,7 +3029,7 @@ static int rt6_fill_node(struct net *net,
        if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
                goto nla_put_failure;
 
-       lwtunnel_fill_encap(skb, rt->rt6i_lwtstate);
+       lwtunnel_fill_encap(skb, rt->dst.lwtstate);
 
        nlmsg_end(skb, nlh);
        return 0;
index e51fc3eee6dbd65506e8612fc5782b9482cf4708..0aba654f5b91c198cce365a5dd619e30b566e798 100644 (file)
@@ -1496,7 +1496,8 @@ int __net_init udp6_proc_init(struct net *net)
        return udp_proc_register(net, &udp6_seq_afinfo);
 }
 
-void udp6_proc_exit(struct net *net) {
+void udp6_proc_exit(struct net *net)
+{
        udp_proc_unregister(net, &udp6_seq_afinfo);
 }
 #endif /* CONFIG_PROC_FS */
index 901ef6f8addc0cf730909d2656513372cd6cf80f..f7fbdbabe50efbc91ea5ed811bdf24b33172cd4b 100644 (file)
 
 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 {
-       const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
        struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
 
-       if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
+       if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
                IP6_ECN_set_ce(inner_iph);
 }
 
index ed0583c1b9fc2e0033912e2d4c7177a12f17e8b7..a74013d3eceb448e07d39bc0e2dc5dd03dc401bd 100644 (file)
@@ -26,7 +26,7 @@
 
 static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
 
-static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
                                          const xfrm_address_t *saddr,
                                          const xfrm_address_t *daddr)
 {
@@ -35,6 +35,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
        int err;
 
        memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_oif = oif;
        memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
        if (saddr)
                memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
@@ -50,13 +51,13 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
        return dst;
 }
 
-static int xfrm6_get_saddr(struct net *net,
+static int xfrm6_get_saddr(struct net *net, int oif,
                           xfrm_address_t *saddr, xfrm_address_t *daddr)
 {
        struct dst_entry *dst;
        struct net_device *dev;
 
-       dst = xfrm6_dst_lookup(net, 0, NULL, daddr);
+       dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr);
        if (IS_ERR(dst))
                return -EHOSTUNREACH;
 
index 086de496a4c197bb98a5e0b550a9b67d516d4902..3891cbd2adeab7bdcb062be3dfb36926cf91d23c 100644 (file)
@@ -7,7 +7,6 @@ config MAC80211
        select CRYPTO_CCM
        select CRYPTO_GCM
        select CRC32
-       select AVERAGE
        ---help---
          This option enables the hardware independent IEEE 802.11
          networking stack.
index 3275f01881bee8a53a046e117873347fe04877c8..783e891b7525c537bcab70b1be7c603e5f6d1f0d 100644 (file)
@@ -3,6 +3,7 @@ obj-$(CONFIG_MAC80211) += mac80211.o
 # mac80211 objects
 mac80211-y := \
        main.o status.o \
+       driver-ops.o \
        sta_info.o \
        wep.o \
        wpa.o \
index 4192806be3d36884d22ce5830a43cae63d54745d..bdf0790d89cca6fe3f64c097c84299ac35e1f3d0 100644 (file)
@@ -145,20 +145,3 @@ void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm)
 {
        crypto_free_cipher(tfm);
 }
-
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
-                                       u8 *k1, u8 *k2)
-{
-       u8 l[AES_BLOCK_SIZE] = {};
-       struct ieee80211_key *key =
-               container_of(keyconf, struct ieee80211_key, conf);
-
-       crypto_cipher_encrypt_one(key->u.aes_cmac.tfm, l, l);
-
-       memcpy(k1, l, AES_BLOCK_SIZE);
-       gf_mulx(k1);
-
-       memcpy(k2, k1, AES_BLOCK_SIZE);
-       gf_mulx(k2);
-}
-EXPORT_SYMBOL(ieee80211_aes_cmac_calculate_k1_k2);
index bf7023f6c3278289f1100f7ce6fa4f56d72caa15..685ec13ed7c2b0a2dcdcf82d7388d1c44d041a26 100644 (file)
@@ -1019,6 +1019,65 @@ static int sta_apply_auth_flags(struct ieee80211_local *local,
        return 0;
 }
 
+static void sta_apply_mesh_params(struct ieee80211_local *local,
+                                 struct sta_info *sta,
+                                 struct station_parameters *params)
+{
+#ifdef CONFIG_MAC80211_MESH
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       u32 changed = 0;
+
+       if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
+               switch (params->plink_state) {
+               case NL80211_PLINK_ESTAB:
+                       if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
+                               changed = mesh_plink_inc_estab_count(sdata);
+                       sta->mesh->plink_state = params->plink_state;
+
+                       ieee80211_mps_sta_status_update(sta);
+                       changed |= ieee80211_mps_set_sta_local_pm(sta,
+                                     sdata->u.mesh.mshcfg.power_mode);
+                       break;
+               case NL80211_PLINK_LISTEN:
+               case NL80211_PLINK_BLOCKED:
+               case NL80211_PLINK_OPN_SNT:
+               case NL80211_PLINK_OPN_RCVD:
+               case NL80211_PLINK_CNF_RCVD:
+               case NL80211_PLINK_HOLDING:
+                       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+                               changed = mesh_plink_dec_estab_count(sdata);
+                       sta->mesh->plink_state = params->plink_state;
+
+                       ieee80211_mps_sta_status_update(sta);
+                       changed |= ieee80211_mps_set_sta_local_pm(sta,
+                                       NL80211_MESH_POWER_UNKNOWN);
+                       break;
+               default:
+                       /*  nothing  */
+                       break;
+               }
+       }
+
+       switch (params->plink_action) {
+       case NL80211_PLINK_ACTION_NO_ACTION:
+               /* nothing */
+               break;
+       case NL80211_PLINK_ACTION_OPEN:
+               changed |= mesh_plink_open(sta);
+               break;
+       case NL80211_PLINK_ACTION_BLOCK:
+               changed |= mesh_plink_block(sta);
+               break;
+       }
+
+       if (params->local_pm)
+               changed |= ieee80211_mps_set_sta_local_pm(sta,
+                                                         params->local_pm);
+
+       ieee80211_mbss_info_change_notify(sdata, changed);
+#endif
+}
+
 static int sta_apply_parameters(struct ieee80211_local *local,
                                struct sta_info *sta,
                                struct station_parameters *params)
@@ -1076,7 +1135,6 @@ static int sta_apply_parameters(struct ieee80211_local *local,
        }
 
        if (mask & BIT(NL80211_STA_FLAG_MFP)) {
-               sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP));
                if (set & BIT(NL80211_STA_FLAG_MFP))
                        set_sta_flag(sta, WLAN_STA_MFP);
                else
@@ -1097,6 +1155,12 @@ static int sta_apply_parameters(struct ieee80211_local *local,
            params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)
                set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH);
 
+       if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+           ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
+           params->ext_capab_len >= 8 &&
+           params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED)
+               set_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW);
+
        if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) {
                sta->sta.uapsd_queues = params->uapsd_queues;
                sta->sta.max_sp = params->max_sp;
@@ -1144,62 +1208,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                                              band, false);
        }
 
-       if (ieee80211_vif_is_mesh(&sdata->vif)) {
-#ifdef CONFIG_MAC80211_MESH
-               u32 changed = 0;
-
-               if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
-                       switch (params->plink_state) {
-                       case NL80211_PLINK_ESTAB:
-                               if (sta->plink_state != NL80211_PLINK_ESTAB)
-                                       changed = mesh_plink_inc_estab_count(
-                                                       sdata);
-                               sta->plink_state = params->plink_state;
-
-                               ieee80211_mps_sta_status_update(sta);
-                               changed |= ieee80211_mps_set_sta_local_pm(sta,
-                                             sdata->u.mesh.mshcfg.power_mode);
-                               break;
-                       case NL80211_PLINK_LISTEN:
-                       case NL80211_PLINK_BLOCKED:
-                       case NL80211_PLINK_OPN_SNT:
-                       case NL80211_PLINK_OPN_RCVD:
-                       case NL80211_PLINK_CNF_RCVD:
-                       case NL80211_PLINK_HOLDING:
-                               if (sta->plink_state == NL80211_PLINK_ESTAB)
-                                       changed = mesh_plink_dec_estab_count(
-                                                       sdata);
-                               sta->plink_state = params->plink_state;
-
-                               ieee80211_mps_sta_status_update(sta);
-                               changed |= ieee80211_mps_set_sta_local_pm(sta,
-                                               NL80211_MESH_POWER_UNKNOWN);
-                               break;
-                       default:
-                               /*  nothing  */
-                               break;
-                       }
-               }
-
-               switch (params->plink_action) {
-               case NL80211_PLINK_ACTION_NO_ACTION:
-                       /* nothing */
-                       break;
-               case NL80211_PLINK_ACTION_OPEN:
-                       changed |= mesh_plink_open(sta);
-                       break;
-               case NL80211_PLINK_ACTION_BLOCK:
-                       changed |= mesh_plink_block(sta);
-                       break;
-               }
-
-               if (params->local_pm)
-                       changed |=
-                             ieee80211_mps_set_sta_local_pm(sta,
-                                                            params->local_pm);
-               ieee80211_mbss_info_change_notify(sdata, changed);
-#endif
-       }
+       if (ieee80211_vif_is_mesh(&sdata->vif))
+               sta_apply_mesh_params(local, sta, params);
 
        /* set the STA state after all sta info from usermode has been set */
        if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
@@ -2358,6 +2368,8 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
        const u8 *ap;
        enum ieee80211_smps_mode old_req;
        int err;
+       struct sta_info *sta;
+       bool tdls_peer_found = false;
 
        lockdep_assert_held(&sdata->wdev.mtx);
 
@@ -2382,11 +2394,22 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
 
        ap = sdata->u.mgd.associated->bssid;
 
+       rcu_read_lock();
+       list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+               if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+                   !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+                       continue;
+
+               tdls_peer_found = true;
+               break;
+       }
+       rcu_read_unlock();
+
        if (smps_mode == IEEE80211_SMPS_AUTOMATIC) {
-               if (sdata->u.mgd.powersave)
-                       smps_mode = IEEE80211_SMPS_DYNAMIC;
-               else
+               if (tdls_peer_found || !sdata->u.mgd.powersave)
                        smps_mode = IEEE80211_SMPS_OFF;
+               else
+                       smps_mode = IEEE80211_SMPS_DYNAMIC;
        }
 
        /* send SM PS frame to AP */
@@ -2394,6 +2417,8 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
                                         ap, ap);
        if (err)
                sdata->u.mgd.req_smps = old_req;
+       else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found)
+               ieee80211_teardown_tdls_peers(sdata);
 
        return err;
 }
@@ -2479,16 +2504,26 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
                sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
                memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].ht_mcs,
                       sizeof(mask->control[i].ht_mcs));
+               memcpy(sdata->rc_rateidx_vht_mcs_mask[i],
+                      mask->control[i].vht_mcs,
+                      sizeof(mask->control[i].vht_mcs));
 
                sdata->rc_has_mcs_mask[i] = false;
+               sdata->rc_has_vht_mcs_mask[i] = false;
                if (!sband)
                        continue;
 
-               for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++)
-                       if (~sdata->rc_rateidx_mcs_mask[i][j]) {
+               for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
+                       if (~sdata->rc_rateidx_mcs_mask[i][j])
                                sdata->rc_has_mcs_mask[i] = true;
+
+                       if (~sdata->rc_rateidx_vht_mcs_mask[i][j])
+                               sdata->rc_has_vht_mcs_mask[i] = true;
+
+                       if (sdata->rc_has_mcs_mask[i] &&
+                           sdata->rc_has_vht_mcs_mask[i])
                                break;
-                       }
+               }
        }
 
        return 0;
index f01c18a3160e11d72dae9e2a0939530ec805f6a5..1d1b9b7bdefe74ac851ca6d01554d663fae39541 100644 (file)
@@ -190,7 +190,7 @@ ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
        return NULL;
 }
 
-static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
+enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
 {
        switch (sta->bandwidth) {
        case IEEE80211_STA_RX_BW_20:
@@ -264,9 +264,17 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
                case NL80211_IFTYPE_AP_VLAN:
                        width = ieee80211_get_max_required_bw(sdata);
                        break;
+               case NL80211_IFTYPE_STATION:
+                       /*
+                        * The ap's sta->bandwidth is not set yet at this
+                        * point, so take the width from the chandef, but
+                        * account also for TDLS peers
+                        */
+                       width = max(vif->bss_conf.chandef.width,
+                                   ieee80211_get_max_required_bw(sdata));
+                       break;
                case NL80211_IFTYPE_P2P_DEVICE:
                        continue;
-               case NL80211_IFTYPE_STATION:
                case NL80211_IFTYPE_ADHOC:
                case NL80211_IFTYPE_WDS:
                case NL80211_IFTYPE_MESH_POINT:
@@ -554,12 +562,13 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
        kfree_rcu(ctx, rcu_head);
 }
 
-static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
-                                             struct ieee80211_chanctx *ctx)
+void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+                                      struct ieee80211_chanctx *ctx)
 {
        struct ieee80211_chanctx_conf *conf = &ctx->conf;
        struct ieee80211_sub_if_data *sdata;
        const struct cfg80211_chan_def *compat = NULL;
+       struct sta_info *sta;
 
        lockdep_assert_held(&local->chanctx_mtx);
 
@@ -581,6 +590,20 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
                if (WARN_ON_ONCE(!compat))
                        break;
        }
+
+       /* TDLS peers can sometimes affect the chandef width */
+       list_for_each_entry_rcu(sta, &local->sta_list, list) {
+               if (!sta->uploaded ||
+                   !test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) ||
+                   !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
+                   !sta->tdls_chandef.chan)
+                       continue;
+
+               compat = cfg80211_chandef_compatible(&sta->tdls_chandef,
+                                                    compat);
+               if (WARN_ON_ONCE(!compat))
+                       break;
+       }
        rcu_read_unlock();
 
        if (!compat)
index 3ea8b7de963368faf6effceec8ad0e73c8d7fb54..ced6bf3be8d6cf5d3d9fc80b6c46f48c4e567aef 100644 (file)
@@ -122,6 +122,7 @@ static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = {
        FLAG(CHANCTX_STA_CSA),
        FLAG(SUPPORTS_CLONED_SKBS),
        FLAG(SINGLE_SCAN_ON_ALL_BANDS),
+       FLAG(TDLS_WIDER_BW),
 
        /* keep last for the build bug below */
        (void *)0x1
@@ -277,7 +278,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
        DEBUGFS_STATS_ADD(rx_handlers_queued);
        DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc);
        DEBUGFS_STATS_ADD(rx_handlers_drop_defrag);
-       DEBUGFS_STATS_ADD(rx_handlers_drop_short);
        DEBUGFS_STATS_ADD(tx_expand_skb_head);
        DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned);
        DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag);
index e82bf1e9d7a83e32566bf9d96f8f461bd4f141ab..702ca122c498938691842d95db7732d6d8c6d6bb 100644 (file)
@@ -57,7 +57,6 @@ KEY_CONF_FILE(keylen, D);
 KEY_CONF_FILE(keyidx, D);
 KEY_CONF_FILE(hw_key_idx, D);
 KEY_FILE(flags, X);
-KEY_FILE(tx_rx_count, D);
 KEY_READ(ifindex, sdata->name, "%s\n");
 KEY_OPS(ifindex);
 
@@ -310,7 +309,6 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
        DEBUGFS_ADD(flags);
        DEBUGFS_ADD(keyidx);
        DEBUGFS_ADD(hw_key_idx);
-       DEBUGFS_ADD(tx_rx_count);
        DEBUGFS_ADD(algorithm);
        DEBUGFS_ADD(tx_spec);
        DEBUGFS_ADD(rx_spec);
index c09c0131bfa227e99346b180f501cc6fcf64644a..1021e87c051f35168eef1274a38a4d720054359e 100644 (file)
@@ -186,6 +186,38 @@ IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz,
 IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
                  rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY);
 
+static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz(
+                               const struct ieee80211_sub_if_data *sdata,
+                               char *buf, int buflen)
+{
+       int i, len = 0;
+       const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_2GHZ];
+
+       for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+               len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
+       len += scnprintf(buf + len, buflen - len, "\n");
+
+       return len;
+}
+
+IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_2ghz);
+
+static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz(
+                               const struct ieee80211_sub_if_data *sdata,
+                               char *buf, int buflen)
+{
+       int i, len = 0;
+       const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_5GHZ];
+
+       for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+               len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
+       len += scnprintf(buf + len, buflen - len, "\n");
+
+       return len;
+}
+
+IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_5ghz);
+
 IEEE80211_IF_FILE(flags, flags, HEX);
 IEEE80211_IF_FILE(state, state, LHEX);
 IEEE80211_IF_FILE(txpower, vif.bss_conf.txpower, DEC);
@@ -565,6 +597,8 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
        DEBUGFS_ADD(rc_rateidx_mask_5ghz);
        DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
        DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
+       DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz);
+       DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
        DEBUGFS_ADD(hw_queues);
 }
 
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
new file mode 100644 (file)
index 0000000..267c3b1
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+#include "trace.h"
+#include "driver-ops.h"
+
+__must_check
+int drv_sta_state(struct ieee80211_local *local,
+                 struct ieee80211_sub_if_data *sdata,
+                 struct sta_info *sta,
+                 enum ieee80211_sta_state old_state,
+                 enum ieee80211_sta_state new_state)
+{
+       int ret = 0;
+
+       might_sleep();
+
+       sdata = get_bss_sdata(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
+
+       trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
+       if (local->ops->sta_state) {
+               ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
+                                           old_state, new_state);
+       } else if (old_state == IEEE80211_STA_AUTH &&
+                  new_state == IEEE80211_STA_ASSOC) {
+               ret = drv_sta_add(local, sdata, &sta->sta);
+               if (ret == 0)
+                       sta->uploaded = true;
+       } else if (old_state == IEEE80211_STA_ASSOC &&
+                  new_state == IEEE80211_STA_AUTH) {
+               drv_sta_remove(local, sdata, &sta->sta);
+       }
+       trace_drv_return_int(local, ret);
+       return ret;
+}
index 32a2e707e2226355235907c956632d6f03aa5af5..02d91332d7dddbe4459246308d605146be4430d7 100644 (file)
@@ -573,37 +573,12 @@ static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local,
        trace_drv_return_void(local);
 }
 
-static inline __must_check
+__must_check
 int drv_sta_state(struct ieee80211_local *local,
                  struct ieee80211_sub_if_data *sdata,
                  struct sta_info *sta,
                  enum ieee80211_sta_state old_state,
-                 enum ieee80211_sta_state new_state)
-{
-       int ret = 0;
-
-       might_sleep();
-
-       sdata = get_bss_sdata(sdata);
-       if (!check_sdata_in_driver(sdata))
-               return -EIO;
-
-       trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
-       if (local->ops->sta_state) {
-               ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
-                                           old_state, new_state);
-       } else if (old_state == IEEE80211_STA_AUTH &&
-                  new_state == IEEE80211_STA_ASSOC) {
-               ret = drv_sta_add(local, sdata, &sta->sta);
-               if (ret == 0)
-                       sta->uploaded = true;
-       } else if (old_state == IEEE80211_STA_ASSOC &&
-                  new_state == IEEE80211_STA_AUTH) {
-               drv_sta_remove(local, sdata, &sta->sta);
-       }
-       trace_drv_return_int(local, ret);
-       return ret;
-}
+                 enum ieee80211_sta_state new_state);
 
 static inline void drv_sta_rc_update(struct ieee80211_local *local,
                                     struct ieee80211_sub_if_data *sdata,
index b12f61507f9f9a4f84eaad69f98ac209af90361e..6e52659f923f72a6ab451e73b75338e4f4e7de13 100644 (file)
@@ -84,13 +84,13 @@ struct ieee80211_local;
 #define IEEE80211_DEAUTH_FRAME_LEN     (24 /* hdr */ + 2 /* reason */)
 
 struct ieee80211_fragment_entry {
-       unsigned long first_frag_time;
-       unsigned int seq;
-       unsigned int rx_queue;
-       unsigned int last_frag;
-       unsigned int extra_len;
        struct sk_buff_head skb_list;
-       int ccmp; /* Whether fragments were encrypted with CCMP */
+       unsigned long first_frag_time;
+       u16 seq;
+       u16 extra_len;
+       u16 last_frag;
+       u8 rx_queue;
+       bool ccmp; /* Whether fragments were encrypted with CCMP */
        u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
 };
 
@@ -181,7 +181,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
 
 /**
  * enum ieee80211_packet_rx_flags - packet RX flags
- * @IEEE80211_RX_FRAGMENTED: fragmented frame
  * @IEEE80211_RX_AMSDU: a-MSDU packet
  * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
  * @IEEE80211_RX_DEFERRED_RELEASE: frame was subjected to receive reordering
@@ -190,7 +189,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
  * @rx_flags field of &struct ieee80211_rx_status.
  */
 enum ieee80211_packet_rx_flags {
-       IEEE80211_RX_FRAGMENTED                 = BIT(2),
        IEEE80211_RX_AMSDU                      = BIT(3),
        IEEE80211_RX_MALFORMED_ACTION_FRM       = BIT(4),
        IEEE80211_RX_DEFERRED_RELEASE           = BIT(5),
@@ -202,8 +200,6 @@ enum ieee80211_packet_rx_flags {
  * @IEEE80211_RX_CMNTR: received on cooked monitor already
  * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
  *     to cfg80211_report_obss_beacon().
- * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
- *     reorder buffer timeout timer, not the normal RX path
  *
  * These flags are used across handling multiple interfaces
  * for a single frame.
@@ -211,10 +207,10 @@ enum ieee80211_packet_rx_flags {
 enum ieee80211_rx_flags {
        IEEE80211_RX_CMNTR              = BIT(0),
        IEEE80211_RX_BEACON_REPORTED    = BIT(1),
-       IEEE80211_RX_REORDER_TIMER      = BIT(2),
 };
 
 struct ieee80211_rx_data {
+       struct napi_struct *napi;
        struct sk_buff *skb;
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
@@ -725,6 +721,7 @@ struct ieee80211_if_mesh {
  *     back to wireless media and to the local net stack.
  * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
  * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
+ * @IEEE80211_SDATA_MU_MIMO_OWNER: indicates interface owns MU-MIMO capability
  */
 enum ieee80211_sub_if_data_flags {
        IEEE80211_SDATA_ALLMULTI                = BIT(0),
@@ -732,6 +729,7 @@ enum ieee80211_sub_if_data_flags {
        IEEE80211_SDATA_DONT_BRIDGE_PACKETS     = BIT(3),
        IEEE80211_SDATA_DISCONNECT_RESUME       = BIT(4),
        IEEE80211_SDATA_IN_DRIVER               = BIT(5),
+       IEEE80211_SDATA_MU_MIMO_OWNER           = BIT(6),
 };
 
 /**
@@ -903,6 +901,9 @@ struct ieee80211_sub_if_data {
        bool rc_has_mcs_mask[IEEE80211_NUM_BANDS];
        u8  rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN];
 
+       bool rc_has_vht_mcs_mask[IEEE80211_NUM_BANDS];
+       u16 rc_rateidx_vht_mcs_mask[IEEE80211_NUM_BANDS][NL80211_VHT_NSS_MAX];
+
        union {
                struct ieee80211_if_ap ap;
                struct ieee80211_if_wds wds;
@@ -1010,7 +1011,6 @@ enum sdata_queue_type {
        IEEE80211_SDATA_QUEUE_AGG_STOP          = 2,
        IEEE80211_SDATA_QUEUE_RX_AGG_START      = 3,
        IEEE80211_SDATA_QUEUE_RX_AGG_STOP       = 4,
-       IEEE80211_SDATA_QUEUE_TDLS_CHSW         = 5,
 };
 
 enum {
@@ -1286,7 +1286,6 @@ struct ieee80211_local {
        unsigned int rx_handlers_queued;
        unsigned int rx_handlers_drop_nullfunc;
        unsigned int rx_handlers_drop_defrag;
-       unsigned int rx_handlers_drop_short;
        unsigned int tx_expand_skb_head;
        unsigned int tx_expand_skb_head_cloned;
        unsigned int rx_expand_skb_head_defrag;
@@ -1348,14 +1347,16 @@ struct ieee80211_local {
 
        struct ieee80211_sub_if_data __rcu *p2p_sdata;
 
-       struct napi_struct *napi;
-
        /* virtual monitor interface */
        struct ieee80211_sub_if_data __rcu *monitor_sdata;
        struct cfg80211_chan_def monitor_chandef;
 
        /* extended capabilities provided by mac80211 */
        u8 ext_capa[8];
+
+       /* TDLS channel switch */
+       struct work_struct tdls_chsw_work;
+       struct sk_buff_head skb_queue_tdls_chsw;
 };
 
 static inline struct ieee80211_sub_if_data *
@@ -1715,6 +1716,8 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                 enum ieee80211_band band, bool nss_only);
 void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
                                      struct ieee80211_sta_vht_cap *vht_cap);
+void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+                                    u16 vht_mask[NL80211_VHT_NSS_MAX]);
 
 /* Spectrum management */
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -1763,8 +1766,6 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 
 /* utility functions/constants */
 extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
-u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
-                       enum nl80211_iftype type);
 int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
                             int rate, int erp, int short_preamble,
                             int shift);
@@ -2042,6 +2043,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
                                 enum ieee80211_chanctx_mode chanmode,
                                 u8 radar_detect);
 int ieee80211_max_num_channels(struct ieee80211_local *local);
+enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta);
+void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+                                      struct ieee80211_chanctx *ctx);
 
 /* TDLS */
 int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
@@ -2058,8 +2062,8 @@ int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev,
 void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
                                          struct net_device *dev,
                                          const u8 *addr);
-void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
-                                          struct sk_buff *skb);
+void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
+void ieee80211_tdls_chsw_work(struct work_struct *wk);
 
 extern const struct ethtool_ops ieee80211_ethtool_ops;
 
index 553ac6dd4867480048aed3ca0d430948f928d3c7..6964fc6a8ea2c7b46149e8be5e79c6f373f50d40 100644 (file)
@@ -1242,8 +1242,6 @@ static void ieee80211_iface_work(struct work_struct *work)
                                                        WLAN_BACK_RECIPIENT, 0,
                                                        false);
                        mutex_unlock(&local->sta_mtx);
-               } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_TDLS_CHSW) {
-                       ieee80211_process_tdls_channel_switch(sdata, skb);
                } else if (ieee80211_is_action(mgmt->frame_control) &&
                           mgmt->u.action.category == WLAN_CATEGORY_BACK) {
                        int len = skb->len;
@@ -1790,13 +1788,23 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                sband = local->hw.wiphy->bands[i];
                sdata->rc_rateidx_mask[i] =
                        sband ? (1 << sband->n_bitrates) - 1 : 0;
-               if (sband)
+               if (sband) {
+                       __le16 cap;
+                       u16 *vht_rate_mask;
+
                        memcpy(sdata->rc_rateidx_mcs_mask[i],
                               sband->ht_cap.mcs.rx_mask,
                               sizeof(sdata->rc_rateidx_mcs_mask[i]));
-               else
+
+                       cap = sband->vht_cap.vht_mcs.rx_mcs_map;
+                       vht_rate_mask = sdata->rc_rateidx_vht_mcs_mask[i];
+                       ieee80211_get_vht_mask_from_cap(cap, vht_rate_mask);
+               } else {
                        memset(sdata->rc_rateidx_mcs_mask[i], 0,
                               sizeof(sdata->rc_rateidx_mcs_mask[i]));
+                       memset(sdata->rc_rateidx_vht_mcs_mask[i], 0,
+                              sizeof(sdata->rc_rateidx_vht_mcs_mask[i]));
+               }
        }
 
        ieee80211_set_default_queues(sdata);
index b22df3a79a417c9d182647f3126e004490d3de93..44388d6a1d8e628f4324f64e95c9de675e861830 100644 (file)
@@ -336,7 +336,6 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
                        ieee80211_check_fast_xmit(sta);
                } else {
                        rcu_assign_pointer(sta->gtk[idx], new);
-                       sta->gtk_idx = idx;
                }
        } else {
                defunikey = old &&
index 3f4f9eaac14003d1a2f2f655215fd0ed3fcca499..9951ef06323e743d2c33156d15e5a21478584cd9 100644 (file)
@@ -115,9 +115,6 @@ struct ieee80211_key {
                } gen;
        } u;
 
-       /* number of times this key has been used */
-       int tx_rx_count;
-
 #ifdef CONFIG_MAC80211_DEBUGFS
        struct {
                struct dentry *stalink;
index 3c63468b4dfb530d0e28a5f97dddf5065543ba6f..ff79a13d231db0d4197c80a67a9d119d4c870e68 100644 (file)
@@ -629,6 +629,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
        INIT_WORK(&local->sched_scan_stopped_work,
                  ieee80211_sched_scan_stopped_work);
 
+       INIT_WORK(&local->tdls_chsw_work, ieee80211_tdls_chsw_work);
+
        spin_lock_init(&local->ack_status_lock);
        idr_init(&local->ack_status_frames);
 
@@ -645,6 +647,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
 
        skb_queue_head_init(&local->skb_queue);
        skb_queue_head_init(&local->skb_queue_unreliable);
+       skb_queue_head_init(&local->skb_queue_tdls_chsw);
 
        ieee80211_alloc_led_names(local);
 
@@ -1132,18 +1135,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 }
 EXPORT_SYMBOL(ieee80211_register_hw);
 
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
-                       struct net_device *napi_dev,
-                       int (*poll)(struct napi_struct *, int),
-                       int weight)
-{
-       struct ieee80211_local *local = hw_to_local(hw);
-
-       netif_napi_add(napi_dev, napi, poll, weight);
-       local->napi = napi;
-}
-EXPORT_SYMBOL_GPL(ieee80211_napi_add);
-
 void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 {
        struct ieee80211_local *local = hw_to_local(hw);
@@ -1173,6 +1164,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 
        cancel_work_sync(&local->restart_work);
        cancel_work_sync(&local->reconfig_filter);
+       cancel_work_sync(&local->tdls_chsw_work);
        flush_work(&local->sched_scan_stopped_work);
 
        ieee80211_clear_tx_pending(local);
@@ -1183,6 +1175,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
                wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
        skb_queue_purge(&local->skb_queue);
        skb_queue_purge(&local->skb_queue_unreliable);
+       skb_queue_purge(&local->skb_queue_tdls_chsw);
 
        destroy_workqueue(local->workqueue);
        wiphy_unregister(local->hw.wiphy);
index 817098add1d6736e1632ba7f142a9b81465cd71f..e06a5ca7c9a996b311b524c37f93629ccc928a61 100644 (file)
@@ -158,7 +158,7 @@ void mesh_sta_cleanup(struct sta_info *sta)
        changed = mesh_accept_plinks_update(sdata);
        if (!sdata->u.mesh.user_mpm) {
                changed |= mesh_plink_deactivate(sta);
-               del_timer_sync(&sta->plink_timer);
+               del_timer_sync(&sta->mesh->plink_timer);
        }
 
        if (changed)
index 085edc1d056bf7adfe1f7fba5a500733bf72927a..d80e0a4c16cf98eb386bb61c3513f20619693556 100644 (file)
 
 #define MAX_PREQ_QUEUE_LEN     64
 
-/* Destination only */
-#define MP_F_DO        0x1
-/* Reply and forward */
-#define MP_F_RF        0x2
-/* Unknown Sequence Number */
-#define MP_F_USN    0x01
-/* Reason code Present */
-#define MP_F_RCODE  0x02
-
 static void mesh_queue_preq(struct mesh_path *, u8);
 
 static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
@@ -79,6 +70,12 @@ static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
 #define MSEC_TO_TU(x) (x*1000/1024)
 #define SN_GT(x, y) ((s32)(y - x) < 0)
 #define SN_LT(x, y) ((s32)(x - y) < 0)
+#define MAX_SANE_SN_DELTA 32
+
+static inline u32 SN_DELTA(u32 x, u32 y)
+{
+       return x >= y ? x - y : y - x;
+}
 
 #define net_traversal_jiffies(s) \
        msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
@@ -279,15 +276,10 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
        *pos++ = ttl;
        /* number of destinations */
        *pos++ = 1;
-       /*
-        * flags bit, bit 1 is unset if we know the sequence number and
-        * bit 2 is set if we have a reason code
+       /* Flags field has AE bit only as defined in
+        * sec 8.4.2.117 IEEE802.11-2012
         */
        *pos = 0;
-       if (!target_sn)
-               *pos |= MP_F_USN;
-       if (target_rcode)
-               *pos |= MP_F_RCODE;
        pos++;
        memcpy(pos, target, ETH_ALEN);
        pos += ETH_ALEN;
@@ -316,8 +308,9 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
        failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 
        /* moving average, scaled to 100 */
-       sta->fail_avg = ((80 * sta->fail_avg + 5) / 100 + 20 * failed);
-       if (sta->fail_avg > 95)
+       sta->mesh->fail_avg =
+               ((80 * sta->mesh->fail_avg + 5) / 100 + 20 * failed);
+       if (sta->mesh->fail_avg > 95)
                mesh_plink_broken(sta);
 }
 
@@ -333,7 +326,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
        u32 tx_time, estimated_retx;
        u64 result;
 
-       if (sta->fail_avg >= 100)
+       if (sta->mesh->fail_avg >= 100)
                return MAX_METRIC;
 
        sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
@@ -341,7 +334,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
        if (WARN_ON(!rate))
                return MAX_METRIC;
 
-       err = (sta->fail_avg << ARITH_SHIFT) / 100;
+       err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100;
 
        /* bitrate is in units of 100 Kbps, while we need rate in units of
         * 1Mbps. This will be corrected on tx_time computation.
@@ -441,6 +434,26 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
                                        process = false;
                                        fresh_info = false;
                                }
+                       } else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
+                               bool have_sn, newer_sn, bounced;
+
+                               have_sn = mpath->flags & MESH_PATH_SN_VALID;
+                               newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
+                               bounced = have_sn &&
+                                         (SN_DELTA(orig_sn, mpath->sn) >
+                                                       MAX_SANE_SN_DELTA);
+
+                               if (!have_sn || newer_sn) {
+                                       /* if SN is newer than what we had
+                                        * then we can take it */;
+                               } else if (bounced) {
+                                       /* if SN is way different than what
+                                        * we had then assume the other side
+                                        * rebooted or restarted */;
+                               } else {
+                                       process = false;
+                                       fresh_info = false;
+                               }
                        }
                } else {
                        mpath = mesh_path_add(sdata, orig_addr);
@@ -570,15 +583,13 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                                        SN_LT(mpath->sn, target_sn)) {
                                mpath->sn = target_sn;
                                mpath->flags |= MESH_PATH_SN_VALID;
-                       } else if ((!(target_flags & MP_F_DO)) &&
+                       } else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
                                        (mpath->flags & MESH_PATH_ACTIVE)) {
                                reply = true;
                                target_metric = mpath->metric;
                                target_sn = mpath->sn;
-                               if (target_flags & MP_F_RF)
-                                       target_flags |= MP_F_DO;
-                               else
-                                       forward = false;
+                               /* Case E2 of sec 13.10.9.3 IEEE 802.11-2012*/
+                               target_flags |= IEEE80211_PREQ_TO_FLAG;
                        }
                }
                rcu_read_unlock();
@@ -736,9 +747,12 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
                if (mpath->flags & MESH_PATH_ACTIVE &&
                    ether_addr_equal(ta, sta->sta.addr) &&
                    (!(mpath->flags & MESH_PATH_SN_VALID) ||
-                   SN_GT(target_sn, mpath->sn))) {
+                   SN_GT(target_sn, mpath->sn)  || target_sn == 0)) {
                        mpath->flags &= ~MESH_PATH_ACTIVE;
-                       mpath->sn = target_sn;
+                       if (target_sn != 0)
+                               mpath->sn = target_sn;
+                       else
+                               mpath->sn += 1;
                        spin_unlock_bh(&mpath->state_lock);
                        if (!ifmsh->mshcfg.dot11MeshForwarding)
                                goto endperr;
@@ -862,7 +876,7 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
 
        rcu_read_lock();
        sta = sta_info_get(sdata, mgmt->sa);
-       if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
+       if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
                rcu_read_unlock();
                return;
        }
@@ -974,7 +988,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct mesh_preq_queue *preq_node;
        struct mesh_path *mpath;
-       u8 ttl, target_flags;
+       u8 ttl, target_flags = 0;
        const u8 *da;
        u32 lifetime;
 
@@ -1033,9 +1047,9 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
        }
 
        if (preq_node->flags & PREQ_Q_F_REFRESH)
-               target_flags = MP_F_DO;
+               target_flags |= IEEE80211_PREQ_TO_FLAG;
        else
-               target_flags = MP_F_RF;
+               target_flags &= ~IEEE80211_PREQ_TO_FLAG;
 
        spin_unlock_bh(&mpath->state_lock);
        da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
@@ -1176,7 +1190,9 @@ void mesh_path_timer(unsigned long data)
                spin_unlock_bh(&mpath->state_lock);
                mesh_queue_preq(mpath, 0);
        } else {
-               mpath->flags = 0;
+               mpath->flags &= ~(MESH_PATH_RESOLVING |
+                                 MESH_PATH_RESOLVED |
+                                 MESH_PATH_REQ_QUEUED);
                mpath->exp_time = jiffies;
                spin_unlock_bh(&mpath->state_lock);
                if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
index 3b59099413fb1770e2ee2228065899bc5f9eb302..58384642e03c52bb6427fc809a9149769855e9c1 100644 (file)
 #include "rate.h"
 #include "mesh.h"
 
+#define PLINK_CNF_AID(mgmt) ((mgmt)->u.action.u.self_prot.variable + 2)
 #define PLINK_GET_LLID(p) (p + 2)
 #define PLINK_GET_PLID(p) (p + 4)
 
-#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
+#define mod_plink_timer(s, t) (mod_timer(&s->mesh->plink_timer, \
                                jiffies + msecs_to_jiffies(t)))
 
 enum plink_event {
@@ -53,18 +54,13 @@ static const char * const mplevents[] = {
        [CLS_IGNR] = "CLS_IGNR"
 };
 
-static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
-                              enum ieee80211_self_protected_actioncode action,
-                              u8 *da, u16 llid, u16 plid, u16 reason);
-
-
 /* We only need a valid sta if user configured a minimum rssi_threshold. */
 static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
                                 struct sta_info *sta)
 {
        s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
        return rssi_threshold == 0 ||
-              (sta && (s8) -ewma_read(&sta->avg_signal) > rssi_threshold);
+              (sta && (s8) -ewma_signal_read(&sta->avg_signal) > rssi_threshold);
 }
 
 /**
@@ -72,14 +68,14 @@ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
  *
  * @sta: mesh peer link to restart
  *
- * Locking: this function must be called holding sta->plink_lock
+ * Locking: this function must be called holding sta->mesh->plink_lock
  */
 static inline void mesh_plink_fsm_restart(struct sta_info *sta)
 {
-       lockdep_assert_held(&sta->plink_lock);
-       sta->plink_state = NL80211_PLINK_LISTEN;
-       sta->llid = sta->plid = sta->reason = 0;
-       sta->plink_retries = 0;
+       lockdep_assert_held(&sta->mesh->plink_lock);
+       sta->mesh->plink_state = NL80211_PLINK_LISTEN;
+       sta->mesh->llid = sta->mesh->plid = sta->mesh->reason = 0;
+       sta->mesh->plink_retries = 0;
 }
 
 /*
@@ -119,7 +115,7 @@ static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
        rcu_read_lock();
        list_for_each_entry_rcu(sta, &local->sta_list, list) {
                if (sdata != sta->sdata ||
-                   sta->plink_state != NL80211_PLINK_ESTAB)
+                   sta->mesh->plink_state != NL80211_PLINK_ESTAB)
                        continue;
 
                short_slot = false;
@@ -169,7 +165,7 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
        rcu_read_lock();
        list_for_each_entry_rcu(sta, &local->sta_list, list) {
                if (sdata != sta->sdata ||
-                   sta->plink_state != NL80211_PLINK_ESTAB)
+                   sta->mesh->plink_state != NL80211_PLINK_ESTAB)
                        continue;
 
                if (sta->sta.bandwidth > IEEE80211_STA_RX_BW_20)
@@ -204,59 +200,8 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
        return BSS_CHANGED_HT;
 }
 
-/**
- * __mesh_plink_deactivate - deactivate mesh peer link
- *
- * @sta: mesh peer link to deactivate
- *
- * All mesh paths with this peer as next hop will be flushed
- * Returns beacon changed flag if the beacon content changed.
- *
- * Locking: the caller must hold sta->plink_lock
- */
-static u32 __mesh_plink_deactivate(struct sta_info *sta)
-{
-       struct ieee80211_sub_if_data *sdata = sta->sdata;
-       u32 changed = 0;
-
-       lockdep_assert_held(&sta->plink_lock);
-
-       if (sta->plink_state == NL80211_PLINK_ESTAB)
-               changed = mesh_plink_dec_estab_count(sdata);
-       sta->plink_state = NL80211_PLINK_BLOCKED;
-       mesh_path_flush_by_nexthop(sta);
-
-       ieee80211_mps_sta_status_update(sta);
-       changed |= ieee80211_mps_set_sta_local_pm(sta,
-                       NL80211_MESH_POWER_UNKNOWN);
-
-       return changed;
-}
-
-/**
- * mesh_plink_deactivate - deactivate mesh peer link
- *
- * @sta: mesh peer link to deactivate
- *
- * All mesh paths with this peer as next hop will be flushed
- */
-u32 mesh_plink_deactivate(struct sta_info *sta)
-{
-       struct ieee80211_sub_if_data *sdata = sta->sdata;
-       u32 changed;
-
-       spin_lock_bh(&sta->plink_lock);
-       changed = __mesh_plink_deactivate(sta);
-       sta->reason = WLAN_REASON_MESH_PEER_CANCELED;
-       mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
-                           sta->sta.addr, sta->llid, sta->plid,
-                           sta->reason);
-       spin_unlock_bh(&sta->plink_lock);
-
-       return changed;
-}
-
 static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
+                              struct sta_info *sta,
                               enum ieee80211_self_protected_actioncode action,
                               u8 *da, u16 llid, u16 plid, u16 reason)
 {
@@ -306,7 +251,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
                        /* AID */
                        pos = skb_put(skb, 2);
-                       put_unaligned_le16(plid, pos);
+                       put_unaligned_le16(sta->sta.aid, pos);
                }
                if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
                    ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -375,6 +320,58 @@ free:
        return err;
 }
 
+/**
+ * __mesh_plink_deactivate - deactivate mesh peer link
+ *
+ * @sta: mesh peer link to deactivate
+ *
+ * All mesh paths with this peer as next hop will be flushed
+ * Returns beacon changed flag if the beacon content changed.
+ *
+ * Locking: the caller must hold sta->mesh->plink_lock
+ */
+static u32 __mesh_plink_deactivate(struct sta_info *sta)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       u32 changed = 0;
+
+       lockdep_assert_held(&sta->mesh->plink_lock);
+
+       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+               changed = mesh_plink_dec_estab_count(sdata);
+       sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
+       mesh_path_flush_by_nexthop(sta);
+
+       ieee80211_mps_sta_status_update(sta);
+       changed |= ieee80211_mps_set_sta_local_pm(sta,
+                       NL80211_MESH_POWER_UNKNOWN);
+
+       return changed;
+}
+
+/**
+ * mesh_plink_deactivate - deactivate mesh peer link
+ *
+ * @sta: mesh peer link to deactivate
+ *
+ * All mesh paths with this peer as next hop will be flushed
+ */
+u32 mesh_plink_deactivate(struct sta_info *sta)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       u32 changed;
+
+       spin_lock_bh(&sta->mesh->plink_lock);
+       changed = __mesh_plink_deactivate(sta);
+       sta->mesh->reason = WLAN_REASON_MESH_PEER_CANCELED;
+       mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CLOSE,
+                           sta->sta.addr, sta->mesh->llid, sta->mesh->plid,
+                           sta->mesh->reason);
+       spin_unlock_bh(&sta->mesh->plink_lock);
+
+       return changed;
+}
+
 static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
                               struct sta_info *sta,
                               struct ieee802_11_elems *elems, bool insert)
@@ -388,13 +385,14 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
        sband = local->hw.wiphy->bands[band];
        rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
 
-       spin_lock_bh(&sta->plink_lock);
+       spin_lock_bh(&sta->mesh->plink_lock);
        sta->last_rx = jiffies;
 
        /* rates and capabilities don't change during peering */
-       if (sta->plink_state == NL80211_PLINK_ESTAB && sta->processed_beacon)
+       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
+           sta->mesh->processed_beacon)
                goto out;
-       sta->processed_beacon = true;
+       sta->mesh->processed_beacon = true;
 
        if (sta->sta.supp_rates[band] != rates)
                changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
@@ -421,23 +419,57 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
        else
                rate_control_rate_update(local, sband, sta, changed);
 out:
-       spin_unlock_bh(&sta->plink_lock);
+       spin_unlock_bh(&sta->mesh->plink_lock);
+}
+
+static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata)
+{
+       struct sta_info *sta;
+       unsigned long *aid_map;
+       int aid;
+
+       aid_map = kcalloc(BITS_TO_LONGS(IEEE80211_MAX_AID + 1),
+                         sizeof(*aid_map), GFP_KERNEL);
+       if (!aid_map)
+               return -ENOMEM;
+
+       /* reserve aid 0 for mcast indication */
+       __set_bit(0, aid_map);
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sta, &sdata->local->sta_list, list)
+               __set_bit(sta->sta.aid, aid_map);
+       rcu_read_unlock();
+
+       aid = find_first_zero_bit(aid_map, IEEE80211_MAX_AID + 1);
+       kfree(aid_map);
+
+       if (aid > IEEE80211_MAX_AID)
+               return -ENOBUFS;
+
+       return aid;
 }
 
 static struct sta_info *
 __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
 {
        struct sta_info *sta;
+       int aid;
 
        if (sdata->local->num_sta >= MESH_MAX_PLINKS)
                return NULL;
 
+       aid = mesh_allocate_aid(sdata);
+       if (aid < 0)
+               return NULL;
+
        sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
        if (!sta)
                return NULL;
 
-       sta->plink_state = NL80211_PLINK_LISTEN;
+       sta->mesh->plink_state = NL80211_PLINK_LISTEN;
        sta->sta.wme = true;
+       sta->sta.aid = aid;
 
        sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
        sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
@@ -524,7 +556,7 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
                goto out;
 
        if (mesh_peer_accepts_plinks(elems) &&
-           sta->plink_state == NL80211_PLINK_LISTEN &&
+           sta->mesh->plink_state == NL80211_PLINK_LISTEN &&
            sdata->u.mesh.accepting_plinks &&
            sdata->u.mesh.mshcfg.auto_open_plinks &&
            rssi_threshold_check(sdata, sta))
@@ -554,52 +586,52 @@ static void mesh_plink_timer(unsigned long data)
        if (sta->sdata->local->quiescing)
                return;
 
-       spin_lock_bh(&sta->plink_lock);
+       spin_lock_bh(&sta->mesh->plink_lock);
 
        /* If a timer fires just before a state transition on another CPU,
         * we may have already extended the timeout and changed state by the
         * time we've acquired the lock and arrived  here.  In that case,
         * skip this timer and wait for the new one.
         */
-       if (time_before(jiffies, sta->plink_timer.expires)) {
+       if (time_before(jiffies, sta->mesh->plink_timer.expires)) {
                mpl_dbg(sta->sdata,
                        "Ignoring timer for %pM in state %s (timer adjusted)",
-                       sta->sta.addr, mplstates[sta->plink_state]);
-               spin_unlock_bh(&sta->plink_lock);
+                       sta->sta.addr, mplstates[sta->mesh->plink_state]);
+               spin_unlock_bh(&sta->mesh->plink_lock);
                return;
        }
 
        /* del_timer() and handler may race when entering these states */
-       if (sta->plink_state == NL80211_PLINK_LISTEN ||
-           sta->plink_state == NL80211_PLINK_ESTAB) {
+       if (sta->mesh->plink_state == NL80211_PLINK_LISTEN ||
+           sta->mesh->plink_state == NL80211_PLINK_ESTAB) {
                mpl_dbg(sta->sdata,
                        "Ignoring timer for %pM in state %s (timer deleted)",
-                       sta->sta.addr, mplstates[sta->plink_state]);
-               spin_unlock_bh(&sta->plink_lock);
+                       sta->sta.addr, mplstates[sta->mesh->plink_state]);
+               spin_unlock_bh(&sta->mesh->plink_lock);
                return;
        }
 
        mpl_dbg(sta->sdata,
                "Mesh plink timer for %pM fired on state %s\n",
-               sta->sta.addr, mplstates[sta->plink_state]);
+               sta->sta.addr, mplstates[sta->mesh->plink_state]);
        sdata = sta->sdata;
        mshcfg = &sdata->u.mesh.mshcfg;
 
-       switch (sta->plink_state) {
+       switch (sta->mesh->plink_state) {
        case NL80211_PLINK_OPN_RCVD:
        case NL80211_PLINK_OPN_SNT:
                /* retry timer */
-               if (sta->plink_retries < mshcfg->dot11MeshMaxRetries) {
+               if (sta->mesh->plink_retries < mshcfg->dot11MeshMaxRetries) {
                        u32 rand;
                        mpl_dbg(sta->sdata,
                                "Mesh plink for %pM (retry, timeout): %d %d\n",
-                               sta->sta.addr, sta->plink_retries,
-                               sta->plink_timeout);
+                               sta->sta.addr, sta->mesh->plink_retries,
+                               sta->mesh->plink_timeout);
                        get_random_bytes(&rand, sizeof(u32));
-                       sta->plink_timeout = sta->plink_timeout +
-                                            rand % sta->plink_timeout;
-                       ++sta->plink_retries;
-                       mod_plink_timer(sta, sta->plink_timeout);
+                       sta->mesh->plink_timeout = sta->mesh->plink_timeout +
+                                            rand % sta->mesh->plink_timeout;
+                       ++sta->mesh->plink_retries;
+                       mod_plink_timer(sta, sta->mesh->plink_timeout);
                        action = WLAN_SP_MESH_PEERING_OPEN;
                        break;
                }
@@ -609,31 +641,31 @@ static void mesh_plink_timer(unsigned long data)
                /* confirm timer */
                if (!reason)
                        reason = WLAN_REASON_MESH_CONFIRM_TIMEOUT;
-               sta->plink_state = NL80211_PLINK_HOLDING;
+               sta->mesh->plink_state = NL80211_PLINK_HOLDING;
                mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
                action = WLAN_SP_MESH_PEERING_CLOSE;
                break;
        case NL80211_PLINK_HOLDING:
                /* holding timer */
-               del_timer(&sta->plink_timer);
+               del_timer(&sta->mesh->plink_timer);
                mesh_plink_fsm_restart(sta);
                break;
        default:
                break;
        }
-       spin_unlock_bh(&sta->plink_lock);
+       spin_unlock_bh(&sta->mesh->plink_lock);
        if (action)
-               mesh_plink_frame_tx(sdata, action, sta->sta.addr,
-                                   sta->llid, sta->plid, reason);
+               mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+                                   sta->mesh->llid, sta->mesh->plid, reason);
 }
 
 static inline void mesh_plink_timer_set(struct sta_info *sta, u32 timeout)
 {
-       sta->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
-       sta->plink_timer.data = (unsigned long) sta;
-       sta->plink_timer.function = mesh_plink_timer;
-       sta->plink_timeout = timeout;
-       add_timer(&sta->plink_timer);
+       sta->mesh->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
+       sta->mesh->plink_timer.data = (unsigned long) sta;
+       sta->mesh->plink_timer.function = mesh_plink_timer;
+       sta->mesh->plink_timeout = timeout;
+       add_timer(&sta->mesh->plink_timer);
 }
 
 static bool llid_in_use(struct ieee80211_sub_if_data *sdata,
@@ -645,7 +677,7 @@ static bool llid_in_use(struct ieee80211_sub_if_data *sdata,
 
        rcu_read_lock();
        list_for_each_entry_rcu(sta, &local->sta_list, list) {
-               if (!memcmp(&sta->llid, &llid, sizeof(llid))) {
+               if (!memcmp(&sta->mesh->llid, &llid, sizeof(llid))) {
                        in_use = true;
                        break;
                }
@@ -661,8 +693,6 @@ static u16 mesh_get_new_llid(struct ieee80211_sub_if_data *sdata)
 
        do {
                get_random_bytes(&llid, sizeof(llid));
-               /* for mesh PS we still only have the AID range for TIM bits */
-               llid = (llid % IEEE80211_MAX_AID) + 1;
        } while (llid_in_use(sdata, llid));
 
        return llid;
@@ -676,16 +706,16 @@ u32 mesh_plink_open(struct sta_info *sta)
        if (!test_sta_flag(sta, WLAN_STA_AUTH))
                return 0;
 
-       spin_lock_bh(&sta->plink_lock);
-       sta->llid = mesh_get_new_llid(sdata);
-       if (sta->plink_state != NL80211_PLINK_LISTEN &&
-           sta->plink_state != NL80211_PLINK_BLOCKED) {
-               spin_unlock_bh(&sta->plink_lock);
+       spin_lock_bh(&sta->mesh->plink_lock);
+       sta->mesh->llid = mesh_get_new_llid(sdata);
+       if (sta->mesh->plink_state != NL80211_PLINK_LISTEN &&
+           sta->mesh->plink_state != NL80211_PLINK_BLOCKED) {
+               spin_unlock_bh(&sta->mesh->plink_lock);
                return 0;
        }
-       sta->plink_state = NL80211_PLINK_OPN_SNT;
+       sta->mesh->plink_state = NL80211_PLINK_OPN_SNT;
        mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
-       spin_unlock_bh(&sta->plink_lock);
+       spin_unlock_bh(&sta->mesh->plink_lock);
        mpl_dbg(sdata,
                "Mesh plink: starting establishment with %pM\n",
                sta->sta.addr);
@@ -693,8 +723,8 @@ u32 mesh_plink_open(struct sta_info *sta)
        /* set the non-peer mode to active during peering */
        changed = ieee80211_mps_local_status_update(sdata);
 
-       mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
-                           sta->sta.addr, sta->llid, 0, 0);
+       mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_OPEN,
+                           sta->sta.addr, sta->mesh->llid, 0, 0);
        return changed;
 }
 
@@ -702,10 +732,10 @@ u32 mesh_plink_block(struct sta_info *sta)
 {
        u32 changed;
 
-       spin_lock_bh(&sta->plink_lock);
+       spin_lock_bh(&sta->mesh->plink_lock);
        changed = __mesh_plink_deactivate(sta);
-       sta->plink_state = NL80211_PLINK_BLOCKED;
-       spin_unlock_bh(&sta->plink_lock);
+       sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
+       spin_unlock_bh(&sta->mesh->plink_lock);
 
        return changed;
 }
@@ -715,12 +745,11 @@ static void mesh_plink_close(struct ieee80211_sub_if_data *sdata,
                             enum plink_event event)
 {
        struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
-
        u16 reason = (event == CLS_ACPT) ?
                     WLAN_REASON_MESH_CLOSE : WLAN_REASON_MESH_CONFIG;
 
-       sta->reason = reason;
-       sta->plink_state = NL80211_PLINK_HOLDING;
+       sta->mesh->reason = reason;
+       sta->mesh->plink_state = NL80211_PLINK_HOLDING;
        mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
 }
 
@@ -730,8 +759,8 @@ static u32 mesh_plink_establish(struct ieee80211_sub_if_data *sdata,
        struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
        u32 changed = 0;
 
-       del_timer(&sta->plink_timer);
-       sta->plink_state = NL80211_PLINK_ESTAB;
+       del_timer(&sta->mesh->plink_timer);
+       sta->mesh->plink_state = NL80211_PLINK_ESTAB;
        changed |= mesh_plink_inc_estab_count(sdata);
        changed |= mesh_set_ht_prot_mode(sdata);
        changed |= mesh_set_short_slot_time(sdata);
@@ -758,18 +787,18 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
        u32 changed = 0;
 
        mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
-               mplstates[sta->plink_state], mplevents[event]);
+               mplstates[sta->mesh->plink_state], mplevents[event]);
 
-       spin_lock_bh(&sta->plink_lock);
-       switch (sta->plink_state) {
+       spin_lock_bh(&sta->mesh->plink_lock);
+       switch (sta->mesh->plink_state) {
        case NL80211_PLINK_LISTEN:
                switch (event) {
                case CLS_ACPT:
                        mesh_plink_fsm_restart(sta);
                        break;
                case OPN_ACPT:
-                       sta->plink_state = NL80211_PLINK_OPN_RCVD;
-                       sta->llid = mesh_get_new_llid(sdata);
+                       sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
+                       sta->mesh->llid = mesh_get_new_llid(sdata);
                        mesh_plink_timer_set(sta,
                                             mshcfg->dot11MeshRetryTimeout);
 
@@ -791,11 +820,11 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
                        break;
                case OPN_ACPT:
                        /* retry timer is left untouched */
-                       sta->plink_state = NL80211_PLINK_OPN_RCVD;
+                       sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
                        action = WLAN_SP_MESH_PEERING_CONFIRM;
                        break;
                case CNF_ACPT:
-                       sta->plink_state = NL80211_PLINK_CNF_RCVD;
+                       sta->mesh->plink_state = NL80211_PLINK_CNF_RCVD;
                        mod_plink_timer(sta, mshcfg->dot11MeshConfirmTimeout);
                        break;
                default:
@@ -855,7 +884,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
        case NL80211_PLINK_HOLDING:
                switch (event) {
                case CLS_ACPT:
-                       del_timer(&sta->plink_timer);
+                       del_timer(&sta->mesh->plink_timer);
                        mesh_plink_fsm_restart(sta);
                        break;
                case OPN_ACPT:
@@ -874,17 +903,18 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
                 */
                break;
        }
-       spin_unlock_bh(&sta->plink_lock);
+       spin_unlock_bh(&sta->mesh->plink_lock);
        if (action) {
-               mesh_plink_frame_tx(sdata, action, sta->sta.addr,
-                                   sta->llid, sta->plid, sta->reason);
+               mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+                                   sta->mesh->llid, sta->mesh->plid,
+                                   sta->mesh->reason);
 
                /* also send confirm in open case */
                if (action == WLAN_SP_MESH_PEERING_OPEN) {
-                       mesh_plink_frame_tx(sdata,
+                       mesh_plink_frame_tx(sdata, sta,
                                            WLAN_SP_MESH_PEERING_CONFIRM,
-                                           sta->sta.addr, sta->llid,
-                                           sta->plid, 0);
+                                           sta->sta.addr, sta->mesh->llid,
+                                           sta->mesh->plid, 0);
                }
        }
 
@@ -939,7 +969,7 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                        mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
                        goto out;
                }
-               if (sta->plink_state == NL80211_PLINK_BLOCKED)
+               if (sta->mesh->plink_state == NL80211_PLINK_BLOCKED)
                        goto out;
        }
 
@@ -954,7 +984,7 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                if (!matches_local)
                        event = OPN_RJCT;
                if (!mesh_plink_free_count(sdata) ||
-                   (sta->plid && sta->plid != plid))
+                   (sta->mesh->plid && sta->mesh->plid != plid))
                        event = OPN_IGNR;
                else
                        event = OPN_ACPT;
@@ -963,14 +993,14 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                if (!matches_local)
                        event = CNF_RJCT;
                if (!mesh_plink_free_count(sdata) ||
-                   sta->llid != llid ||
-                   (sta->plid && sta->plid != plid))
+                   sta->mesh->llid != llid ||
+                   (sta->mesh->plid && sta->mesh->plid != plid))
                        event = CNF_IGNR;
                else
                        event = CNF_ACPT;
                break;
        case WLAN_SP_MESH_PEERING_CLOSE:
-               if (sta->plink_state == NL80211_PLINK_ESTAB)
+               if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
                        /* Do not check for llid or plid. This does not
                         * follow the standard but since multiple plinks
                         * per sta are not supported, it is necessary in
@@ -981,9 +1011,9 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                         * restarted.
                         */
                        event = CLS_ACPT;
-               else if (sta->plid != plid)
+               else if (sta->mesh->plid != plid)
                        event = CLS_IGNR;
-               else if (ie_len == 8 && sta->llid != llid)
+               else if (ie_len == 8 && sta->mesh->llid != llid)
                        event = CLS_IGNR;
                else
                        event = CLS_ACPT;
@@ -1070,9 +1100,9 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
                        mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
                        goto unlock_rcu;
                }
-               sta->plid = plid;
+               sta->mesh->plid = plid;
        } else if (!sta && event == OPN_RJCT) {
-               mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+               mesh_plink_frame_tx(sdata, NULL, WLAN_SP_MESH_PEERING_CLOSE,
                                    mgmt->sa, 0, plid,
                                    WLAN_REASON_MESH_CONFIG);
                goto unlock_rcu;
@@ -1081,9 +1111,13 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
                goto unlock_rcu;
        }
 
-       /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
-       if (!sta->plid && event == CNF_ACPT)
-               sta->plid = plid;
+       if (event == CNF_ACPT) {
+               /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
+               if (!sta->mesh->plid)
+                       sta->mesh->plid = plid;
+
+               sta->mesh->aid = get_unaligned_le16(PLINK_CNF_AID(mgmt));
+       }
 
        changed |= mesh_plink_fsm(sdata, sta, event);
 
index ad8b377b4b9f6cfa5d5e222a3c0aff169f4d72d1..90a268abea171aebc5d5907929db0804a2b0f7b0 100644 (file)
@@ -92,16 +92,16 @@ u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata)
                if (sdata != sta->sdata)
                        continue;
 
-               switch (sta->plink_state) {
+               switch (sta->mesh->plink_state) {
                case NL80211_PLINK_OPN_SNT:
                case NL80211_PLINK_OPN_RCVD:
                case NL80211_PLINK_CNF_RCVD:
                        peering = true;
                        break;
                case NL80211_PLINK_ESTAB:
-                       if (sta->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
+                       if (sta->mesh->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
                                light_sleep_cnt++;
-                       else if (sta->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
+                       else if (sta->mesh->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
                                deep_sleep_cnt++;
                        break;
                default:
@@ -153,19 +153,19 @@ u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
 
-       if (sta->local_pm == pm)
+       if (sta->mesh->local_pm == pm)
                return 0;
 
        mps_dbg(sdata, "local STA operates in mode %d with %pM\n",
                pm, sta->sta.addr);
 
-       sta->local_pm = pm;
+       sta->mesh->local_pm = pm;
 
        /*
         * announce peer-specific power mode transition
         * (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3)
         */
-       if (sta->plink_state == NL80211_PLINK_ESTAB)
+       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
                mps_qos_null_tx(sta);
 
        return ieee80211_mps_local_status_update(sdata);
@@ -197,8 +197,8 @@ void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata,
 
        if (is_unicast_ether_addr(hdr->addr1) &&
            ieee80211_is_data_qos(hdr->frame_control) &&
-           sta->plink_state == NL80211_PLINK_ESTAB)
-               pm = sta->local_pm;
+           sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+               pm = sta->mesh->local_pm;
        else
                pm = sdata->u.mesh.nonpeer_pm;
 
@@ -241,16 +241,16 @@ void ieee80211_mps_sta_status_update(struct sta_info *sta)
         * use peer-specific power mode if peering is established and the
         * peer's power mode is known
         */
-       if (sta->plink_state == NL80211_PLINK_ESTAB &&
-           sta->peer_pm != NL80211_MESH_POWER_UNKNOWN)
-               pm = sta->peer_pm;
+       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
+           sta->mesh->peer_pm != NL80211_MESH_POWER_UNKNOWN)
+               pm = sta->mesh->peer_pm;
        else
-               pm = sta->nonpeer_pm;
+               pm = sta->mesh->nonpeer_pm;
 
        do_buffer = (pm != NL80211_MESH_POWER_ACTIVE);
 
        /* clear the MPSP flags for non-peers or active STA */
-       if (sta->plink_state != NL80211_PLINK_ESTAB) {
+       if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
                clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
                clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
        } else if (!do_buffer) {
@@ -296,13 +296,13 @@ static void mps_set_sta_peer_pm(struct sta_info *sta,
                pm = NL80211_MESH_POWER_ACTIVE;
        }
 
-       if (sta->peer_pm == pm)
+       if (sta->mesh->peer_pm == pm)
                return;
 
        mps_dbg(sta->sdata, "STA %pM enters mode %d\n",
                sta->sta.addr, pm);
 
-       sta->peer_pm = pm;
+       sta->mesh->peer_pm = pm;
 
        ieee80211_mps_sta_status_update(sta);
 }
@@ -317,13 +317,13 @@ static void mps_set_sta_nonpeer_pm(struct sta_info *sta,
        else
                pm = NL80211_MESH_POWER_ACTIVE;
 
-       if (sta->nonpeer_pm == pm)
+       if (sta->mesh->nonpeer_pm == pm)
                return;
 
        mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n",
                sta->sta.addr, pm);
 
-       sta->nonpeer_pm = pm;
+       sta->mesh->nonpeer_pm = pm;
 
        ieee80211_mps_sta_status_update(sta);
 }
@@ -552,7 +552,7 @@ void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta,
        } else {
                if (eosp)
                        clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
-               else if (sta->local_pm != NL80211_MESH_POWER_ACTIVE)
+               else if (sta->mesh->local_pm != NL80211_MESH_POWER_ACTIVE)
                        set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
 
                if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
@@ -577,9 +577,9 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
        int ac, buffer_local = 0;
        bool has_buffered = false;
 
-       if (sta->plink_state == NL80211_PLINK_ESTAB)
+       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
                has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len,
-                                                  sta->llid);
+                                                  sta->mesh->aid);
 
        if (has_buffered)
                mps_dbg(sta->sdata, "%pM indicates buffered frames\n",
@@ -598,7 +598,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
        if (!has_buffered && !buffer_local)
                return;
 
-       if (sta->plink_state == NL80211_PLINK_ESTAB)
+       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
                mpsp_trigger_send(sta, has_buffered, !buffer_local);
        else
                mps_frame_deliver(sta, 1);
index 09625d6205c31418edba53a62ee027245f232050..64bc22ad94965c4615eaaa94aae02b45891204e7 100644 (file)
@@ -127,14 +127,14 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
 
        /* Timing offset calculation (see 13.13.2.2.2) */
        t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
-       sta->t_offset = t_t - t_r;
+       sta->mesh->t_offset = t_t - t_r;
 
        if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
-               s64 t_clockdrift = sta->t_offset_setpoint - sta->t_offset;
+               s64 t_clockdrift = sta->mesh->t_offset_setpoint - sta->mesh->t_offset;
                msync_dbg(sdata,
-                         "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n",
-                         sta->sta.addr, (long long) sta->t_offset,
-                         (long long) sta->t_offset_setpoint,
+                         "STA %pM : t_offset=%lld, t_offset_setpoint=%lld, t_clockdrift=%lld\n",
+                         sta->sta.addr, (long long) sta->mesh->t_offset,
+                         (long long) sta->mesh->t_offset_setpoint,
                          (long long) t_clockdrift);
 
                if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
@@ -152,12 +152,12 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
                        ifmsh->sync_offset_clockdrift_max = t_clockdrift;
                spin_unlock_bh(&ifmsh->sync_offset_lock);
        } else {
-               sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
+               sta->mesh->t_offset_setpoint = sta->mesh->t_offset - TOFFSET_SET_MARGIN;
                set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
                msync_dbg(sdata,
-                         "STA %pM : offset was invalid, sta->t_offset=%lld\n",
+                         "STA %pM : offset was invalid, t_offset=%lld\n",
                          sta->sta.addr,
-                         (long long) sta->t_offset);
+                         (long long) sta->mesh->t_offset);
        }
 
 no_sync:
index 9b2cc278ac2afc60920ebec3083bebc35c497b61..705ef1d040edfb70042fdd9cd25f050b19dab4c0 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -538,11 +539,16 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
        ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
 }
 
+/* This function determines vht capability flags for the association
+ * and builds the IE.
+ * Note - the function may set the owner of the MU-MIMO capability
+ */
 static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
                                 struct sk_buff *skb,
                                 struct ieee80211_supported_band *sband,
                                 struct ieee80211_vht_cap *ap_vht_cap)
 {
+       struct ieee80211_local *local = sdata->local;
        u8 *pos;
        u32 cap;
        struct ieee80211_sta_vht_cap vht_cap;
@@ -576,7 +582,34 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
         */
        if (!(ap_vht_cap->vht_cap_info &
                        cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
-               cap &= ~IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+               cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+                        IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+       else if (!(ap_vht_cap->vht_cap_info &
+                       cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+               cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+       /*
+        * If some other vif is using the MU-MIMO capablity we cannot associate
+        * using MU-MIMO - this will lead to contradictions in the group-id
+        * mechanism.
+        * Ownership is defined since association request, in order to avoid
+        * simultaneous associations with MU-MIMO.
+        */
+       if (cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) {
+               bool disable_mu_mimo = false;
+               struct ieee80211_sub_if_data *other;
+
+               list_for_each_entry_rcu(other, &local->interfaces, list) {
+                       if (other->flags & IEEE80211_SDATA_MU_MIMO_OWNER) {
+                               disable_mu_mimo = true;
+                               break;
+                       }
+               }
+               if (disable_mu_mimo)
+                       cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+               else
+                       sdata->flags |= IEEE80211_SDATA_MU_MIMO_OWNER;
+       }
 
        mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
 
@@ -1096,24 +1129,6 @@ static void ieee80211_chswitch_timer(unsigned long data)
        ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
 }
 
-static void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
-{
-       struct sta_info *sta;
-       u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
-               if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
-                   !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
-                       continue;
-
-               ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
-                                           NL80211_TDLS_TEARDOWN, reason,
-                                           GFP_ATOMIC);
-       }
-       rcu_read_unlock();
-}
-
 static void
 ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                                 u64 timestamp, u32 device_timestamp,
@@ -2076,6 +2091,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
        memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
        memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
+       sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
 
        sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
 
@@ -2538,6 +2554,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
                eth_zero_addr(sdata->u.mgd.bssid);
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
                sdata->u.mgd.flags = 0;
+               sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
                mutex_lock(&sdata->local->mtx);
                ieee80211_vif_release_channel(sdata);
                mutex_unlock(&sdata->local->mtx);
@@ -3034,12 +3051,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 
        rate_control_rate_init(sta);
 
-       if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
+       if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
                set_sta_flag(sta, WLAN_STA_MFP);
-               sta->sta.mfp = true;
-       } else {
-               sta->sta.mfp = false;
-       }
 
        sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
 
index 358d5f9d820788acef2f439dee6ac88250724edc..573b81a1fb2d882487f75ccc24493cc1b9deee5e 100644 (file)
@@ -179,7 +179,7 @@ int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
-       u32 changed = BSS_CHANGED_OCB;
+       u32 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
        int err;
 
        if (ifocb->joined == true)
index fda33f961d83ce44f05ab3298113877d01768fb2..9857693b91ec721ff71e3f3cd1087ccc289912e1 100644 (file)
@@ -29,6 +29,65 @@ module_param(ieee80211_default_rc_algo, charp, 0644);
 MODULE_PARM_DESC(ieee80211_default_rc_algo,
                 "Default rate control algorithm for mac80211 to use");
 
+void rate_control_rate_init(struct sta_info *sta)
+{
+       struct ieee80211_local *local = sta->sdata->local;
+       struct rate_control_ref *ref = sta->rate_ctrl;
+       struct ieee80211_sta *ista = &sta->sta;
+       void *priv_sta = sta->rate_ctrl_priv;
+       struct ieee80211_supported_band *sband;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+
+       ieee80211_sta_set_rx_nss(sta);
+
+       if (!ref)
+               return;
+
+       rcu_read_lock();
+
+       chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+       if (WARN_ON(!chanctx_conf)) {
+               rcu_read_unlock();
+               return;
+       }
+
+       sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
+
+       spin_lock_bh(&sta->rate_ctrl_lock);
+       ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
+                           priv_sta);
+       spin_unlock_bh(&sta->rate_ctrl_lock);
+       rcu_read_unlock();
+       set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
+}
+
+void rate_control_rate_update(struct ieee80211_local *local,
+                                   struct ieee80211_supported_band *sband,
+                                   struct sta_info *sta, u32 changed)
+{
+       struct rate_control_ref *ref = local->rate_ctrl;
+       struct ieee80211_sta *ista = &sta->sta;
+       void *priv_sta = sta->rate_ctrl_priv;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+
+       if (ref && ref->ops->rate_update) {
+               rcu_read_lock();
+
+               chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+               if (WARN_ON(!chanctx_conf)) {
+                       rcu_read_unlock();
+                       return;
+               }
+
+               spin_lock_bh(&sta->rate_ctrl_lock);
+               ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
+                                     ista, priv_sta, changed);
+               spin_unlock_bh(&sta->rate_ctrl_lock);
+               rcu_read_unlock();
+       }
+       drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
+}
+
 int ieee80211_rate_control_register(const struct rate_control_ops *ops)
 {
        struct rate_control_alg *alg;
@@ -294,39 +353,37 @@ bool rate_control_send_low(struct ieee80211_sta *pubsta,
 }
 EXPORT_SYMBOL(rate_control_send_low);
 
-static bool rate_idx_match_legacy_mask(struct ieee80211_tx_rate *rate,
-                                      int n_bitrates, u32 mask)
+static bool rate_idx_match_legacy_mask(s8 *rate_idx, int n_bitrates, u32 mask)
 {
        int j;
 
        /* See whether the selected rate or anything below it is allowed. */
-       for (j = rate->idx; j >= 0; j--) {
+       for (j = *rate_idx; j >= 0; j--) {
                if (mask & (1 << j)) {
                        /* Okay, found a suitable rate. Use it. */
-                       rate->idx = j;
+                       *rate_idx = j;
                        return true;
                }
        }
 
        /* Try to find a higher rate that would be allowed */
-       for (j = rate->idx + 1; j < n_bitrates; j++) {
+       for (j = *rate_idx + 1; j < n_bitrates; j++) {
                if (mask & (1 << j)) {
                        /* Okay, found a suitable rate. Use it. */
-                       rate->idx = j;
+                       *rate_idx = j;
                        return true;
                }
        }
        return false;
 }
 
-static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
-                                   u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+static bool rate_idx_match_mcs_mask(s8 *rate_idx, u8 *mcs_mask)
 {
        int i, j;
        int ridx, rbit;
 
-       ridx = rate->idx / 8;
-       rbit = rate->idx % 8;
+       ridx = *rate_idx / 8;
+       rbit = *rate_idx % 8;
 
        /* sanity check */
        if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN)
@@ -336,20 +393,20 @@ static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
        for (i = ridx; i >= 0; i--) {
                for (j = rbit; j >= 0; j--)
                        if (mcs_mask[i] & BIT(j)) {
-                               rate->idx = i * 8 + j;
+                               *rate_idx = i * 8 + j;
                                return true;
                        }
                rbit = 7;
        }
 
        /* Try to find a higher rate that would be allowed */
-       ridx = (rate->idx + 1) / 8;
-       rbit = (rate->idx + 1) % 8;
+       ridx = (*rate_idx + 1) / 8;
+       rbit = (*rate_idx + 1) % 8;
 
        for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
                for (j = rbit; j < 8; j++)
                        if (mcs_mask[i] & BIT(j)) {
-                               rate->idx = i * 8 + j;
+                               *rate_idx = i * 8 + j;
                                return true;
                        }
                rbit = 0;
@@ -357,37 +414,93 @@ static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
        return false;
 }
 
+static bool rate_idx_match_vht_mcs_mask(s8 *rate_idx, u16 *vht_mask)
+{
+       int i, j;
+       int ridx, rbit;
+
+       ridx = *rate_idx >> 4;
+       rbit = *rate_idx & 0xf;
+
+       if (ridx < 0 || ridx >= NL80211_VHT_NSS_MAX)
+               return false;
+
+       /* See whether the selected rate or anything below it is allowed. */
+       for (i = ridx; i >= 0; i--) {
+               for (j = rbit; j >= 0; j--) {
+                       if (vht_mask[i] & BIT(j)) {
+                               *rate_idx = (i << 4) | j;
+                               return true;
+                       }
+               }
+               rbit = 15;
+       }
 
+       /* Try to find a higher rate that would be allowed */
+       ridx = (*rate_idx + 1) >> 4;
+       rbit = (*rate_idx + 1) & 0xf;
 
-static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
+       for (i = ridx; i < NL80211_VHT_NSS_MAX; i++) {
+               for (j = rbit; j < 16; j++) {
+                       if (vht_mask[i] & BIT(j)) {
+                               *rate_idx = (i << 4) | j;
+                               return true;
+                       }
+               }
+               rbit = 0;
+       }
+       return false;
+}
+
+static void rate_idx_match_mask(s8 *rate_idx, u16 *rate_flags,
                                struct ieee80211_supported_band *sband,
                                enum nl80211_chan_width chan_width,
                                u32 mask,
-                               u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+                               u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN],
+                               u16 vht_mask[NL80211_VHT_NSS_MAX])
 {
-       struct ieee80211_tx_rate alt_rate;
+       if (*rate_flags & IEEE80211_TX_RC_VHT_MCS) {
+               /* handle VHT rates */
+               if (rate_idx_match_vht_mcs_mask(rate_idx, vht_mask))
+                       return;
+
+               *rate_idx = 0;
+               /* keep protection flags */
+               *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+                               IEEE80211_TX_RC_USE_CTS_PROTECT |
+                               IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
 
-       /* handle HT rates */
-       if (rate->flags & IEEE80211_TX_RC_MCS) {
-               if (rate_idx_match_mcs_mask(rate, mcs_mask))
+               *rate_flags |= IEEE80211_TX_RC_MCS;
+               if (chan_width == NL80211_CHAN_WIDTH_40)
+                       *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+               if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
                        return;
 
                /* also try the legacy rates. */
-               alt_rate.idx = 0;
+               *rate_flags &= ~(IEEE80211_TX_RC_MCS |
+                                IEEE80211_TX_RC_40_MHZ_WIDTH);
+               if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+                                              mask))
+                       return;
+       } else if (*rate_flags & IEEE80211_TX_RC_MCS) {
+               /* handle HT rates */
+               if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
+                       return;
+
+               /* also try the legacy rates. */
+               *rate_idx = 0;
                /* keep protection flags */
-               alt_rate.flags = rate->flags &
-                                (IEEE80211_TX_RC_USE_RTS_CTS |
-                                 IEEE80211_TX_RC_USE_CTS_PROTECT |
-                                 IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
-               alt_rate.count = rate->count;
-               if (rate_idx_match_legacy_mask(&alt_rate,
-                                              sband->n_bitrates, mask)) {
-                       *rate = alt_rate;
+               *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+                               IEEE80211_TX_RC_USE_CTS_PROTECT |
+                               IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+               if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+                                              mask))
                        return;
-               }
-       } else if (!(rate->flags & IEEE80211_TX_RC_VHT_MCS)) {
+       } else {
                /* handle legacy rates */
-               if (rate_idx_match_legacy_mask(rate, sband->n_bitrates, mask))
+               if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+                                              mask))
                        return;
 
                /* if HT BSS, and we handle a data frame, also try HT rates */
@@ -400,23 +513,19 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
                        break;
                }
 
-               alt_rate.idx = 0;
+               *rate_idx = 0;
                /* keep protection flags */
-               alt_rate.flags = rate->flags &
-                                (IEEE80211_TX_RC_USE_RTS_CTS |
-                                 IEEE80211_TX_RC_USE_CTS_PROTECT |
-                                 IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
-               alt_rate.count = rate->count;
+               *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+                               IEEE80211_TX_RC_USE_CTS_PROTECT |
+                               IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
 
-               alt_rate.flags |= IEEE80211_TX_RC_MCS;
+               *rate_flags |= IEEE80211_TX_RC_MCS;
 
                if (chan_width == NL80211_CHAN_WIDTH_40)
-                       alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+                       *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
 
-               if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) {
-                       *rate = alt_rate;
+               if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
                        return;
-               }
        }
 
        /*
@@ -569,18 +678,92 @@ static void rate_control_fill_sta_table(struct ieee80211_sta *sta,
        }
 }
 
+static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
+                                 struct ieee80211_supported_band *sband,
+                                 struct ieee80211_sta *sta, u32 *mask,
+                                 u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN],
+                                 u16 vht_mask[NL80211_VHT_NSS_MAX])
+{
+       u32 i, flags;
+
+       *mask = sdata->rc_rateidx_mask[sband->band];
+       flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+       for (i = 0; i < sband->n_bitrates; i++) {
+               if ((flags & sband->bitrates[i].flags) != flags)
+                       *mask &= ~BIT(i);
+       }
+
+       if (*mask == (1 << sband->n_bitrates) - 1 &&
+           !sdata->rc_has_mcs_mask[sband->band] &&
+           !sdata->rc_has_vht_mcs_mask[sband->band])
+               return false;
+
+       if (sdata->rc_has_mcs_mask[sband->band])
+               memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[sband->band],
+                      IEEE80211_HT_MCS_MASK_LEN);
+       else
+               memset(mcs_mask, 0xff, IEEE80211_HT_MCS_MASK_LEN);
+
+       if (sdata->rc_has_vht_mcs_mask[sband->band])
+               memcpy(vht_mask, sdata->rc_rateidx_vht_mcs_mask[sband->band],
+                      sizeof(u16) * NL80211_VHT_NSS_MAX);
+       else
+               memset(vht_mask, 0xff, sizeof(u16) * NL80211_VHT_NSS_MAX);
+
+       if (sta) {
+               __le16 sta_vht_cap;
+               u16 sta_vht_mask[NL80211_VHT_NSS_MAX];
+
+               /* Filter out rates that the STA does not support */
+               *mask &= sta->supp_rates[sband->band];
+               for (i = 0; i < sizeof(mcs_mask); i++)
+                       mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
+
+               sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map;
+               ieee80211_get_vht_mask_from_cap(sta_vht_cap, sta_vht_mask);
+               for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+                       vht_mask[i] &= sta_vht_mask[i];
+       }
+
+       return true;
+}
+
+static void
+rate_control_apply_mask_ratetbl(struct sta_info *sta,
+                               struct ieee80211_supported_band *sband,
+                               struct ieee80211_sta_rates *rates)
+{
+       int i;
+       u32 mask;
+       u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
+       u16 vht_mask[NL80211_VHT_NSS_MAX];
+       enum nl80211_chan_width chan_width;
+
+       if (!rate_control_cap_mask(sta->sdata, sband, &sta->sta, &mask,
+                                  mcs_mask, vht_mask))
+               return;
+
+       chan_width = sta->sdata->vif.bss_conf.chandef.width;
+       for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) {
+               if (rates->rate[i].idx < 0)
+                       break;
+
+               rate_idx_match_mask(&rates->rate[i].idx, &rates->rate[i].flags,
+                                   sband, chan_width, mask, mcs_mask,
+                                   vht_mask);
+       }
+}
+
 static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
                                    struct ieee80211_sta *sta,
                                    struct ieee80211_supported_band *sband,
-                                   struct ieee80211_tx_info *info,
                                    struct ieee80211_tx_rate *rates,
                                    int max_rates)
 {
        enum nl80211_chan_width chan_width;
        u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
-       bool has_mcs_mask;
        u32 mask;
-       u32 rate_flags;
+       u16 rate_flags, vht_mask[NL80211_VHT_NSS_MAX];
        int i;
 
        /*
@@ -588,30 +771,10 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
         * default mask (allow all rates) is used to save some processing for
         * the common case.
         */
-       mask = sdata->rc_rateidx_mask[info->band];
-       has_mcs_mask = sdata->rc_has_mcs_mask[info->band];
-       rate_flags =
-               ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
-       for (i = 0; i < sband->n_bitrates; i++)
-               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
-                       mask &= ~BIT(i);
-
-       if (mask == (1 << sband->n_bitrates) - 1 && !has_mcs_mask)
+       if (!rate_control_cap_mask(sdata, sband, sta, &mask, mcs_mask,
+                                  vht_mask))
                return;
 
-       if (has_mcs_mask)
-               memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[info->band],
-                      sizeof(mcs_mask));
-       else
-               memset(mcs_mask, 0xff, sizeof(mcs_mask));
-
-       if (sta) {
-               /* Filter out rates that the STA does not support */
-               mask &= sta->supp_rates[info->band];
-               for (i = 0; i < sizeof(mcs_mask); i++)
-                       mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
-       }
-
        /*
         * Make sure the rate index selected for each TX rate is
         * included in the configured mask and change the rate indexes
@@ -623,8 +786,10 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
                if (rates[i].idx < 0)
                        break;
 
-               rate_idx_match_mask(&rates[i], sband, chan_width, mask,
-                                   mcs_mask);
+               rate_flags = rates[i].flags;
+               rate_idx_match_mask(&rates[i].idx, &rate_flags, sband,
+                                   chan_width, mask, mcs_mask, vht_mask);
+               rates[i].flags = rate_flags;
        }
 }
 
@@ -648,7 +813,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
        sband = sdata->local->hw.wiphy->bands[info->band];
 
        if (ieee80211_is_data(hdr->frame_control))
-               rate_control_apply_mask(sdata, sta, sband, info, dest, max_rates);
+               rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
 
        if (dest[0].idx < 0)
                __rate_control_send_low(&sdata->local->hw, sband, sta, info,
@@ -705,7 +870,10 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
 {
        struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
        struct ieee80211_sta_rates *old;
+       struct ieee80211_supported_band *sband;
 
+       sband = hw->wiphy->bands[ieee80211_get_sdata_band(sta->sdata)];
+       rate_control_apply_mask_ratetbl(sta, sband, rates);
        /*
         * mac80211 guarantees that this function will not be called
         * concurrently, so the following RCU access is safe, even without
index 25c9be5dd7fd811b32d13c792cf50c661f6f1e44..624fe5b81615e6afa04138cfdb375c8894d133c0 100644 (file)
@@ -71,64 +71,10 @@ rate_control_tx_status_noskb(struct ieee80211_local *local,
        spin_unlock_bh(&sta->rate_ctrl_lock);
 }
 
-static inline void rate_control_rate_init(struct sta_info *sta)
-{
-       struct ieee80211_local *local = sta->sdata->local;
-       struct rate_control_ref *ref = sta->rate_ctrl;
-       struct ieee80211_sta *ista = &sta->sta;
-       void *priv_sta = sta->rate_ctrl_priv;
-       struct ieee80211_supported_band *sband;
-       struct ieee80211_chanctx_conf *chanctx_conf;
-
-       ieee80211_sta_set_rx_nss(sta);
-
-       if (!ref)
-               return;
-
-       rcu_read_lock();
-
-       chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
-       if (WARN_ON(!chanctx_conf)) {
-               rcu_read_unlock();
-               return;
-       }
-
-       sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
-
-       spin_lock_bh(&sta->rate_ctrl_lock);
-       ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
-                           priv_sta);
-       spin_unlock_bh(&sta->rate_ctrl_lock);
-       rcu_read_unlock();
-       set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
-}
-
-static inline void rate_control_rate_update(struct ieee80211_local *local,
+void rate_control_rate_init(struct sta_info *sta);
+void rate_control_rate_update(struct ieee80211_local *local,
                                    struct ieee80211_supported_band *sband,
-                                   struct sta_info *sta, u32 changed)
-{
-       struct rate_control_ref *ref = local->rate_ctrl;
-       struct ieee80211_sta *ista = &sta->sta;
-       void *priv_sta = sta->rate_ctrl_priv;
-       struct ieee80211_chanctx_conf *chanctx_conf;
-
-       if (ref && ref->ops->rate_update) {
-               rcu_read_lock();
-
-               chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
-               if (WARN_ON(!chanctx_conf)) {
-                       rcu_read_unlock();
-                       return;
-               }
-
-               spin_lock_bh(&sta->rate_ctrl_lock);
-               ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
-                                     ista, priv_sta, changed);
-               spin_unlock_bh(&sta->rate_ctrl_lock);
-               rcu_read_unlock();
-       }
-       drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
-}
+                                   struct sta_info *sta, u32 changed);
 
 static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
                                           struct sta_info *sta, gfp_t gfp)
index 247552a7f6c2f23a1e4bc89b647d8d37680bf2c3..3ece7d1034c81ae8749cada074fbebecbe06d57f 100644 (file)
@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
 static inline void
 minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
 {
-       int j = MAX_THR_RATES;
-       struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
+       int j;
+       struct minstrel_rate_stats *tmp_mrs;
        struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
 
-       while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
-              minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
-               j--;
+       for (j = MAX_THR_RATES; j > 0; --j) {
                tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+               if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
+                   minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+                       break;
        }
 
        if (j < MAX_THR_RATES - 1)
index 543b672335353817334b407fbfc1ff3574625ee6..3928dbd24e257e68627aa977cc54a19aaa996339 100644 (file)
@@ -867,7 +867,13 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
        else
                idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8;
 
-       if (offset > 0) {
+       /* enable RTS/CTS if needed:
+        *  - if station is in dynamic SMPS (and streams > 1)
+        *  - for fallback rates, to increase chances of getting through
+        */
+       if (offset > 0 &&
+           (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
+            group->streams > 1)) {
                ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
                flags |= IEEE80211_TX_RC_USE_RTS_CTS;
        }
index 5dae166cb7f56b7cb9d9097496db9e8fa8d755ce..5bc0b88d9eb1331a0dd47aab05ffa01c7a0bc845 100644 (file)
@@ -42,6 +42,51 @@ static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
        u64_stats_update_end(&tstats->syncp);
 }
 
+static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
+                              enum nl80211_iftype type)
+{
+       __le16 fc = hdr->frame_control;
+
+       if (ieee80211_is_data(fc)) {
+               if (len < 24) /* drop incorrect hdr len (data) */
+                       return NULL;
+
+               if (ieee80211_has_a4(fc))
+                       return NULL;
+               if (ieee80211_has_tods(fc))
+                       return hdr->addr1;
+               if (ieee80211_has_fromds(fc))
+                       return hdr->addr2;
+
+               return hdr->addr3;
+       }
+
+       if (ieee80211_is_mgmt(fc)) {
+               if (len < 24) /* drop incorrect hdr len (mgmt) */
+                       return NULL;
+               return hdr->addr3;
+       }
+
+       if (ieee80211_is_ctl(fc)) {
+               if (ieee80211_is_pspoll(fc))
+                       return hdr->addr1;
+
+               if (ieee80211_is_back_req(fc)) {
+                       switch (type) {
+                       case NL80211_IFTYPE_STATION:
+                               return hdr->addr2;
+                       case NL80211_IFTYPE_AP:
+                       case NL80211_IFTYPE_AP_VLAN:
+                               return hdr->addr1;
+                       default:
+                               break; /* fall through to the return */
+                       }
+               }
+       }
+
+       return NULL;
+}
+
 /*
  * monitor mode reception
  *
@@ -77,8 +122,7 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
        hdr = (void *)(skb->data + rtap_vendor_space);
 
        if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
-                           RX_FLAG_FAILED_PLCP_CRC |
-                           RX_FLAG_AMPDU_IS_ZEROLEN))
+                           RX_FLAG_FAILED_PLCP_CRC))
                return true;
 
        if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
@@ -346,10 +390,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                        cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
                put_unaligned_le32(status->ampdu_reference, pos);
                pos += 4;
-               if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
-                       flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
-               if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
-                       flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
                if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
                        flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
                if (status->flag & RX_FLAG_AMPDU_IS_LAST)
@@ -1093,11 +1133,6 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 
-       if (unlikely(rx->skb->len < 16)) {
-               I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
-               return RX_DROP_MONITOR;
-       }
-
        /* Drop disallowed frame classes based on STA auth/assoc state;
         * IEEE 802.11, Chap 5.5.
         *
@@ -1240,22 +1275,22 @@ static void sta_ps_end(struct sta_info *sta)
        ieee80211_sta_ps_deliver_wakeup(sta);
 }
 
-int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
+int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
 {
-       struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
+       struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
        bool in_ps;
 
-       WARN_ON(!ieee80211_hw_check(&sta_inf->local->hw, AP_LINK_PS));
+       WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
 
        /* Don't let the same PS state be set twice */
-       in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
+       in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
        if ((start && in_ps) || (!start && !in_ps))
                return -EINVAL;
 
        if (start)
-               sta_ps_start(sta_inf);
+               sta_ps_start(sta);
        else
-               sta_ps_end(sta_inf);
+               sta_ps_end(sta);
 
        return 0;
 }
@@ -1393,7 +1428,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
        sta->rx_bytes += rx->skb->len;
        if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
                sta->last_signal = status->signal;
-               ewma_add(&sta->avg_signal, -status->signal);
+               ewma_signal_add(&sta->avg_signal, -status->signal);
        }
 
        if (status->chains) {
@@ -1405,7 +1440,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                                continue;
 
                        sta->chain_signal_last[i] = signal;
-                       ewma_add(&sta->chain_signal_avg[i], -signal);
+                       ewma_signal_add(&sta->chain_signal_avg[i], -signal);
                }
        }
 
@@ -1647,7 +1682,6 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
                if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
                        return RX_DROP_MONITOR;
 
-               rx->key->tx_rx_count++;
                /* TODO: add threshold stuff again */
        } else {
                return RX_DROP_MONITOR;
@@ -1883,7 +1917,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 
        /* Complete frame has been reassembled - process it now */
        status = IEEE80211_SKB_RXCB(rx->skb);
-       status->rx_flags |= IEEE80211_RX_FRAGMENTED;
 
  out:
        ieee80211_led_rx(rx->local);
@@ -2108,9 +2141,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
                /* deliver to local stack */
                skb->protocol = eth_type_trans(skb, dev);
                memset(skb->cb, 0, sizeof(skb->cb));
-               if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
-                   rx->local->napi)
-                       napi_gro_receive(rx->local->napi, skb);
+               if (rx->napi)
+                       napi_gro_receive(rx->napi, skb);
                else
                        netif_receive_skb(skb);
        }
@@ -2378,9 +2410,8 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
                    tf->category == WLAN_CATEGORY_TDLS &&
                    (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
                     tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
-                       rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TDLS_CHSW;
-                       skb_queue_tail(&sdata->skb_queue, rx->skb);
-                       ieee80211_queue_work(&rx->local->hw, &sdata->work);
+                       skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
+                       schedule_work(&local->tdls_chsw_work);
                        if (rx->sta)
                                rx->sta->rx_packets++;
 
@@ -3004,7 +3035,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
        return RX_QUEUED;
 }
 
-/* TODO: use IEEE80211_RX_FRAGMENTED */
 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
                                        struct ieee80211_rate *rate)
 {
@@ -3216,7 +3246,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
                /* This is OK -- must be QoS data frame */
                .security_idx = tid,
                .seqno_idx = tid,
-               .flags = IEEE80211_RX_REORDER_TIMER,
+               .napi = NULL, /* must be NULL to not have races */
        };
        struct tid_ampdu_rx *tid_agg_rx;
 
@@ -3286,7 +3316,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
        case NL80211_IFTYPE_OCB:
                if (!bssid)
                        return false;
-               if (ieee80211_is_beacon(hdr->frame_control))
+               if (!ieee80211_is_data_present(hdr->frame_control))
                        return false;
                if (!is_broadcast_ether_addr(bssid))
                        return false;
@@ -3393,7 +3423,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
  * be called with rcu_read_lock protection.
  */
 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
-                                        struct sk_buff *skb)
+                                        struct sk_buff *skb,
+                                        struct napi_struct *napi)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_sub_if_data *sdata;
@@ -3409,6 +3440,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
        memset(&rx, 0, sizeof(rx));
        rx.skb = skb;
        rx.local = local;
+       rx.napi = napi;
 
        if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
                I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
@@ -3510,7 +3542,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
  * This is the receive path handler. It is called by a low level driver when an
  * 802.11 MPDU is received from the hardware.
  */
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+                      struct napi_struct *napi)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_rate *rate = NULL;
@@ -3609,7 +3642,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
        ieee80211_tpt_led_trig_rx(local,
                        ((struct ieee80211_hdr *)skb->data)->frame_control,
                        skb->len);
-       __ieee80211_rx_handle_packet(hw, skb);
+       __ieee80211_rx_handle_packet(hw, skb, napi);
 
        rcu_read_unlock();
 
@@ -3617,7 +3650,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
  drop:
        kfree_skb(skb);
 }
-EXPORT_SYMBOL(ieee80211_rx);
+EXPORT_SYMBOL(ieee80211_rx_napi);
 
 /* This is a version of the rx handler that can be called from hard irq
  * context. Post the skb on the queue and schedule the tasklet */
index 666ddac3c87c67a63ed685efe83221cf7d52428d..64f1936350c66e48fb076beb7b418d55f42c8753 100644 (file)
@@ -68,7 +68,7 @@ static const struct rhashtable_params sta_rht_params = {
        .nelem_hint = 3, /* start small */
        .automatic_shrinking = true,
        .head_offset = offsetof(struct sta_info, hash_node),
-       .key_offset = offsetof(struct sta_info, sta.addr),
+       .key_offset = offsetof(struct sta_info, addr),
        .key_len = ETH_ALEN,
        .hashfn = sta_addr_hash,
        .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
@@ -249,6 +249,9 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
        if (sta->sta.txq[0])
                kfree(to_txq_info(sta->sta.txq[0]));
        kfree(rcu_dereference_raw(sta->sta.rates));
+#ifdef CONFIG_MAC80211_MESH
+       kfree(sta->mesh);
+#endif
        kfree(sta);
 }
 
@@ -313,13 +316,19 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
        mutex_init(&sta->ampdu_mlme.mtx);
 #ifdef CONFIG_MAC80211_MESH
-       spin_lock_init(&sta->plink_lock);
-       if (ieee80211_vif_is_mesh(&sdata->vif) &&
-           !sdata->u.mesh.user_mpm)
-               init_timer(&sta->plink_timer);
-       sta->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+       if (ieee80211_vif_is_mesh(&sdata->vif)) {
+               sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
+               if (!sta->mesh)
+                       goto free;
+               spin_lock_init(&sta->mesh->plink_lock);
+               if (ieee80211_vif_is_mesh(&sdata->vif) &&
+                   !sdata->u.mesh.user_mpm)
+                       init_timer(&sta->mesh->plink_timer);
+               sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+       }
 #endif
 
+       memcpy(sta->addr, addr, ETH_ALEN);
        memcpy(sta->sta.addr, addr, ETH_ALEN);
        sta->local = local;
        sta->sdata = sdata;
@@ -332,9 +341,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 
        ktime_get_ts(&uptime);
        sta->last_connected = uptime.tv_sec;
-       ewma_init(&sta->avg_signal, 1024, 8);
+       ewma_signal_init(&sta->avg_signal);
        for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
-               ewma_init(&sta->chain_signal_avg[i], 1024, 8);
+               ewma_signal_init(&sta->chain_signal_avg[i]);
 
        if (local->ops->wake_tx_queue) {
                void *txq_data;
@@ -405,6 +414,9 @@ free_txq:
        if (sta->sta.txq[0])
                kfree(to_txq_info(sta->sta.txq[0]));
 free:
+#ifdef CONFIG_MAC80211_MESH
+       kfree(sta->mesh);
+#endif
        kfree(sta);
        return NULL;
 }
@@ -623,7 +635,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
        bool indicate_tim = false;
        u8 ignore_for_tim = sta->sta.uapsd_queues;
        int ac;
-       u16 id;
+       u16 id = sta->sta.aid;
 
        if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
            sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
@@ -631,12 +643,9 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
                        return;
 
                ps = &sta->sdata->bss->ps;
-               id = sta->sta.aid;
 #ifdef CONFIG_MAC80211_MESH
        } else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
                ps = &sta->sdata->u.mesh.ps;
-               /* TIM map only for 1 <= PLID <= IEEE80211_MAX_AID */
-               id = sta->plid % (IEEE80211_MAX_AID + 1);
 #endif
        } else {
                return;
@@ -1887,7 +1896,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                }
 
                if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
-                       sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
+                       sinfo->signal_avg =
+                               (s8) -ewma_signal_read(&sta->avg_signal);
                        sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
                }
        }
@@ -1902,7 +1912,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
                        sinfo->chain_signal[i] = sta->chain_signal_last[i];
                        sinfo->chain_signal_avg[i] =
-                               (s8) -ewma_read(&sta->chain_signal_avg[i]);
+                               (s8) -ewma_signal_read(&sta->chain_signal_avg[i]);
                }
        }
 
@@ -1956,16 +1966,16 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                                 BIT(NL80211_STA_INFO_PEER_PM) |
                                 BIT(NL80211_STA_INFO_NONPEER_PM);
 
-               sinfo->llid = sta->llid;
-               sinfo->plid = sta->plid;
-               sinfo->plink_state = sta->plink_state;
+               sinfo->llid = sta->mesh->llid;
+               sinfo->plid = sta->mesh->plid;
+               sinfo->plink_state = sta->mesh->plink_state;
                if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
                        sinfo->filled |= BIT(NL80211_STA_INFO_T_OFFSET);
-                       sinfo->t_offset = sta->t_offset;
+                       sinfo->t_offset = sta->mesh->t_offset;
                }
-               sinfo->local_pm = sta->local_pm;
-               sinfo->peer_pm = sta->peer_pm;
-               sinfo->nonpeer_pm = sta->nonpeer_pm;
+               sinfo->local_pm = sta->mesh->local_pm;
+               sinfo->peer_pm = sta->mesh->peer_pm;
+               sinfo->nonpeer_pm = sta->mesh->nonpeer_pm;
 #endif
        }
 
index 226f8ca47ad6737ff54c6dc3a8bc4036189e9fbb..b087c71ff7fe4ef99ed3869cc2c7eecc174b95c0 100644 (file)
@@ -53,6 +53,8 @@
  * @WLAN_STA_TDLS_CHAN_SWITCH: This TDLS peer supports TDLS channel-switching
  * @WLAN_STA_TDLS_OFF_CHANNEL: The local STA is currently off-channel with this
  *     TDLS peer
+ * @WLAN_STA_TDLS_WIDER_BW: This TDLS peer supports working on a wider bw on
+ *     the BSS base channel.
  * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
  *     keeping station in power-save mode, reply when the driver
  *     unblocks the station.
@@ -84,6 +86,7 @@ enum ieee80211_sta_info_flags {
        WLAN_STA_TDLS_INITIATOR,
        WLAN_STA_TDLS_CHAN_SWITCH,
        WLAN_STA_TDLS_OFF_CHANNEL,
+       WLAN_STA_TDLS_WIDER_BW,
        WLAN_STA_UAPSD,
        WLAN_STA_SP,
        WLAN_STA_4ADDR_EVENT,
@@ -269,6 +272,56 @@ struct ieee80211_fast_tx {
        struct rcu_head rcu_head;
 };
 
+/**
+ * struct mesh_sta - mesh STA information
+ * @plink_lock: serialize access to plink fields
+ * @llid: Local link ID
+ * @plid: Peer link ID
+ * @aid: local aid supplied by peer
+ * @reason: Cancel reason on PLINK_HOLDING state
+ * @plink_retries: Retries in establishment
+ * @plink_state: peer link state
+ * @plink_timeout: timeout of peer link
+ * @plink_timer: peer link watch timer
+ * @t_offset: timing offset relative to this host
+ * @t_offset_setpoint: reference timing offset of this sta to be used when
+ *     calculating clockdrift
+ * @local_pm: local link-specific power save mode
+ * @peer_pm: peer-specific power save mode towards local STA
+ * @nonpeer_pm: STA power save mode towards non-peer neighbors
+ * @processed_beacon: set to true after peer rates and capabilities are
+ *     processed
+ * @fail_avg: moving percentage of failed MSDUs
+ */
+struct mesh_sta {
+       struct timer_list plink_timer;
+
+       s64 t_offset;
+       s64 t_offset_setpoint;
+
+       spinlock_t plink_lock;
+       u16 llid;
+       u16 plid;
+       u16 aid;
+       u16 reason;
+       u8 plink_retries;
+
+       bool processed_beacon;
+
+       enum nl80211_plink_state plink_state;
+       u32 plink_timeout;
+
+       /* mesh power save */
+       enum nl80211_mesh_power_mode local_pm;
+       enum nl80211_mesh_power_mode peer_pm;
+       enum nl80211_mesh_power_mode nonpeer_pm;
+
+       /* moving percentage of failed MSDUs */
+       unsigned int fail_avg;
+};
+
+DECLARE_EWMA(signal, 1024, 8)
+
 /**
  * struct sta_info - STA information
  *
@@ -278,12 +331,13 @@ struct ieee80211_fast_tx {
  * @list: global linked list entry
  * @free_list: list entry for keeping track of stations to free
  * @hash_node: hash node for rhashtable
+ * @addr: station's MAC address - duplicated from public part to
+ *     let the hash table work with just a single cacheline
  * @local: pointer to the global information
  * @sdata: virtual interface this station belongs to
  * @ptk: peer keys negotiated with this station, if any
  * @ptk_idx: last installed peer key index
  * @gtk: group keys negotiated with this station, if any
- * @gtk_idx: last installed group key index
  * @rate_ctrl: rate control algorithm reference
  * @rate_ctrl_lock: spinlock used to protect rate control data
  *     (data inside the algorithm, so serializes calls there)
@@ -318,30 +372,17 @@ struct ieee80211_fast_tx {
  * @last_signal: signal of last received frame from this STA
  * @avg_signal: moving average of signal of received frames from this STA
  * @last_ack_signal: signal of last received Ack frame from this STA
- * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
+ * @last_seq_ctrl: last received seq/frag number from this STA (per TID
+ *     plus one for non-QoS frames)
  * @tx_filtered_count: number of frames the hardware filtered for this STA
  * @tx_retry_failed: number of frames that failed retry
  * @tx_retry_count: total number of retries for frames to this STA
- * @fail_avg: moving percentage of failed MSDUs
  * @tx_packets: number of RX/TX MSDUs
  * @tx_bytes: number of bytes transmitted to this STA
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @ampdu_mlme: A-MPDU state machine state
  * @timer_to_tid: identity mapping to ID timers
- * @plink_lock: serialize access to plink fields
- * @llid: Local link ID
- * @plid: Peer link ID
- * @reason: Cancel reason on PLINK_HOLDING state
- * @plink_retries: Retries in establishment
- * @plink_state: peer link state
- * @plink_timeout: timeout of peer link
- * @plink_timer: peer link watch timer
- * @t_offset: timing offset relative to this host
- * @t_offset_setpoint: reference timing offset of this sta to be used when
- *     calculating clockdrift
- * @local_pm: local link-specific power save mode
- * @peer_pm: peer-specific power save mode towards local STA
- * @nonpeer_pm: STA power save mode towards non-peer neighbors
+ * @mesh: mesh STA information
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -369,19 +410,19 @@ struct ieee80211_fast_tx {
  * @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID
  *     entry for non-QoS frames
  * @fast_tx: TX fastpath information
- * @processed_beacon: set to true after peer rates and capabilities are
- *     processed
+ * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to
+ *     the BSS one.
  */
 struct sta_info {
        /* General information, mostly static */
        struct list_head list, free_list;
        struct rcu_head rcu_head;
        struct rhash_head hash_node;
+       u8 addr[ETH_ALEN];
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
        struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
        struct ieee80211_key __rcu *ptk[NUM_DEFAULT_KEYS];
-       u8 gtk_idx;
        u8 ptk_idx;
        struct rate_control_ref *rate_ctrl;
        void *rate_ctrl_priv;
@@ -390,6 +431,10 @@ struct sta_info {
 
        struct ieee80211_fast_tx __rcu *fast_tx;
 
+#ifdef CONFIG_MAC80211_MESH
+       struct mesh_sta *mesh;
+#endif
+
        struct work_struct drv_deliver_wk;
 
        u16 listen_interval;
@@ -419,12 +464,12 @@ struct sta_info {
        unsigned long rx_fragments;
        unsigned long rx_dropped;
        int last_signal;
-       struct ewma avg_signal;
+       struct ewma_signal avg_signal;
        int last_ack_signal;
 
        u8 chains;
        s8 chain_signal_last[IEEE80211_MAX_CHAINS];
-       struct ewma chain_signal_avg[IEEE80211_MAX_CHAINS];
+       struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
 
        /* Plus 1 for non-QoS frames */
        __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
@@ -432,8 +477,6 @@ struct sta_info {
        /* Updated from TX status path only, no locking requirements */
        unsigned long tx_filtered_count;
        unsigned long tx_retry_failed, tx_retry_count;
-       /* moving percentage of failed MSDUs */
-       unsigned int fail_avg;
 
        /* Updated from TX path only, no locking requirements */
        u64 tx_packets[IEEE80211_NUM_ACS];
@@ -455,29 +498,6 @@ struct sta_info {
        struct sta_ampdu_mlme ampdu_mlme;
        u8 timer_to_tid[IEEE80211_NUM_TIDS];
 
-#ifdef CONFIG_MAC80211_MESH
-       /*
-        * Mesh peer link attributes, protected by plink_lock.
-        * TODO: move to a sub-structure that is referenced with pointer?
-        */
-       spinlock_t plink_lock;
-       u16 llid;
-       u16 plid;
-       u16 reason;
-       u8 plink_retries;
-       enum nl80211_plink_state plink_state;
-       u32 plink_timeout;
-       struct timer_list plink_timer;
-
-       s64 t_offset;
-       s64 t_offset_setpoint;
-       /* mesh power save */
-       enum nl80211_mesh_power_mode local_pm;
-       enum nl80211_mesh_power_mode peer_pm;
-       enum nl80211_mesh_power_mode nonpeer_pm;
-       bool processed_beacon;
-#endif
-
 #ifdef CONFIG_MAC80211_DEBUGFS
        struct sta_info_debugfsdentries {
                struct dentry *dir;
@@ -498,6 +518,8 @@ struct sta_info {
 
        u8 reserved_tid;
 
+       struct cfg80211_chan_def tdls_chandef;
+
        /* keep last! */
        struct ieee80211_sta sta;
 };
@@ -505,7 +527,7 @@ struct sta_info {
 static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
 {
 #ifdef CONFIG_MAC80211_MESH
-       return sta->plink_state;
+       return sta->mesh->plink_state;
 #endif
        return NL80211_PLINK_LISTEN;
 }
@@ -608,7 +630,7 @@ u32 sta_addr_hash(const void *key, u32 length, u32 seed);
                               _sta_bucket_idx(tbl, _addr),             \
                               hash_node)                               \
        /* compare address and run code only if it matches */           \
-       if (ether_addr_equal(_sta->sta.addr, (_addr)))
+       if (ether_addr_equal(_sta->addr, (_addr)))
 
 /*
  * Get STA info by index, BROKEN!
index 45628f37c083aa72575fcc0a0241b1aefc0cb5ad..8ba5832435095f10e94f782e07d92a4f742cad3a 100644 (file)
@@ -515,7 +515,7 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
 
                if (!sdata) {
                        skb->dev = NULL;
-               } else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
+               } else {
                        unsigned int hdr_size =
                                ieee80211_hdrlen(hdr->frame_control);
 
@@ -529,9 +529,6 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
                                ieee80211_mgd_conn_tx_status(sdata,
                                                             hdr->frame_control,
                                                             acked);
-               } else {
-                       /* we assign ack frame ID for the others */
-                       WARN_ON(1);
                }
 
                rcu_read_unlock();
index 8db6e2994bbc59bb7cf38c36848a7d92b8e4a0e5..aee701a5649e59ebd03ef300f25e33eadfc280d5 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2014, Intel Corporation
  * Copyright 2014  Intel Mobile Communications GmbH
+ * Copyright 2015  Intel Deutschland GmbH
  *
  * This file is GPLv2 as found in COPYING.
  */
@@ -11,6 +12,7 @@
 #include <linux/ieee80211.h>
 #include <linux/log2.h>
 #include <net/cfg80211.h>
+#include <linux/rtnetlink.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 
@@ -35,20 +37,28 @@ void ieee80211_tdls_peer_del_work(struct work_struct *wk)
        mutex_unlock(&local->mtx);
 }
 
-static void ieee80211_tdls_add_ext_capab(struct ieee80211_local *local,
+static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
                                         struct sk_buff *skb)
 {
-       u8 *pos = (void *)skb_put(skb, 7);
+       struct ieee80211_local *local = sdata->local;
        bool chan_switch = local->hw.wiphy->features &
                           NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+       bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW);
+       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+       bool vht = sband && sband->vht_cap.vht_supported;
+       u8 *pos = (void *)skb_put(skb, 10);
 
        *pos++ = WLAN_EID_EXT_CAPABILITY;
-       *pos++ = 5; /* len */
+       *pos++ = 8; /* len */
        *pos++ = 0x0;
        *pos++ = 0x0;
        *pos++ = 0x0;
        *pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0;
        *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
+       *pos++ = 0;
+       *pos++ = 0;
+       *pos++ = (vht && wider_band) ? WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED : 0;
 }
 
 static u8
@@ -283,6 +293,60 @@ static void ieee80211_tdls_add_wmm_param_ie(struct ieee80211_sub_if_data *sdata,
        }
 }
 
+static void
+ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
+                                  struct sta_info *sta)
+{
+       /* IEEE802.11ac-2013 Table E-4 */
+       u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
+       struct cfg80211_chan_def uc = sta->tdls_chandef;
+       enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta);
+       int i;
+
+       /* only support upgrading non-narrow channels up to 80Mhz */
+       if (max_width == NL80211_CHAN_WIDTH_5 ||
+           max_width == NL80211_CHAN_WIDTH_10)
+               return;
+
+       if (max_width > NL80211_CHAN_WIDTH_80)
+               max_width = NL80211_CHAN_WIDTH_80;
+
+       if (uc.width == max_width)
+               return;
+       /*
+        * Channel usage constrains in the IEEE802.11ac-2013 specification only
+        * allow expanding a 20MHz channel to 80MHz in a single way. In
+        * addition, there are no 40MHz allowed channels that are not part of
+        * the allowed 80MHz range in the 5GHz spectrum (the relevant one here).
+        */
+       for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
+               if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
+                       uc.center_freq1 = centers_80mhz[i];
+                       uc.width = NL80211_CHAN_WIDTH_80;
+                       break;
+               }
+
+       if (!uc.center_freq1)
+               return;
+
+       /* proceed to downgrade the chandef until usable or the same */
+       while (uc.width > max_width &&
+              !cfg80211_reg_can_beacon(sdata->local->hw.wiphy,
+                                       &uc, sdata->wdev.iftype))
+               ieee80211_chandef_downgrade(&uc);
+
+       if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
+               tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n",
+                        sta->tdls_chandef.width, uc.width);
+
+               /*
+                * the station is not yet authorized when BW upgrade is done,
+                * locking is not required
+                */
+               sta->tdls_chandef = uc;
+       }
+}
+
 static void
 ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
                                   struct sk_buff *skb, const u8 *peer,
@@ -320,7 +384,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
                offset = noffset;
        }
 
-       ieee80211_tdls_add_ext_capab(local, skb);
+       ieee80211_tdls_add_ext_capab(sdata, skb);
 
        /* add the QoS element if we support it */
        if (local->hw.queues >= IEEE80211_NUM_ACS &&
@@ -350,15 +414,17 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
                offset = noffset;
        }
 
-       rcu_read_lock();
+       mutex_lock(&local->sta_mtx);
 
        /* we should have the peer STA if we're already responding */
        if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
                sta = sta_info_get(sdata, peer);
                if (WARN_ON_ONCE(!sta)) {
-                       rcu_read_unlock();
+                       mutex_unlock(&local->sta_mtx);
                        return;
                }
+
+               sta->tdls_chandef = sdata->vif.bss_conf.chandef;
        }
 
        ieee80211_tdls_add_oper_classes(sdata, skb);
@@ -384,10 +450,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
                ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
        } else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
                   ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
-               /* disable SMPS in TDLS responder */
-               sta->sta.ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
-                                       << IEEE80211_HT_CAP_SM_PS_SHIFT;
-
                /* the peer caps are already intersected with our own */
                memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
 
@@ -448,9 +510,16 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
 
                pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
                ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
+
+               /*
+                * if both peers support WIDER_BW, we can expand the chandef to
+                * a wider compatible one, up to 80MHz
+                */
+               if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+                       ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
        }
 
-       rcu_read_unlock();
+       mutex_unlock(&local->sta_mtx);
 
        /* add any remaining IEs */
        if (extra_ies_len) {
@@ -474,15 +543,17 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
        enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
        u8 *pos;
 
-       rcu_read_lock();
+       mutex_lock(&local->sta_mtx);
 
        sta = sta_info_get(sdata, peer);
        ap_sta = sta_info_get(sdata, ifmgd->bssid);
        if (WARN_ON_ONCE(!sta || !ap_sta)) {
-               rcu_read_unlock();
+               mutex_unlock(&local->sta_mtx);
                return;
        }
 
+       sta->tdls_chandef = sdata->vif.bss_conf.chandef;
+
        /* add any custom IEs that go before the QoS IE */
        if (extra_ies_len) {
                static const u8 before_qos[] = {
@@ -530,12 +601,19 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
 
        /* only include VHT-operation if not on the 2.4GHz band */
        if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
+               /*
+                * if both peers support WIDER_BW, we can expand the chandef to
+                * a wider compatible one, up to 80MHz
+                */
+               if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+                       ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
+
                pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
                ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
-                                           &sdata->vif.bss_conf.chandef);
+                                           &sta->tdls_chandef);
        }
 
-       rcu_read_unlock();
+       mutex_unlock(&local->sta_mtx);
 
        /* add any remaining IEs */
        if (extra_ies_len) {
@@ -784,7 +862,7 @@ ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata,
                               max(sizeof(struct ieee80211_mgmt),
                                   sizeof(struct ieee80211_tdls_data)) +
                               50 + /* supported rates */
-                              7 + /* ext capab */
+                              10 + /* ext capab */
                               26 + /* max(WMM-info, WMM-param) */
                               2 + max(sizeof(struct ieee80211_ht_cap),
                                       sizeof(struct ieee80211_ht_operation)) +
@@ -983,8 +1061,17 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
+       enum ieee80211_smps_mode smps_mode = sdata->u.mgd.driver_smps_mode;
        int ret;
 
+       /* don't support setup with forced SMPS mode that's not off */
+       if (smps_mode != IEEE80211_SMPS_AUTOMATIC &&
+           smps_mode != IEEE80211_SMPS_OFF) {
+               tdls_dbg(sdata, "Aborting TDLS setup due to SMPS mode %d\n",
+                        smps_mode);
+               return -ENOTSUPP;
+       }
+
        mutex_lock(&local->mtx);
 
        /* we don't support concurrent TDLS peer setups */
@@ -1146,6 +1233,22 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
        return ret;
 }
 
+static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx_conf *conf;
+       struct ieee80211_chanctx *ctx;
+
+       mutex_lock(&local->chanctx_mtx);
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       if (conf) {
+               ctx = container_of(conf, struct ieee80211_chanctx, conf);
+               ieee80211_recalc_chanctx_chantype(local, ctx);
+       }
+       mutex_unlock(&local->chanctx_mtx);
+}
+
 int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                        const u8 *peer, enum nl80211_tdls_operation oper)
 {
@@ -1182,6 +1285,8 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                        break;
                }
 
+               iee80211_tdls_recalc_chanctx(sdata);
+
                rcu_read_lock();
                sta = sta_info_get(sdata, peer);
                if (!sta) {
@@ -1213,6 +1318,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                ieee80211_flush_queues(local, sdata, false);
 
                ret = sta_info_destroy_addr(sdata, peer);
+               iee80211_tdls_recalc_chanctx(sdata);
                break;
        default:
                ret = -ENOTSUPP;
@@ -1224,6 +1330,10 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                eth_zero_addr(sdata->u.mgd.tdls_peer);
        }
 
+       if (ret == 0)
+               ieee80211_queue_work(&sdata->local->hw,
+                                    &sdata->u.mgd.request_smps_work);
+
        mutex_unlock(&local->mtx);
        return ret;
 }
@@ -1627,6 +1737,31 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
                return -EINVAL;
        }
 
+       if (!elems.sec_chan_offs) {
+               chan_type = NL80211_CHAN_HT20;
+       } else {
+               switch (elems.sec_chan_offs->sec_chan_offs) {
+               case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+                       chan_type = NL80211_CHAN_HT40PLUS;
+                       break;
+               case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+                       chan_type = NL80211_CHAN_HT40MINUS;
+                       break;
+               default:
+                       chan_type = NL80211_CHAN_HT20;
+                       break;
+               }
+       }
+
+       cfg80211_chandef_create(&chandef, chan, chan_type);
+
+       /* we will be active on the TDLS link */
+       if (!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &chandef,
+                                          sdata->wdev.iftype)) {
+               tdls_dbg(sdata, "TDLS chan switch to forbidden channel\n");
+               return -EINVAL;
+       }
+
        mutex_lock(&local->sta_mtx);
        sta = sta_info_get(sdata, tf->sa);
        if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
@@ -1647,27 +1782,15 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
-       if (!sta->sta.ht_cap.ht_supported) {
-               chan_type = NL80211_CHAN_NO_HT;
-       } else if (!elems.sec_chan_offs) {
-               chan_type = NL80211_CHAN_HT20;
-       } else {
-               switch (elems.sec_chan_offs->sec_chan_offs) {
-               case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-                       chan_type = NL80211_CHAN_HT40PLUS;
-                       break;
-               case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-                       chan_type = NL80211_CHAN_HT40MINUS;
-                       break;
-               default:
-                       chan_type = NL80211_CHAN_HT20;
-                       break;
-               }
+       /* peer should have known better */
+       if (!sta->sta.ht_cap.ht_supported && elems.sec_chan_offs &&
+           elems.sec_chan_offs->sec_chan_offs) {
+               tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n");
+               ret = -ENOTSUPP;
+               goto out;
        }
 
-       cfg80211_chandef_create(&chandef, chan, chan_type);
        params.chandef = &chandef;
-
        params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time);
        params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout);
 
@@ -1691,12 +1814,15 @@ out:
        return ret;
 }
 
-void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
-                                          struct sk_buff *skb)
+static void
+ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
+                                     struct sk_buff *skb)
 {
        struct ieee80211_tdls_data *tf = (void *)skb->data;
        struct wiphy *wiphy = sdata->local->hw.wiphy;
 
+       ASSERT_RTNL();
+
        /* make sure the driver supports it */
        if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH))
                return;
@@ -1720,3 +1846,47 @@ void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
                return;
        }
 }
+
+void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
+{
+       struct sta_info *sta;
+       u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+               if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+                   !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+                       continue;
+
+               ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
+                                           NL80211_TDLS_TEARDOWN, reason,
+                                           GFP_ATOMIC);
+       }
+       rcu_read_unlock();
+}
+
+void ieee80211_tdls_chsw_work(struct work_struct *wk)
+{
+       struct ieee80211_local *local =
+               container_of(wk, struct ieee80211_local, tdls_chsw_work);
+       struct ieee80211_sub_if_data *sdata;
+       struct sk_buff *skb;
+       struct ieee80211_tdls_data *tf;
+
+       rtnl_lock();
+       while ((skb = skb_dequeue(&local->skb_queue_tdls_chsw))) {
+               tf = (struct ieee80211_tdls_data *)skb->data;
+               list_for_each_entry(sdata, &local->interfaces, list) {
+                       if (!ieee80211_sdata_running(sdata) ||
+                           sdata->vif.type != NL80211_IFTYPE_STATION ||
+                           !ether_addr_equal(tf->da, sdata->vif.addr))
+                               continue;
+
+                       ieee80211_process_tdls_channel_switch(sdata, skb);
+                       break;
+               }
+
+               kfree_skb(skb);
+       }
+       rtnl_unlock();
+}
index b8233505bf9fd3bb4945c126eebee069f2b94878..84e0e8c7fb236952dfc1cfcb23204623e80d7867 100644 (file)
@@ -311,9 +311,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
        if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
                return TX_CONTINUE;
 
-       if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
-               return TX_CONTINUE;
-
        if (tx->flags & IEEE80211_TX_PS_BUFFERED)
                return TX_CONTINUE;
 
@@ -610,7 +607,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
        if (tx->key) {
                bool skip_hw = false;
 
-               tx->key->tx_rx_count++;
                /* TODO: add threshold stuff again */
 
                switch (tx->key->conf.cipher) {
@@ -690,7 +686,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
 
        txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
                    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
-                   tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
+                   tx->sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+                   tx->sdata->vif.type == NL80211_IFTYPE_OCB);
 
        /* set up RTS protection if desired */
        if (len > tx->local->hw.wiphy->rts_threshold) {
@@ -2777,7 +2774,11 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
                sdata->sequence_number += 0x10;
        }
 
-       sta->tx_msdu[tid]++;
+       if (skb_shinfo(skb)->gso_size)
+               sta->tx_msdu[tid] +=
+                       DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
+       else
+               sta->tx_msdu[tid]++;
 
        info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
 
@@ -3213,6 +3214,16 @@ static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata,
        rcu_read_unlock();
 }
 
+static u8 __ieee80211_csa_update_counter(struct beacon_data *beacon)
+{
+       beacon->csa_current_counter--;
+
+       /* the counter should never reach 0 */
+       WARN_ON_ONCE(!beacon->csa_current_counter);
+
+       return beacon->csa_current_counter;
+}
+
 u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
@@ -3231,11 +3242,7 @@ u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
        if (!beacon)
                goto unlock;
 
-       beacon->csa_current_counter--;
-
-       /* the counter should never reach 0 */
-       WARN_ON_ONCE(!beacon->csa_current_counter);
-       count = beacon->csa_current_counter;
+       count = __ieee80211_csa_update_counter(beacon);
 
 unlock:
        rcu_read_unlock();
@@ -3335,7 +3342,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
                if (beacon) {
                        if (beacon->csa_counter_offsets[0]) {
                                if (!is_template)
-                                       ieee80211_csa_update_counter(vif);
+                                       __ieee80211_csa_update_counter(beacon);
 
                                ieee80211_set_csa(sdata, beacon);
                        }
@@ -3381,7 +3388,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
 
                if (beacon->csa_counter_offsets[0]) {
                        if (!is_template)
-                               ieee80211_csa_update_counter(vif);
+                               __ieee80211_csa_update_counter(beacon);
 
                        ieee80211_set_csa(sdata, beacon);
                }
@@ -3411,7 +3418,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
                                 * for now we leave it consistent with overall
                                 * mac80211's behavior.
                                 */
-                               ieee80211_csa_update_counter(vif);
+                               __ieee80211_csa_update_counter(beacon);
 
                        ieee80211_set_csa(sdata, beacon);
                }
index 43e5aadd7a894f04b7b2d7cf4b3d456ff7228400..1104421bc525598aae491c587f29eb4b385fc220 100644 (file)
@@ -47,55 +47,6 @@ struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
 
-u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
-                       enum nl80211_iftype type)
-{
-       __le16 fc = hdr->frame_control;
-
-        /* drop ACK/CTS frames and incorrect hdr len (ctrl) */
-       if (len < 16)
-               return NULL;
-
-       if (ieee80211_is_data(fc)) {
-               if (len < 24) /* drop incorrect hdr len (data) */
-                       return NULL;
-
-               if (ieee80211_has_a4(fc))
-                       return NULL;
-               if (ieee80211_has_tods(fc))
-                       return hdr->addr1;
-               if (ieee80211_has_fromds(fc))
-                       return hdr->addr2;
-
-               return hdr->addr3;
-       }
-
-       if (ieee80211_is_mgmt(fc)) {
-               if (len < 24) /* drop incorrect hdr len (mgmt) */
-                       return NULL;
-               return hdr->addr3;
-       }
-
-       if (ieee80211_is_ctl(fc)) {
-               if (ieee80211_is_pspoll(fc))
-                       return hdr->addr1;
-
-               if (ieee80211_is_back_req(fc)) {
-                       switch (type) {
-                       case NL80211_IFTYPE_STATION:
-                               return hdr->addr2;
-                       case NL80211_IFTYPE_AP:
-                       case NL80211_IFTYPE_AP_VLAN:
-                               return hdr->addr1;
-                       default:
-                               break; /* fall through to the return */
-                       }
-               }
-       }
-
-       return NULL;
-}
-
 void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
 {
        struct sk_buff *skb;
@@ -752,7 +703,12 @@ EXPORT_SYMBOL_GPL(wdev_to_ieee80211_vif);
 
 struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif)
 {
-       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+       struct ieee80211_sub_if_data *sdata;
+
+       if (!vif)
+               return NULL;
+
+       sdata = vif_to_sdata(vif);
 
        if (!ieee80211_sdata_running(sdata) ||
            !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
@@ -1709,6 +1665,7 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
        local->resuming = false;
        local->suspended = false;
        local->started = false;
+       local->in_reconfig = false;
 
        /* scheduled scan clearly can't be running any more, but tell
         * cfg80211 and clear local state
@@ -1759,16 +1716,24 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        struct ieee80211_sub_if_data *sched_scan_sdata;
        struct cfg80211_sched_scan_request *sched_scan_req;
        bool sched_scan_stopped = false;
+       bool suspended = local->suspended;
 
        /* nothing to do if HW shouldn't run */
        if (!local->open_count)
                goto wake_up;
 
 #ifdef CONFIG_PM
-       if (local->suspended)
+       if (suspended)
                local->resuming = true;
 
        if (local->wowlan) {
+               /*
+                * In the wowlan case, both mac80211 and the device
+                * are functional when the resume op is called, so
+                * clear local->suspended so the device could operate
+                * normally (e.g. pass rx frames).
+                */
+               local->suspended = false;
                res = drv_resume(local);
                local->wowlan = false;
                if (res < 0) {
@@ -1781,8 +1746,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                /*
                 * res is 1, which means the driver requested
                 * to go through a regular reset on wakeup.
+                * restore local->suspended in this case.
                 */
                reconfig_due_to_wowlan = true;
+               local->suspended = true;
        }
 #endif
 
@@ -1794,7 +1761,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
         */
        res = drv_start(local);
        if (res) {
-               if (local->suspended)
+               if (suspended)
                        WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
                else
                        WARN(1, "Hardware became unavailable during restart.\n");
@@ -2088,10 +2055,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
         * If this is for hw restart things are still running.
         * We may want to change that later, however.
         */
-       if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
+       if (local->open_count && (!suspended || reconfig_due_to_wowlan))
                drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
 
-       if (!local->suspended)
+       if (!suspended)
                return 0;
 
 #ifdef CONFIG_PM
index 80694d55db7404079212761ff23083bb9aa35169..834ccdbc74be1ccd518aaa952ad854810d94f4ca 100644 (file)
@@ -308,11 +308,15 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        enum ieee80211_sta_rx_bandwidth bw;
+       enum nl80211_chan_width bss_width = sdata->vif.bss_conf.chandef.width;
 
-       bw = ieee80211_chan_width_to_rx_bw(sdata->vif.bss_conf.chandef.width);
-       bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+       bw = ieee80211_sta_cap_rx_bw(sta);
        bw = min(bw, sta->cur_max_bandwidth);
 
+       /* do not cap the BW of TDLS WIDER_BW peers by the bss */
+       if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+               bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+
        return bw;
 }
 
@@ -422,3 +426,29 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
        if (changed > 0)
                rate_control_rate_update(local, sband, sta, changed);
 }
+
+void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+                                    u16 vht_mask[NL80211_VHT_NSS_MAX])
+{
+       int i;
+       u16 mask, cap = le16_to_cpu(vht_cap);
+
+       for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+               mask = (cap >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
+               switch (mask) {
+               case IEEE80211_VHT_MCS_SUPPORT_0_7:
+                       vht_mask[i] = 0x00FF;
+                       break;
+               case IEEE80211_VHT_MCS_SUPPORT_0_8:
+                       vht_mask[i] = 0x01FF;
+                       break;
+               case IEEE80211_VHT_MCS_SUPPORT_0_9:
+                       vht_mask[i] = 0x03FF;
+                       break;
+               case IEEE80211_VHT_MCS_NOT_SUPPORTED:
+               default:
+                       vht_mask[i] = 0;
+                       break;
+               }
+       }
+}
index 943f7606527e25b4cad0bfea2a38755f1f7f69d9..feb547dc8643ab286fa0c64f19b01e2b4766898e 100644 (file)
@@ -516,31 +516,34 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
                        return RX_DROP_UNUSABLE;
        }
 
-       ccmp_hdr2pn(pn, skb->data + hdrlen);
+       if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+               ccmp_hdr2pn(pn, skb->data + hdrlen);
 
-       queue = rx->security_idx;
+               queue = rx->security_idx;
 
-       if (memcmp(pn, key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN) <= 0) {
-               key->u.ccmp.replays++;
-               return RX_DROP_UNUSABLE;
-       }
+               if (memcmp(pn, key->u.ccmp.rx_pn[queue],
+                          IEEE80211_CCMP_PN_LEN) <= 0) {
+                       key->u.ccmp.replays++;
+                       return RX_DROP_UNUSABLE;
+               }
 
-       if (!(status->flag & RX_FLAG_DECRYPTED)) {
-               u8 aad[2 * AES_BLOCK_SIZE];
-               u8 b_0[AES_BLOCK_SIZE];
-               /* hardware didn't decrypt/verify MIC */
-               ccmp_special_blocks(skb, pn, b_0, aad);
+               if (!(status->flag & RX_FLAG_DECRYPTED)) {
+                       u8 aad[2 * AES_BLOCK_SIZE];
+                       u8 b_0[AES_BLOCK_SIZE];
+                       /* hardware didn't decrypt/verify MIC */
+                       ccmp_special_blocks(skb, pn, b_0, aad);
+
+                       if (ieee80211_aes_ccm_decrypt(
+                                   key->u.ccmp.tfm, b_0, aad,
+                                   skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
+                                   data_len,
+                                   skb->data + skb->len - mic_len, mic_len))
+                               return RX_DROP_UNUSABLE;
+               }
 
-               if (ieee80211_aes_ccm_decrypt(
-                           key->u.ccmp.tfm, b_0, aad,
-                           skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
-                           data_len,
-                           skb->data + skb->len - mic_len, mic_len))
-                       return RX_DROP_UNUSABLE;
+               memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
        }
 
-       memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
-
        /* Remove CCMP header and MIC */
        if (pskb_trim(skb, skb->len - mic_len))
                return RX_DROP_UNUSABLE;
@@ -739,31 +742,35 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
                        return RX_DROP_UNUSABLE;
        }
 
-       gcmp_hdr2pn(pn, skb->data + hdrlen);
+       if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+               gcmp_hdr2pn(pn, skb->data + hdrlen);
 
-       queue = rx->security_idx;
+               queue = rx->security_idx;
 
-       if (memcmp(pn, key->u.gcmp.rx_pn[queue], IEEE80211_GCMP_PN_LEN) <= 0) {
-               key->u.gcmp.replays++;
-               return RX_DROP_UNUSABLE;
-       }
+               if (memcmp(pn, key->u.gcmp.rx_pn[queue],
+                          IEEE80211_GCMP_PN_LEN) <= 0) {
+                       key->u.gcmp.replays++;
+                       return RX_DROP_UNUSABLE;
+               }
 
-       if (!(status->flag & RX_FLAG_DECRYPTED)) {
-               u8 aad[2 * AES_BLOCK_SIZE];
-               u8 j_0[AES_BLOCK_SIZE];
-               /* hardware didn't decrypt/verify MIC */
-               gcmp_special_blocks(skb, pn, j_0, aad);
+               if (!(status->flag & RX_FLAG_DECRYPTED)) {
+                       u8 aad[2 * AES_BLOCK_SIZE];
+                       u8 j_0[AES_BLOCK_SIZE];
+                       /* hardware didn't decrypt/verify MIC */
+                       gcmp_special_blocks(skb, pn, j_0, aad);
+
+                       if (ieee80211_aes_gcm_decrypt(
+                                   key->u.gcmp.tfm, j_0, aad,
+                                   skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
+                                   data_len,
+                                   skb->data + skb->len -
+                                   IEEE80211_GCMP_MIC_LEN))
+                               return RX_DROP_UNUSABLE;
+               }
 
-               if (ieee80211_aes_gcm_decrypt(
-                           key->u.gcmp.tfm, j_0, aad,
-                           skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
-                           data_len,
-                           skb->data + skb->len - IEEE80211_GCMP_MIC_LEN))
-                       return RX_DROP_UNUSABLE;
+               memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
        }
 
-       memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
-
        /* Remove GCMP header and MIC */
        if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN))
                return RX_DROP_UNUSABLE;
index f7ba51e8b4cafbf720c5ee3096c1102cbf2a4438..c865ebb2ace2b0b74cf3ba994c085790cde83da6 100644 (file)
@@ -209,10 +209,6 @@ ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy,
 {
        ASSERT_RTNL();
 
-       if (wpan_dev->min_be == min_be &&
-           wpan_dev->max_be == max_be)
-               return 0;
-
        wpan_dev->min_be = min_be;
        wpan_dev->max_be = max_be;
        return 0;
@@ -224,9 +220,6 @@ ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 {
        ASSERT_RTNL();
 
-       if (wpan_dev->short_addr == short_addr)
-               return 0;
-
        wpan_dev->short_addr = short_addr;
        return 0;
 }
@@ -238,9 +231,6 @@ ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy,
 {
        ASSERT_RTNL();
 
-       if (wpan_dev->csma_retries == max_csma_backoffs)
-               return 0;
-
        wpan_dev->csma_retries = max_csma_backoffs;
        return 0;
 }
@@ -252,9 +242,6 @@ ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy,
 {
        ASSERT_RTNL();
 
-       if (wpan_dev->frame_retries == max_frame_retries)
-               return 0;
-
        wpan_dev->frame_retries = max_frame_retries;
        return 0;
 }
@@ -265,13 +252,20 @@ ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 {
        ASSERT_RTNL();
 
-       if (wpan_dev->lbt == mode)
-               return 0;
-
        wpan_dev->lbt = mode;
        return 0;
 }
 
+static int
+ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy,
+                             struct wpan_dev *wpan_dev, bool ackreq)
+{
+       ASSERT_RTNL();
+
+       wpan_dev->ackreq = ackreq;
+       return 0;
+}
+
 const struct cfg802154_ops mac802154_config_ops = {
        .add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
        .del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
@@ -289,4 +283,5 @@ const struct cfg802154_ops mac802154_config_ops = {
        .set_max_csma_backoffs = ieee802154_set_max_csma_backoffs,
        .set_max_frame_retries = ieee802154_set_max_frame_retries,
        .set_lbt_mode = ieee802154_set_lbt_mode,
+       .set_ackreq_default = ieee802154_set_ackreq_default,
 };
index 416de903e46757cfead3fe54106efa07ce6e6245..ed26952f9e143407723dca1ef45d8400424aca3e 100644 (file)
@@ -125,6 +125,14 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
        if (netif_running(dev))
                return -EBUSY;
 
+       /* lowpan need to be down for update
+        * SLAAC address after ifup
+        */
+       if (sdata->wpan_dev.lowpan_dev) {
+               if (netif_running(sdata->wpan_dev.lowpan_dev))
+                       return -EBUSY;
+       }
+
        ieee802154_be64_to_le64(&extended_addr, addr->sa_data);
        if (!ieee802154_is_valid_extended_unicast_addr(extended_addr))
                return -EINVAL;
@@ -132,6 +140,13 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        sdata->wpan_dev.extended_addr = extended_addr;
 
+       /* update lowpan interface mac address when
+        * wpan mac has been changed
+        */
+       if (sdata->wpan_dev.lowpan_dev)
+               memcpy(sdata->wpan_dev.lowpan_dev->dev_addr, dev->dev_addr,
+                      dev->addr_len);
+
        return mac802154_wpan_update_llsec(dev);
 }
 
@@ -483,8 +498,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
        wpan_dev->min_be = 3;
        wpan_dev->max_be = 5;
        wpan_dev->csma_retries = 4;
-       /* for compatibility, actual default is 3 */
-       wpan_dev->frame_retries = -1;
+       wpan_dev->frame_retries = 3;
 
        wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
        wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
index 9e55431b9a5cc0baf0c40fa9e7a96c3381617ad5..e8cab5bb80c664669f9c34b36e5e9178db83ae13 100644 (file)
@@ -111,7 +111,7 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
        phy->supported.max_minbe = 8;
        phy->supported.min_maxbe = 3;
        phy->supported.max_maxbe = 8;
-       phy->supported.min_frame_retries = -1;
+       phy->supported.min_frame_retries = 0;
        phy->supported.max_frame_retries = 7;
        phy->supported.max_csma_backoffs = 5;
        phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
@@ -177,11 +177,8 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
        }
 
        if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
-               /* TODO should be 3, but our default value is -1 which means
-                * no ARET handling.
-                */
-               local->phy->supported.min_frame_retries = -1;
-               local->phy->supported.max_frame_retries = -1;
+               local->phy->supported.min_frame_retries = 3;
+               local->phy->supported.max_frame_retries = 3;
        }
 
        if (hw->flags & IEEE802154_HW_PROMISCUOUS)
index 276f8c9922184ada1e505cd31cf028552906f217..3da5ca3ba5638e2cbc30b875c2e0ccae347de954 100644 (file)
@@ -48,7 +48,6 @@ int mpls_output(struct sock *sk, struct sk_buff *skb)
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = NULL;
        struct rt6_info *rt6 = NULL;
-       struct lwtunnel_state *lwtstate = NULL;
        int err = 0;
        bool bos;
        int i;
@@ -58,11 +57,9 @@ int mpls_output(struct sock *sk, struct sk_buff *skb)
        if (skb->protocol == htons(ETH_P_IP)) {
                ttl = ip_hdr(skb)->ttl;
                rt = (struct rtable *)dst;
-               lwtstate = rt->rt_lwtstate;
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                ttl = ipv6_hdr(skb)->hop_limit;
                rt6 = (struct rt6_info *)dst;
-               lwtstate = rt6->rt6i_lwtstate;
        } else {
                goto drop;
        }
@@ -72,12 +69,12 @@ int mpls_output(struct sock *sk, struct sk_buff *skb)
        /* Find the output device */
        out_dev = dst->dev;
        if (!mpls_output_possible(out_dev) ||
-           !lwtstate || skb_warn_if_lro(skb))
+           !dst->lwtstate || skb_warn_if_lro(skb))
                goto drop;
 
        skb_forward_csum(skb);
 
-       tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+       tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate);
 
        /* Verify the destination can hold the packet */
        new_header_size = mpls_encap_size(tun_encap_info);
index 6eae69a698ed8344fee68a46e767a2393e53148a..3e1b4abf1897a5bdeca9e5fa061bd06d9b858263 100644 (file)
@@ -867,6 +867,8 @@ config NETFILTER_XT_TARGET_TEE
        depends on NETFILTER_ADVANCED
        depends on IPV6 || IPV6=n
        depends on !NF_CONNTRACK || NF_CONNTRACK
+       select NF_DUP_IPV4
+       select NF_DUP_IPV6 if IP6_NF_IPTABLES
        ---help---
        This option adds a "TEE" target with which a packet can be cloned and
        this clone be rerouted to another nexthop.
index 5882bbfd198c24b9e72d1d6ddc05f21ad38f53bc..136184572fc9d274a5ef9493852c57045119ce5c 100644 (file)
@@ -274,7 +274,7 @@ void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
                " for conn " FMT_CONN "\n",
                __func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
 
-       h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE,
+       h = nf_conntrack_find_get(ip_vs_conn_net(cp), &nf_ct_zone_dflt,
                                  &tuple);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
index 3c20d02aee738c5293a5b449f28ebff596c7232d..ac3be9b0629b7aee7743d414bbffc994ecc9704f 100644 (file)
@@ -126,7 +126,7 @@ EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 unsigned int nf_conntrack_hash_rnd __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
 
-static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
 {
        unsigned int n;
 
@@ -135,7 +135,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
-       return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
+       return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                      tuple->dst.protonum));
 }
@@ -151,15 +151,15 @@ static u32 hash_bucket(u32 hash, const struct net *net)
 }
 
 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
-                                 u16 zone, unsigned int size)
+                                 unsigned int size)
 {
-       return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
+       return __hash_bucket(hash_conntrack_raw(tuple), size);
 }
 
-static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
+static inline u_int32_t hash_conntrack(const struct net *net,
                                       const struct nf_conntrack_tuple *tuple)
 {
-       return __hash_conntrack(tuple, zone, net->ct.htable_size);
+       return __hash_conntrack(tuple, net->ct.htable_size);
 }
 
 bool
@@ -288,7 +288,9 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
 }
 
 /* Released via destroy_conntrack() */
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+                                const struct nf_conntrack_zone *zone,
+                                gfp_t flags)
 {
        struct nf_conn *tmpl;
 
@@ -299,24 +301,15 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);
 
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-       if (zone) {
-               struct nf_conntrack_zone *nf_ct_zone;
+       if (nf_ct_zone_add(tmpl, flags, zone) < 0)
+               goto out_free;
 
-               nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
-               if (!nf_ct_zone)
-                       goto out_free;
-               nf_ct_zone->id = zone;
-       }
-#endif
        atomic_set(&tmpl->ct_general.use, 0);
 
        return tmpl;
-#ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
        kfree(tmpl);
        return NULL;
-#endif
 }
 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
 
@@ -373,7 +366,6 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
-       u16 zone = nf_ct_zone(ct);
        unsigned int sequence;
 
        nf_ct_helper_destroy(ct);
@@ -381,9 +373,9 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
-               hash = hash_conntrack(net, zone,
+               hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-               reply_hash = hash_conntrack(net, zone,
+               reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
@@ -431,8 +423,8 @@ static void death_by_timeout(unsigned long ul_conntrack)
 
 static inline bool
 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
-                       const struct nf_conntrack_tuple *tuple,
-                       u16 zone)
+               const struct nf_conntrack_tuple *tuple,
+               const struct nf_conntrack_zone *zone)
 {
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 
@@ -440,8 +432,8 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
         * so we need to check that the conntrack is confirmed
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
-               nf_ct_zone(ct) == zone &&
-               nf_ct_is_confirmed(ct);
+              nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
+              nf_ct_is_confirmed(ct);
 }
 
 /*
@@ -450,7 +442,7 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
  */
 static struct nf_conntrack_tuple_hash *
-____nf_conntrack_find(struct net *net, u16 zone,
+____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
 {
        struct nf_conntrack_tuple_hash *h;
@@ -486,7 +478,7 @@ begin:
 
 /* Find a connection corresponding to a tuple. */
 static struct nf_conntrack_tuple_hash *
-__nf_conntrack_find_get(struct net *net, u16 zone,
+__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                        const struct nf_conntrack_tuple *tuple, u32 hash)
 {
        struct nf_conntrack_tuple_hash *h;
@@ -513,11 +505,11 @@ begin:
 }
 
 struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
 {
        return __nf_conntrack_find_get(net, zone, tuple,
-                                      hash_conntrack_raw(tuple, zone));
+                                      hash_conntrack_raw(tuple));
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
@@ -536,11 +528,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 int
 nf_conntrack_hash_check_insert(struct nf_conn *ct)
 {
+       const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
-       u16 zone;
        unsigned int sequence;
 
        zone = nf_ct_zone(ct);
@@ -548,9 +540,9 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
-               hash = hash_conntrack(net, zone,
+               hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-               reply_hash = hash_conntrack(net, zone,
+               reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
@@ -558,12 +550,14 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
-                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                    NF_CT_DIRECTION(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
-                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                    NF_CT_DIRECTION(h)))
                        goto out;
 
        add_timer(&ct->timeout);
@@ -588,6 +582,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
 {
+       const struct nf_conntrack_zone *zone;
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
@@ -596,7 +591,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
-       u16 zone;
        unsigned int sequence;
 
        ct = nf_ct_get(skb, &ctinfo);
@@ -617,7 +611,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                /* reuse the hash saved before */
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                hash = hash_bucket(hash, net);
-               reply_hash = hash_conntrack(net, zone,
+               reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
@@ -649,12 +643,14 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
-                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                    NF_CT_DIRECTION(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
-                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                    NF_CT_DIRECTION(h)))
                        goto out;
 
        /* Timer relative to confirmation time, not original
@@ -707,11 +703,14 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
 {
        struct net *net = nf_ct_net(ignored_conntrack);
+       const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        struct nf_conn *ct;
-       u16 zone = nf_ct_zone(ignored_conntrack);
-       unsigned int hash = hash_conntrack(net, zone, tuple);
+       unsigned int hash;
+
+       zone = nf_ct_zone(ignored_conntrack);
+       hash = hash_conntrack(net, tuple);
 
        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
@@ -721,7 +720,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (ct != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple) &&
-                   nf_ct_zone(ct) == zone) {
+                   nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
                        NF_CT_STAT_INC(net, found);
                        rcu_read_unlock_bh();
                        return 1;
@@ -810,7 +809,8 @@ void init_nf_conntrack_hash_rnd(void)
 }
 
 static struct nf_conn *
-__nf_conntrack_alloc(struct net *net, u16 zone,
+__nf_conntrack_alloc(struct net *net,
+                    const struct nf_conntrack_zone *zone,
                     const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
                     gfp_t gfp, u32 hash)
@@ -820,7 +820,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
        if (unlikely(!nf_conntrack_hash_rnd)) {
                init_nf_conntrack_hash_rnd();
                /* recompute the hash as nf_conntrack_hash_rnd is initialized */
-               hash = hash_conntrack_raw(orig, zone);
+               hash = hash_conntrack_raw(orig);
        }
 
        /* We don't want any race condition at early drop stage */
@@ -840,10 +840,9 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
         * SLAB_DESTROY_BY_RCU.
         */
        ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
-       if (ct == NULL) {
-               atomic_dec(&net->ct.count);
-               return ERR_PTR(-ENOMEM);
-       }
+       if (ct == NULL)
+               goto out;
+
        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -857,31 +856,24 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
        memset(&ct->__nfct_init_offset[0], 0,
               offsetof(struct nf_conn, proto) -
               offsetof(struct nf_conn, __nfct_init_offset[0]));
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-       if (zone) {
-               struct nf_conntrack_zone *nf_ct_zone;
 
-               nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
-               if (!nf_ct_zone)
-                       goto out_free;
-               nf_ct_zone->id = zone;
-       }
-#endif
+       if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
+               goto out_free;
+
        /* Because we use RCU lookups, we set ct_general.use to zero before
         * this is inserted in any list.
         */
        atomic_set(&ct->ct_general.use, 0);
        return ct;
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
-       atomic_dec(&net->ct.count);
        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+out:
+       atomic_dec(&net->ct.count);
        return ERR_PTR(-ENOMEM);
-#endif
 }
 
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+                                  const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
@@ -923,8 +915,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_ecache *ecache;
        struct nf_conntrack_expect *exp = NULL;
-       u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+       const struct nf_conntrack_zone *zone;
        struct nf_conn_timeout *timeout_ext;
+       struct nf_conntrack_zone tmp;
        unsigned int *timeouts;
 
        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
@@ -932,6 +925,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
                return NULL;
        }
 
+       zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
                                  hash);
        if (IS_ERR(ct))
@@ -1026,10 +1020,11 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
 {
+       const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
+       struct nf_conntrack_zone tmp;
        struct nf_conn *ct;
-       u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
        u32 hash;
 
        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
@@ -1040,7 +1035,8 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
        }
 
        /* look for tuple match */
-       hash = hash_conntrack_raw(&tuple, zone);
+       zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+       hash = hash_conntrack_raw(&tuple);
        h = __nf_conntrack_find_get(net, zone, &tuple, hash);
        if (!h) {
                h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
@@ -1290,6 +1286,13 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
 }
 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
 
+/* Built-in default zone used e.g. by modules. */
+const struct nf_conntrack_zone nf_ct_zone_dflt = {
+       .id     = NF_CT_DEFAULT_ZONE_ID,
+       .dir    = NF_CT_DEFAULT_ZONE_DIR,
+};
+EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
+
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
        .len    = sizeof(struct nf_conntrack_zone),
@@ -1596,8 +1599,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
                                        struct nf_conntrack_tuple_hash, hnnode);
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        hlist_nulls_del_rcu(&h->hnnode);
-                       bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
-                                                 hashsize);
+                       bucket = __hash_conntrack(&h->tuple, hashsize);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
index b45a4223cb058a47ae2863a4166cd5085e587ca6..acf5c7b3f378c600ec983a0b92e7eb935c56b0c8 100644 (file)
@@ -88,7 +88,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
 }
 
 struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+                   const struct nf_conntrack_zone *zone,
                    const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_expect *i;
@@ -100,7 +101,7 @@ __nf_ct_expect_find(struct net *net, u16 zone,
        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-                   nf_ct_zone(i->master) == zone)
+                   nf_ct_zone_equal_any(i->master, zone))
                        return i;
        }
        return NULL;
@@ -109,7 +110,8 @@ EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
 
 /* Just find a expectation corresponding to a tuple. */
 struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+                     const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_expect *i;
@@ -127,7 +129,8 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
 /* If an expectation for this connection is found, it gets delete from
  * global list then returned. */
 struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+                      const struct nf_conntrack_zone *zone,
                       const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_expect *i, *exp = NULL;
@@ -140,7 +143,7 @@ nf_ct_find_expectation(struct net *net, u16 zone,
        hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-                   nf_ct_zone(i->master) == zone) {
+                   nf_ct_zone_equal_any(i->master, zone)) {
                        exp = i;
                        break;
                }
@@ -220,16 +223,16 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
        }
 
        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
-              nf_ct_zone(a->master) == nf_ct_zone(b->master);
+              nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
 static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
 {
        return a->master == b->master && a->class == b->class &&
-               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
-               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
-               nf_ct_zone(a->master) == nf_ct_zone(b->master);
+              nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
+              nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
+              nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
 /* Generally a bad idea to call this: could have matched already. */
index 6b8b0abbfab482280ae6a318f8bc58260e0b21c8..94a66541e0b76a1764ad6b2d73bf8f6cd1310a6b 100644 (file)
@@ -127,6 +127,20 @@ ctnetlink_dump_tuples(struct sk_buff *skb,
        return ret;
 }
 
+static inline int
+ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
+                      const struct nf_conntrack_zone *zone, int dir)
+{
+       if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
+               return 0;
+       if (nla_put_be16(skb, attrtype, htons(zone->id)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
 static inline int
 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
 {
@@ -458,6 +472,7 @@ static int
 ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                    struct nf_conn *ct)
 {
+       const struct nf_conntrack_zone *zone;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct nlattr *nest_parms;
@@ -473,11 +488,16 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
        nfmsg->version      = NFNETLINK_V0;
        nfmsg->res_id       = 0;
 
+       zone = nf_ct_zone(ct);
+
        nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
        if (!nest_parms)
                goto nla_put_failure;
        if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
                goto nla_put_failure;
+       if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+                                  NF_CT_ZONE_DIR_ORIG) < 0)
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
        nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -485,10 +505,13 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                goto nla_put_failure;
        if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
                goto nla_put_failure;
+       if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+                                  NF_CT_ZONE_DIR_REPL) < 0)
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
-       if (nf_ct_zone(ct) &&
-           nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+       if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+                                  NF_CT_DEFAULT_ZONE_DIR) < 0)
                goto nla_put_failure;
 
        if (ctnetlink_dump_status(skb, ct) < 0 ||
@@ -598,7 +621,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
               + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
 #endif
 #ifdef CONFIG_NF_CONNTRACK_ZONES
-              + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+              + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
 #endif
               + ctnetlink_proto_size(ct)
               + ctnetlink_label_size(ct)
@@ -609,6 +632,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
 static int
 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
 {
+       const struct nf_conntrack_zone *zone;
        struct net *net;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
@@ -655,11 +679,16 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
        nfmsg->res_id   = 0;
 
        rcu_read_lock();
+       zone = nf_ct_zone(ct);
+
        nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
        if (!nest_parms)
                goto nla_put_failure;
        if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
                goto nla_put_failure;
+       if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+                                  NF_CT_ZONE_DIR_ORIG) < 0)
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
        nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -667,10 +696,13 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
                goto nla_put_failure;
        if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
                goto nla_put_failure;
+       if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+                                  NF_CT_ZONE_DIR_REPL) < 0)
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
-       if (nf_ct_zone(ct) &&
-           nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+       if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+                                  NF_CT_DEFAULT_ZONE_DIR) < 0)
                goto nla_put_failure;
 
        if (ctnetlink_dump_id(skb, ct) < 0)
@@ -920,15 +952,54 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
        return ret;
 }
 
+static int
+ctnetlink_parse_zone(const struct nlattr *attr,
+                    struct nf_conntrack_zone *zone)
+{
+       nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
+                       NF_CT_DEFAULT_ZONE_DIR, 0);
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+       if (attr)
+               zone->id = ntohs(nla_get_be16(attr));
+#else
+       if (attr)
+               return -EOPNOTSUPP;
+#endif
+       return 0;
+}
+
+static int
+ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
+                          struct nf_conntrack_zone *zone)
+{
+       int ret;
+
+       if (zone->id != NF_CT_DEFAULT_ZONE_ID)
+               return -EINVAL;
+
+       ret = ctnetlink_parse_zone(attr, zone);
+       if (ret < 0)
+               return ret;
+
+       if (type == CTA_TUPLE_REPLY)
+               zone->dir = NF_CT_ZONE_DIR_REPL;
+       else
+               zone->dir = NF_CT_ZONE_DIR_ORIG;
+
+       return 0;
+}
+
 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
        [CTA_TUPLE_IP]          = { .type = NLA_NESTED },
        [CTA_TUPLE_PROTO]       = { .type = NLA_NESTED },
+       [CTA_TUPLE_ZONE]        = { .type = NLA_U16 },
 };
 
 static int
 ctnetlink_parse_tuple(const struct nlattr * const cda[],
                      struct nf_conntrack_tuple *tuple,
-                     enum ctattr_type type, u_int8_t l3num)
+                     enum ctattr_type type, u_int8_t l3num,
+                     struct nf_conntrack_zone *zone)
 {
        struct nlattr *tb[CTA_TUPLE_MAX+1];
        int err;
@@ -955,6 +1026,16 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
        if (err < 0)
                return err;
 
+       if (tb[CTA_TUPLE_ZONE]) {
+               if (!zone)
+                       return -EINVAL;
+
+               err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
+                                                type, zone);
+               if (err < 0)
+                       return err;
+       }
+
        /* orig and expect tuples get DIR_ORIGINAL */
        if (type == CTA_TUPLE_REPLY)
                tuple->dst.dir = IP_CT_DIR_REPLY;
@@ -964,21 +1045,6 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
        return 0;
 }
 
-static int
-ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
-{
-       if (attr)
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-               *zone = ntohs(nla_get_be16(attr));
-#else
-               return -EOPNOTSUPP;
-#endif
-       else
-               *zone = 0;
-
-       return 0;
-}
-
 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
        [CTA_HELP_NAME]         = { .type = NLA_NUL_STRING,
                                    .len = NF_CT_HELPER_NAME_LEN - 1 },
@@ -1058,7 +1124,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
        struct nf_conn *ct;
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int8_t u3 = nfmsg->nfgen_family;
-       u16 zone;
+       struct nf_conntrack_zone zone;
        int err;
 
        err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
@@ -1066,9 +1132,11 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
                return err;
 
        if (cda[CTA_TUPLE_ORIG])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+                                           u3, &zone);
        else if (cda[CTA_TUPLE_REPLY])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+                                           u3, &zone);
        else {
                return ctnetlink_flush_conntrack(net, cda,
                                                 NETLINK_CB(skb).portid,
@@ -1078,7 +1146,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       h = nf_conntrack_find_get(net, zone, &tuple);
+       h = nf_conntrack_find_get(net, &zone, &tuple);
        if (!h)
                return -ENOENT;
 
@@ -1112,7 +1180,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
        struct sk_buff *skb2 = NULL;
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int8_t u3 = nfmsg->nfgen_family;
-       u16 zone;
+       struct nf_conntrack_zone zone;
        int err;
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -1138,16 +1206,18 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
                return err;
 
        if (cda[CTA_TUPLE_ORIG])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+                                           u3, &zone);
        else if (cda[CTA_TUPLE_REPLY])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+                                           u3, &zone);
        else
                return -EINVAL;
 
        if (err < 0)
                return err;
 
-       h = nf_conntrack_find_get(net, zone, &tuple);
+       h = nf_conntrack_find_get(net, &zone, &tuple);
        if (!h)
                return -ENOENT;
 
@@ -1645,7 +1715,8 @@ ctnetlink_change_conntrack(struct nf_conn *ct,
 }
 
 static struct nf_conn *
-ctnetlink_create_conntrack(struct net *net, u16 zone,
+ctnetlink_create_conntrack(struct net *net,
+                          const struct nf_conntrack_zone *zone,
                           const struct nlattr * const cda[],
                           struct nf_conntrack_tuple *otuple,
                           struct nf_conntrack_tuple *rtuple,
@@ -1761,7 +1832,8 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
                struct nf_conntrack_tuple_hash *master_h;
                struct nf_conn *master_ct;
 
-               err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
+               err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
+                                           u3, NULL);
                if (err < 0)
                        goto err2;
 
@@ -1804,7 +1876,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        struct nf_conn *ct;
        u_int8_t u3 = nfmsg->nfgen_family;
-       u16 zone;
+       struct nf_conntrack_zone zone;
        int err;
 
        err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
@@ -1812,21 +1884,23 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                return err;
 
        if (cda[CTA_TUPLE_ORIG]) {
-               err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
+               err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
+                                           u3, &zone);
                if (err < 0)
                        return err;
        }
 
        if (cda[CTA_TUPLE_REPLY]) {
-               err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
+               err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
+                                           u3, &zone);
                if (err < 0)
                        return err;
        }
 
        if (cda[CTA_TUPLE_ORIG])
-               h = nf_conntrack_find_get(net, zone, &otuple);
+               h = nf_conntrack_find_get(net, &zone, &otuple);
        else if (cda[CTA_TUPLE_REPLY])
-               h = nf_conntrack_find_get(net, zone, &rtuple);
+               h = nf_conntrack_find_get(net, &zone, &rtuple);
 
        if (h == NULL) {
                err = -ENOENT;
@@ -1836,7 +1910,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                        if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
                                return -EINVAL;
 
-                       ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
+                       ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
                                                        &rtuple, u3);
                        if (IS_ERR(ct))
                                return PTR_ERR(ct);
@@ -2082,7 +2156,7 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
               + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
 #endif
 #ifdef CONFIG_NF_CONNTRACK_ZONES
-              + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+              + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
 #endif
               + ctnetlink_proto_size(ct)
               ;
@@ -2091,14 +2165,20 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
 static int
 ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
 {
+       const struct nf_conntrack_zone *zone;
        struct nlattr *nest_parms;
 
        rcu_read_lock();
+       zone = nf_ct_zone(ct);
+
        nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
        if (!nest_parms)
                goto nla_put_failure;
        if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
                goto nla_put_failure;
+       if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+                                  NF_CT_ZONE_DIR_ORIG) < 0)
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
        nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -2106,12 +2186,14 @@ ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
                goto nla_put_failure;
        if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
                goto nla_put_failure;
+       if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+                                  NF_CT_ZONE_DIR_REPL) < 0)
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
-       if (nf_ct_zone(ct)) {
-               if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
-                       goto nla_put_failure;
-       }
+       if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+                                  NF_CT_DEFAULT_ZONE_DIR) < 0)
+               goto nla_put_failure;
 
        if (ctnetlink_dump_id(skb, ct) < 0)
                goto nla_put_failure;
@@ -2218,12 +2300,12 @@ static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
        int err;
 
        err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
-                                   nf_ct_l3num(ct));
+                                   nf_ct_l3num(ct), NULL);
        if (err < 0)
                return err;
 
        return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
-                                    nf_ct_l3num(ct));
+                                    nf_ct_l3num(ct), NULL);
 }
 
 static int
@@ -2612,23 +2694,22 @@ static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
-       u16 zone = 0;
+       struct nf_conntrack_zone zone;
        struct netlink_dump_control c = {
                .dump = ctnetlink_exp_ct_dump_table,
                .done = ctnetlink_exp_done,
        };
 
-       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
+       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
+                                   u3, NULL);
        if (err < 0)
                return err;
 
-       if (cda[CTA_EXPECT_ZONE]) {
-               err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
-               if (err < 0)
-                       return err;
-       }
+       err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
+       if (err < 0)
+               return err;
 
-       h = nf_conntrack_find_get(net, zone, &tuple);
+       h = nf_conntrack_find_get(net, &zone, &tuple);
        if (!h)
                return -ENOENT;
 
@@ -2652,7 +2733,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
        struct sk_buff *skb2;
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int8_t u3 = nfmsg->nfgen_family;
-       u16 zone;
+       struct nf_conntrack_zone zone;
        int err;
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -2672,16 +2753,18 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
                return err;
 
        if (cda[CTA_EXPECT_TUPLE])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+                                           u3, NULL);
        else if (cda[CTA_EXPECT_MASTER])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
+                                           u3, NULL);
        else
                return -EINVAL;
 
        if (err < 0)
                return err;
 
-       exp = nf_ct_expect_find_get(net, zone, &tuple);
+       exp = nf_ct_expect_find_get(net, &zone, &tuple);
        if (!exp)
                return -ENOENT;
 
@@ -2732,8 +2815,8 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        struct hlist_node *next;
        u_int8_t u3 = nfmsg->nfgen_family;
+       struct nf_conntrack_zone zone;
        unsigned int i;
-       u16 zone;
        int err;
 
        if (cda[CTA_EXPECT_TUPLE]) {
@@ -2742,12 +2825,13 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                if (err < 0)
                        return err;
 
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+                                           u3, NULL);
                if (err < 0)
                        return err;
 
                /* bump usage count to 2 */
-               exp = nf_ct_expect_find_get(net, zone, &tuple);
+               exp = nf_ct_expect_find_get(net, &zone, &tuple);
                if (!exp)
                        return -ENOENT;
 
@@ -2849,7 +2933,8 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
                return -EINVAL;
 
        err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
-                                       &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
+                                   &nat_tuple, CTA_EXPECT_NAT_TUPLE,
+                                   u3, NULL);
        if (err < 0)
                return err;
 
@@ -2937,7 +3022,8 @@ err_out:
 }
 
 static int
-ctnetlink_create_expect(struct net *net, u16 zone,
+ctnetlink_create_expect(struct net *net,
+                       const struct nf_conntrack_zone *zone,
                        const struct nlattr * const cda[],
                        u_int8_t u3, u32 portid, int report)
 {
@@ -2949,13 +3035,16 @@ ctnetlink_create_expect(struct net *net, u16 zone,
        int err;
 
        /* caller guarantees that those three CTA_EXPECT_* exist */
-       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+                                   u3, NULL);
        if (err < 0)
                return err;
-       err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
+       err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
+                                   u3, NULL);
        if (err < 0)
                return err;
-       err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
+       err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
+                                   u3, NULL);
        if (err < 0)
                return err;
 
@@ -3011,7 +3100,7 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
        struct nf_conntrack_expect *exp;
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int8_t u3 = nfmsg->nfgen_family;
-       u16 zone;
+       struct nf_conntrack_zone zone;
        int err;
 
        if (!cda[CTA_EXPECT_TUPLE]
@@ -3023,19 +3112,18 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+                                   u3, NULL);
        if (err < 0)
                return err;
 
        spin_lock_bh(&nf_conntrack_expect_lock);
-       exp = __nf_ct_expect_find(net, zone, &tuple);
-
+       exp = __nf_ct_expect_find(net, &zone, &tuple);
        if (!exp) {
                spin_unlock_bh(&nf_conntrack_expect_lock);
                err = -ENOENT;
                if (nlh->nlmsg_flags & NLM_F_CREATE) {
-                       err = ctnetlink_create_expect(net, zone, cda,
-                                                     u3,
+                       err = ctnetlink_create_expect(net, &zone, cda, u3,
                                                      NETLINK_CB(skb).portid,
                                                      nlmsg_report(nlh));
                }
index 825c3e3f83053582dba71c0128d706cb516c943a..5588c7ae1ac26740df91576867d5aff51aa961f9 100644 (file)
@@ -143,13 +143,14 @@ static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
                                  const struct nf_conntrack_tuple *t)
 {
        const struct nf_conntrack_tuple_hash *h;
+       const struct nf_conntrack_zone *zone;
        struct nf_conntrack_expect *exp;
        struct nf_conn *sibling;
-       u16 zone = nf_ct_zone(ct);
 
        pr_debug("trying to timeout ct or exp for tuple ");
        nf_ct_dump_tuple(t);
 
+       zone = nf_ct_zone(ct);
        h = nf_conntrack_find_get(net, zone, t);
        if (h)  {
                sibling = nf_ct_tuplehash_to_ctrack(h);
index ce3e840c870452b705744f9b64fb32661b6c82a5..dff0f0cc59e456171d81b8c5400bf30378f5ea66 100644 (file)
@@ -103,9 +103,9 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb,
                         ntohl(sack->end_seq), ntohl(new_end_seq));
 
                inet_proto_csum_replace4(&tcph->check, skb,
-                                        sack->start_seq, new_start_seq, 0);
+                                        sack->start_seq, new_start_seq, false);
                inet_proto_csum_replace4(&tcph->check, skb,
-                                        sack->end_seq, new_end_seq, 0);
+                                        sack->end_seq, new_end_seq, false);
                sack->start_seq = new_start_seq;
                sack->end_seq = new_end_seq;
                sackoff += sizeof(*sack);
@@ -193,8 +193,9 @@ int nf_ct_seq_adjust(struct sk_buff *skb,
        newseq = htonl(ntohl(tcph->seq) + seqoff);
        newack = htonl(ntohl(tcph->ack_seq) - ackoff);
 
-       inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
-       inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
+       inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false);
+       inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack,
+                                false);
 
        pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
                 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
index fc823fa5dcf53794bc8977cb5502d1dac92938e2..1fb3cacc04e16794ce27e9061893b9a90015fb82 100644 (file)
@@ -140,6 +140,35 @@ static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
 }
 #endif
 
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+static void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+                        int dir)
+{
+       const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
+
+       if (zone->dir != dir)
+               return;
+       switch (zone->dir) {
+       case NF_CT_DEFAULT_ZONE_DIR:
+               seq_printf(s, "zone=%u ", zone->id);
+               break;
+       case NF_CT_ZONE_DIR_ORIG:
+               seq_printf(s, "zone-orig=%u ", zone->id);
+               break;
+       case NF_CT_ZONE_DIR_REPL:
+               seq_printf(s, "zone-reply=%u ", zone->id);
+               break;
+       default:
+               break;
+       }
+}
+#else
+static inline void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+                               int dir)
+{
+}
+#endif
+
 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
 static void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
 {
@@ -202,6 +231,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
        print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                    l3proto, l4proto);
 
+       ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG);
+
        if (seq_has_overflowed(s))
                goto release;
 
@@ -214,6 +245,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
        print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                    l3proto, l4proto);
 
+       ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL);
+
        if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
                goto release;
 
@@ -228,11 +261,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
 #endif
 
        ct_show_secctx(s, ct);
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-       seq_printf(s, "zone=%u ", nf_ct_zone(ct));
-#endif
-
+       ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR);
        ct_show_delta_time(s, ct);
 
        seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
index 4e0b47831d43a25f021a1eeb2c62c2307b8630e1..5113dfd39df929967f247ca644a96664e6d72f1a 100644 (file)
@@ -118,14 +118,13 @@ EXPORT_SYMBOL(nf_xfrm_me_harder);
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct net *net, u16 zone,
-           const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
 {
        unsigned int hash;
 
        /* Original src, to ensure we map it consistently if poss. */
        hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
-                     tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
+                     tuple->dst.protonum ^ nf_conntrack_hash_rnd);
 
        return reciprocal_scale(hash, net->ct.nat_htable_size);
 }
@@ -185,20 +184,22 @@ same_src(const struct nf_conn *ct,
 
 /* Only called for SRC manip */
 static int
-find_appropriate_src(struct net *net, u16 zone,
+find_appropriate_src(struct net *net,
+                    const struct nf_conntrack_zone *zone,
                     const struct nf_nat_l3proto *l3proto,
                     const struct nf_nat_l4proto *l4proto,
                     const struct nf_conntrack_tuple *tuple,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range *range)
 {
-       unsigned int h = hash_by_src(net, zone, tuple);
+       unsigned int h = hash_by_src(net, tuple);
        const struct nf_conn_nat *nat;
        const struct nf_conn *ct;
 
        hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
                ct = nat->ct;
-               if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
+               if (same_src(ct, tuple) &&
+                   nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
                        /* Copy source part from reply tuple. */
                        nf_ct_invert_tuplepr(result,
                                       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
@@ -218,7 +219,8 @@ find_appropriate_src(struct net *net, u16 zone,
  * the ip with the lowest src-ip/dst-ip/proto usage.
  */
 static void
-find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
+find_best_ips_proto(const struct nf_conntrack_zone *zone,
+                   struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range *range,
                    const struct nf_conn *ct,
                    enum nf_nat_manip_type maniptype)
@@ -258,7 +260,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
         */
        j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
                   range->flags & NF_NAT_RANGE_PERSISTENT ?
-                       0 : (__force u32)tuple->dst.u3.all[max] ^ zone);
+                       0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);
 
        full_range = false;
        for (i = 0; i <= max; i++) {
@@ -297,10 +299,12 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 struct nf_conn *ct,
                 enum nf_nat_manip_type maniptype)
 {
+       const struct nf_conntrack_zone *zone;
        const struct nf_nat_l3proto *l3proto;
        const struct nf_nat_l4proto *l4proto;
        struct net *net = nf_ct_net(ct);
-       u16 zone = nf_ct_zone(ct);
+
+       zone = nf_ct_zone(ct);
 
        rcu_read_lock();
        l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
@@ -420,7 +424,7 @@ nf_nat_setup_info(struct nf_conn *ct,
        if (maniptype == NF_NAT_MANIP_SRC) {
                unsigned int srchash;
 
-               srchash = hash_by_src(net, nf_ct_zone(ct),
+               srchash = hash_by_src(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                spin_lock_bh(&nf_nat_lock);
                /* nf_conntrack_alter_reply might re-allocate extension aera */
index b8067b53ff3a8579e9ba126288c65ee89702fbac..15c47b246d0d0a0632574e56d2caa9d12514966d 100644 (file)
@@ -69,7 +69,7 @@ dccp_manip_pkt(struct sk_buff *skb,
        l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum,
                             tuple, maniptype);
        inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
-                                0);
+                                false);
        return true;
 }
 
index 37f5505f4529be54f45ef773088ca7aadbbccaed..4f8820fc514804d775274330f590fe0d1dbab54f 100644 (file)
@@ -70,7 +70,7 @@ tcp_manip_pkt(struct sk_buff *skb,
                return true;
 
        l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
-       inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
+       inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false);
        return true;
 }
 
index b0ede2f0d8bcbd0c7ee156cec4c73a432b9438cd..b1e627227b6e2670fb6ce9d151e8965a4c8731c3 100644 (file)
@@ -57,7 +57,7 @@ udp_manip_pkt(struct sk_buff *skb,
                l3proto->csum_update(skb, iphdroff, &hdr->check,
                                     tuple, maniptype);
                inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
-                                        0);
+                                        false);
                if (!hdr->check)
                        hdr->check = CSUM_MANGLED_0;
        }
index 368f14e01e758d9771534eee7744d5d5a66cf8ba..58340c97bd836ffedd512a895cc02f56ee05f169 100644 (file)
@@ -56,7 +56,7 @@ udplite_manip_pkt(struct sk_buff *skb,
        }
 
        l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
-       inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
+       inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, false);
        if (!hdr->check)
                hdr->check = CSUM_MANGLED_0;
 
index d7f1685279034b5e3b62748284d24c52cc1f8907..8fbbdb09826eefd29105b3e80f87c1e54dbb64a9 100644 (file)
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_tcpudp.h>
 #include <linux/netfilter/xt_SYNPROXY.h>
+
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_seqadj.h>
 #include <net/netfilter/nf_conntrack_synproxy.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 
 int synproxy_net_id;
 EXPORT_SYMBOL_GPL(synproxy_net_id);
@@ -225,7 +227,7 @@ unsigned int synproxy_tstamp_adjust(struct sk_buff *skb,
                                                     synproxy->tsoff);
                                }
                                inet_proto_csum_replace4(&th->check, skb,
-                                                        old, *ptr, 0);
+                                                        old, *ptr, false);
                                return 1;
                        }
                        optoff += op[1];
@@ -352,7 +354,7 @@ static int __net_init synproxy_net_init(struct net *net)
        struct nf_conn *ct;
        int err = -ENOMEM;
 
-       ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
+       ct = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL);
        if (!ct)
                goto err1;
 
index c18af2f63eefb07e00be893190c35492232d4008..fefbf5f0b28d2f91e33db5e5d04182ff5a42db4b 100644 (file)
@@ -27,8 +27,6 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure");
 
-static LIST_HEAD(nfnl_acct_list);
-
 struct nf_acct {
        atomic64_t              pkts;
        atomic64_t              bytes;
@@ -53,6 +51,7 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
             const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
        struct nf_acct *nfacct, *matching = NULL;
+       struct net *net = sock_net(nfnl);
        char *acct_name;
        unsigned int size = 0;
        u32 flags = 0;
@@ -64,7 +63,7 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
        if (strlen(acct_name) == 0)
                return -EINVAL;
 
-       list_for_each_entry(nfacct, &nfnl_acct_list, head) {
+       list_for_each_entry(nfacct, &net->nfnl_acct_list, head) {
                if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
                        continue;
 
@@ -124,7 +123,7 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
                             be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
        }
        atomic_set(&nfacct->refcnt, 1);
-       list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
+       list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list);
        return 0;
 }
 
@@ -185,6 +184,7 @@ nla_put_failure:
 static int
 nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct net *net = sock_net(skb->sk);
        struct nf_acct *cur, *last;
        const struct nfacct_filter *filter = cb->data;
 
@@ -196,7 +196,7 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
                cb->args[1] = 0;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+       list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
                if (last) {
                        if (cur != last)
                                continue;
@@ -257,6 +257,7 @@ static int
 nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
             const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
+       struct net *net = sock_net(nfnl);
        int ret = -ENOENT;
        struct nf_acct *cur;
        char *acct_name;
@@ -283,7 +284,7 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
                return -EINVAL;
        acct_name = nla_data(tb[NFACCT_NAME]);
 
-       list_for_each_entry(cur, &nfnl_acct_list, head) {
+       list_for_each_entry(cur, &net->nfnl_acct_list, head) {
                struct sk_buff *skb2;
 
                if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0)
@@ -336,19 +337,20 @@ static int
 nfnl_acct_del(struct sock *nfnl, struct sk_buff *skb,
             const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
+       struct net *net = sock_net(nfnl);
        char *acct_name;
        struct nf_acct *cur;
        int ret = -ENOENT;
 
        if (!tb[NFACCT_NAME]) {
-               list_for_each_entry(cur, &nfnl_acct_list, head)
+               list_for_each_entry(cur, &net->nfnl_acct_list, head)
                        nfnl_acct_try_del(cur);
 
                return 0;
        }
        acct_name = nla_data(tb[NFACCT_NAME]);
 
-       list_for_each_entry(cur, &nfnl_acct_list, head) {
+       list_for_each_entry(cur, &net->nfnl_acct_list, head) {
                if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
                        continue;
 
@@ -394,12 +396,12 @@ static const struct nfnetlink_subsystem nfnl_acct_subsys = {
 
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT);
 
-struct nf_acct *nfnl_acct_find_get(const char *acct_name)
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *acct_name)
 {
        struct nf_acct *cur, *acct = NULL;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+       list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
                if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0)
                        continue;
 
@@ -422,7 +424,9 @@ EXPORT_SYMBOL_GPL(nfnl_acct_find_get);
 
 void nfnl_acct_put(struct nf_acct *acct)
 {
-       atomic_dec(&acct->refcnt);
+       if (atomic_dec_and_test(&acct->refcnt))
+               kfree_rcu(acct, rcu_head);
+
        module_put(THIS_MODULE);
 }
 EXPORT_SYMBOL_GPL(nfnl_acct_put);
@@ -478,34 +482,59 @@ int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
 }
 EXPORT_SYMBOL_GPL(nfnl_acct_overquota);
 
+static int __net_init nfnl_acct_net_init(struct net *net)
+{
+       INIT_LIST_HEAD(&net->nfnl_acct_list);
+
+       return 0;
+}
+
+static void __net_exit nfnl_acct_net_exit(struct net *net)
+{
+       struct nf_acct *cur, *tmp;
+
+       list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) {
+               list_del_rcu(&cur->head);
+
+               if (atomic_dec_and_test(&cur->refcnt))
+                       kfree_rcu(cur, rcu_head);
+       }
+}
+
+static struct pernet_operations nfnl_acct_ops = {
+        .init   = nfnl_acct_net_init,
+        .exit   = nfnl_acct_net_exit,
+};
+
 static int __init nfnl_acct_init(void)
 {
        int ret;
 
+       ret = register_pernet_subsys(&nfnl_acct_ops);
+       if (ret < 0) {
+               pr_err("nfnl_acct_init: failed to register pernet ops\n");
+               goto err_out;
+       }
+
        pr_info("nfnl_acct: registering with nfnetlink.\n");
        ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
        if (ret < 0) {
                pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
-               goto err_out;
+               goto cleanup_pernet;
        }
        return 0;
+
+cleanup_pernet:
+       unregister_pernet_subsys(&nfnl_acct_ops);
 err_out:
        return ret;
 }
 
 static void __exit nfnl_acct_exit(void)
 {
-       struct nf_acct *cur, *tmp;
-
        pr_info("nfnl_acct: unregistering from nfnetlink.\n");
        nfnetlink_subsys_unregister(&nfnl_acct_subsys);
-
-       list_for_each_entry_safe(cur, tmp, &nfnl_acct_list, head) {
-               list_del_rcu(&cur->head);
-               /* We are sure that our objects have no clients at this point,
-                * it's safe to release them all without checking refcnt. */
-               kfree_rcu(cur, rcu_head);
-       }
+       unregister_pernet_subsys(&nfnl_acct_ops);
 }
 
 module_init(nfnl_acct_init);
index 17591239229f75564b944dc7db61e1dae6a2f1f1..1067fb4c1ffa2ec24988143fc7ec8d134dc9dda6 100644 (file)
 #include <net/netfilter/nf_tables.h>
 
 struct nft_counter {
-       seqlock_t       lock;
        u64             bytes;
        u64             packets;
 };
 
+struct nft_counter_percpu {
+       struct nft_counter      counter;
+       struct u64_stats_sync   syncp;
+};
+
+struct nft_counter_percpu_priv {
+       struct nft_counter_percpu __percpu *counter;
+};
+
 static void nft_counter_eval(const struct nft_expr *expr,
                             struct nft_regs *regs,
                             const struct nft_pktinfo *pkt)
 {
-       struct nft_counter *priv = nft_expr_priv(expr);
-
-       write_seqlock_bh(&priv->lock);
-       priv->bytes += pkt->skb->len;
-       priv->packets++;
-       write_sequnlock_bh(&priv->lock);
+       struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+       struct nft_counter_percpu *this_cpu;
+
+       local_bh_disable();
+       this_cpu = this_cpu_ptr(priv->counter);
+       u64_stats_update_begin(&this_cpu->syncp);
+       this_cpu->counter.bytes += pkt->skb->len;
+       this_cpu->counter.packets++;
+       u64_stats_update_end(&this_cpu->syncp);
+       local_bh_enable();
 }
 
 static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
-       struct nft_counter *priv = nft_expr_priv(expr);
+       struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+       struct nft_counter_percpu *cpu_stats;
+       struct nft_counter total;
+       u64 bytes, packets;
        unsigned int seq;
-       u64 bytes;
-       u64 packets;
-
-       do {
-               seq = read_seqbegin(&priv->lock);
-               bytes   = priv->bytes;
-               packets = priv->packets;
-       } while (read_seqretry(&priv->lock, seq));
-
-       if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(bytes)))
-               goto nla_put_failure;
-       if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(packets)))
+       int cpu;
+
+       memset(&total, 0, sizeof(total));
+       for_each_possible_cpu(cpu) {
+               cpu_stats = per_cpu_ptr(priv->counter, cpu);
+               do {
+                       seq     = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+                       bytes   = cpu_stats->counter.bytes;
+                       packets = cpu_stats->counter.packets;
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+
+               total.packets += packets;
+               total.bytes += bytes;
+       }
+
+       if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
+           nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
                goto nla_put_failure;
        return 0;
 
@@ -67,23 +87,44 @@ static int nft_counter_init(const struct nft_ctx *ctx,
                            const struct nft_expr *expr,
                            const struct nlattr * const tb[])
 {
-       struct nft_counter *priv = nft_expr_priv(expr);
+       struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+       struct nft_counter_percpu __percpu *cpu_stats;
+       struct nft_counter_percpu *this_cpu;
+
+       cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu);
+       if (cpu_stats == NULL)
+               return ENOMEM;
+
+       preempt_disable();
+       this_cpu = this_cpu_ptr(cpu_stats);
+       if (tb[NFTA_COUNTER_PACKETS]) {
+               this_cpu->counter.packets =
+                       be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+       }
+       if (tb[NFTA_COUNTER_BYTES]) {
+               this_cpu->counter.bytes =
+                       be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+       }
+       preempt_enable();
+       priv->counter = cpu_stats;
+       return 0;
+}
 
-       if (tb[NFTA_COUNTER_PACKETS])
-               priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
-       if (tb[NFTA_COUNTER_BYTES])
-               priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+static void nft_counter_destroy(const struct nft_ctx *ctx,
+                               const struct nft_expr *expr)
+{
+       struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
 
-       seqlock_init(&priv->lock);
-       return 0;
+       free_percpu(priv->counter);
 }
 
 static struct nft_expr_type nft_counter_type;
 static const struct nft_expr_ops nft_counter_ops = {
        .type           = &nft_counter_type,
-       .size           = NFT_EXPR_SIZE(sizeof(struct nft_counter)),
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_counter_percpu_priv)),
        .eval           = nft_counter_eval,
        .init           = nft_counter_init,
+       .destroy        = nft_counter_destroy,
        .dump           = nft_counter_dump,
 };
 
index 435c1ccd6c0e6a74266b2054a62f603d038ecb00..5d67938f8b2f27a090f47386fe4ec889c0507b13 100644 (file)
 static DEFINE_SPINLOCK(limit_lock);
 
 struct nft_limit {
+       u64             last;
        u64             tokens;
+       u64             tokens_max;
        u64             rate;
-       u64             unit;
-       unsigned long   stamp;
+       u64             nsecs;
+       u32             burst;
 };
 
-static void nft_limit_eval(const struct nft_expr *expr,
-                          struct nft_regs *regs,
-                          const struct nft_pktinfo *pkt)
+static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
 {
-       struct nft_limit *priv = nft_expr_priv(expr);
+       u64 now, tokens;
+       s64 delta;
 
        spin_lock_bh(&limit_lock);
-       if (time_after_eq(jiffies, priv->stamp)) {
-               priv->tokens = priv->rate;
-               priv->stamp = jiffies + priv->unit * HZ;
-       }
-
-       if (priv->tokens >= 1) {
-               priv->tokens--;
+       now = ktime_get_ns();
+       tokens = limit->tokens + now - limit->last;
+       if (tokens > limit->tokens_max)
+               tokens = limit->tokens_max;
+
+       limit->last = now;
+       delta = tokens - cost;
+       if (delta >= 0) {
+               limit->tokens = delta;
                spin_unlock_bh(&limit_lock);
-               return;
+               return false;
        }
+       limit->tokens = tokens;
        spin_unlock_bh(&limit_lock);
-
-       regs->verdict.code = NFT_BREAK;
+       return true;
 }
 
-static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
-       [NFTA_LIMIT_RATE]       = { .type = NLA_U64 },
-       [NFTA_LIMIT_UNIT]       = { .type = NLA_U64 },
-};
-
-static int nft_limit_init(const struct nft_ctx *ctx,
-                         const struct nft_expr *expr,
+static int nft_limit_init(struct nft_limit *limit,
                          const struct nlattr * const tb[])
 {
-       struct nft_limit *priv = nft_expr_priv(expr);
+       u64 unit;
 
        if (tb[NFTA_LIMIT_RATE] == NULL ||
            tb[NFTA_LIMIT_UNIT] == NULL)
                return -EINVAL;
 
-       priv->rate   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
-       priv->unit   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
-       priv->stamp  = jiffies + priv->unit * HZ;
-       priv->tokens = priv->rate;
+       limit->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+       unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+       limit->nsecs = unit * NSEC_PER_SEC;
+       if (limit->rate == 0 || limit->nsecs < unit)
+               return -EOVERFLOW;
+       limit->tokens = limit->tokens_max = limit->nsecs;
+
+       if (tb[NFTA_LIMIT_BURST]) {
+               u64 rate;
+
+               limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
+
+               rate = limit->rate + limit->burst;
+               if (rate < limit->rate)
+                       return -EOVERFLOW;
+
+               limit->rate = rate;
+       }
+       limit->last = ktime_get_ns();
+
        return 0;
 }
 
-static int nft_limit_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
+                         enum nft_limit_type type)
 {
-       const struct nft_limit *priv = nft_expr_priv(expr);
+       u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
+       u64 rate = limit->rate - limit->burst;
 
-       if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate)))
-               goto nla_put_failure;
-       if (nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(priv->unit)))
+       if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate)) ||
+           nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs)) ||
+           nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(limit->burst)) ||
+           nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)))
                goto nla_put_failure;
        return 0;
 
@@ -84,18 +100,114 @@ nla_put_failure:
        return -1;
 }
 
+struct nft_limit_pkts {
+       struct nft_limit        limit;
+       u64                     cost;
+};
+
+static void nft_limit_pkts_eval(const struct nft_expr *expr,
+                               struct nft_regs *regs,
+                               const struct nft_pktinfo *pkt)
+{
+       struct nft_limit_pkts *priv = nft_expr_priv(expr);
+
+       if (nft_limit_eval(&priv->limit, priv->cost))
+               regs->verdict.code = NFT_BREAK;
+}
+
+static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
+       [NFTA_LIMIT_RATE]       = { .type = NLA_U64 },
+       [NFTA_LIMIT_UNIT]       = { .type = NLA_U64 },
+       [NFTA_LIMIT_BURST]      = { .type = NLA_U32 },
+       [NFTA_LIMIT_TYPE]       = { .type = NLA_U32 },
+};
+
+static int nft_limit_pkts_init(const struct nft_ctx *ctx,
+                              const struct nft_expr *expr,
+                              const struct nlattr * const tb[])
+{
+       struct nft_limit_pkts *priv = nft_expr_priv(expr);
+       int err;
+
+       err = nft_limit_init(&priv->limit, tb);
+       if (err < 0)
+               return err;
+
+       priv->cost = div_u64(priv->limit.nsecs, priv->limit.rate);
+       return 0;
+}
+
+static int nft_limit_pkts_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_limit_pkts *priv = nft_expr_priv(expr);
+
+       return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
+}
+
 static struct nft_expr_type nft_limit_type;
-static const struct nft_expr_ops nft_limit_ops = {
+static const struct nft_expr_ops nft_limit_pkts_ops = {
+       .type           = &nft_limit_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_limit_pkts)),
+       .eval           = nft_limit_pkts_eval,
+       .init           = nft_limit_pkts_init,
+       .dump           = nft_limit_pkts_dump,
+};
+
+static void nft_limit_pkt_bytes_eval(const struct nft_expr *expr,
+                                    struct nft_regs *regs,
+                                    const struct nft_pktinfo *pkt)
+{
+       struct nft_limit *priv = nft_expr_priv(expr);
+       u64 cost = div_u64(priv->nsecs * pkt->skb->len, priv->rate);
+
+       if (nft_limit_eval(priv, cost))
+               regs->verdict.code = NFT_BREAK;
+}
+
+static int nft_limit_pkt_bytes_init(const struct nft_ctx *ctx,
+                                   const struct nft_expr *expr,
+                                   const struct nlattr * const tb[])
+{
+       struct nft_limit *priv = nft_expr_priv(expr);
+
+       return nft_limit_init(priv, tb);
+}
+
+static int nft_limit_pkt_bytes_dump(struct sk_buff *skb,
+                                   const struct nft_expr *expr)
+{
+       const struct nft_limit *priv = nft_expr_priv(expr);
+
+       return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
+}
+
+static const struct nft_expr_ops nft_limit_pkt_bytes_ops = {
        .type           = &nft_limit_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_limit)),
-       .eval           = nft_limit_eval,
-       .init           = nft_limit_init,
-       .dump           = nft_limit_dump,
+       .eval           = nft_limit_pkt_bytes_eval,
+       .init           = nft_limit_pkt_bytes_init,
+       .dump           = nft_limit_pkt_bytes_dump,
 };
 
+static const struct nft_expr_ops *
+nft_limit_select_ops(const struct nft_ctx *ctx,
+                    const struct nlattr * const tb[])
+{
+       if (tb[NFTA_LIMIT_TYPE] == NULL)
+               return &nft_limit_pkts_ops;
+
+       switch (ntohl(nla_get_be32(tb[NFTA_LIMIT_TYPE]))) {
+       case NFT_LIMIT_PKTS:
+               return &nft_limit_pkts_ops;
+       case NFT_LIMIT_PKT_BYTES:
+               return &nft_limit_pkt_bytes_ops;
+       }
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
 static struct nft_expr_type nft_limit_type __read_mostly = {
        .name           = "limit",
-       .ops            = &nft_limit_ops,
+       .select_ops     = nft_limit_select_ops,
        .policy         = nft_limit_policy,
        .maxattr        = NFTA_LIMIT_MAX,
        .flags          = NFT_EXPR_STATEFUL,
index 94fb3b27a2c54393091602e0e96b2634ff8ceb1b..09b4b07eb67644fdc90ef357378c46d243b7a642 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/if_vlan.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/netlink.h>
 #include <net/netfilter/nf_tables_core.h>
 #include <net/netfilter/nf_tables.h>
 
+/* add vlan header into the user buffer for if tag was removed by offloads */
+static bool
+nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+{
+       int mac_off = skb_mac_header(skb) - skb->data;
+       u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
+       struct vlan_ethhdr veth;
+
+       vlanh = (u8 *) &veth;
+       if (offset < ETH_HLEN) {
+               u8 ethlen = min_t(u8, len, ETH_HLEN - offset);
+
+               if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
+                       return false;
+
+               veth.h_vlan_proto = skb->vlan_proto;
+
+               memcpy(dst_u8, vlanh + offset, ethlen);
+
+               len -= ethlen;
+               if (len == 0)
+                       return true;
+
+               dst_u8 += ethlen;
+               offset = ETH_HLEN;
+       } else if (offset >= VLAN_ETH_HLEN) {
+               offset -= VLAN_HLEN;
+               goto skip;
+       }
+
+       veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+       veth.h_vlan_encapsulated_proto = skb->protocol;
+
+       vlanh += offset;
+
+       vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
+       memcpy(dst_u8, vlanh, vlan_len);
+
+       len -= vlan_len;
+       if (!len)
+               return true;
+
+       dst_u8 += vlan_len;
+ skip:
+       return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
+}
+
 static void nft_payload_eval(const struct nft_expr *expr,
                             struct nft_regs *regs,
                             const struct nft_pktinfo *pkt)
@@ -26,10 +74,18 @@ static void nft_payload_eval(const struct nft_expr *expr,
        u32 *dest = &regs->data[priv->dreg];
        int offset;
 
+       dest[priv->len / NFT_REG32_SIZE] = 0;
        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                if (!skb_mac_header_was_set(skb))
                        goto err;
+
+               if (skb_vlan_tag_present(skb)) {
+                       if (!nft_payload_copy_vlan(dest, skb,
+                                                  priv->offset, priv->len))
+                               goto err;
+                       return;
+               }
                offset = skb_mac_header(skb) - skb->data;
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
@@ -43,7 +99,6 @@ static void nft_payload_eval(const struct nft_expr *expr,
        }
        offset += priv->offset;
 
-       dest[priv->len / NFT_REG32_SIZE] = 0;
        if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
                goto err;
        return;
index 43ddeee404e91f97908fb9228c1e873931b75bcc..8e524898ccea234a2b5cae3bdfaf2cd72d023238 100644 (file)
@@ -181,9 +181,23 @@ out:
 #endif
 }
 
+static u16 xt_ct_flags_to_dir(const struct xt_ct_target_info_v1 *info)
+{
+       switch (info->flags & (XT_CT_ZONE_DIR_ORIG |
+                              XT_CT_ZONE_DIR_REPL)) {
+       case XT_CT_ZONE_DIR_ORIG:
+               return NF_CT_ZONE_DIR_ORIG;
+       case XT_CT_ZONE_DIR_REPL:
+               return NF_CT_ZONE_DIR_REPL;
+       default:
+               return NF_CT_DEFAULT_ZONE_DIR;
+       }
+}
+
 static int xt_ct_tg_check(const struct xt_tgchk_param *par,
                          struct xt_ct_target_info_v1 *info)
 {
+       struct nf_conntrack_zone zone;
        struct nf_conn *ct;
        int ret = -EOPNOTSUPP;
 
@@ -193,7 +207,9 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
        }
 
 #ifndef CONFIG_NF_CONNTRACK_ZONES
-       if (info->zone)
+       if (info->zone || info->flags & (XT_CT_ZONE_DIR_ORIG |
+                                        XT_CT_ZONE_DIR_REPL |
+                                        XT_CT_ZONE_MARK))
                goto err1;
 #endif
 
@@ -201,7 +217,13 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
        if (ret < 0)
                goto err1;
 
-       ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
+       memset(&zone, 0, sizeof(zone));
+       zone.id = info->zone;
+       zone.dir = xt_ct_flags_to_dir(info);
+       if (info->flags & XT_CT_ZONE_MARK)
+               zone.flags |= NF_CT_FLAG_MARK;
+
+       ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL);
        if (!ct) {
                ret = -ENOMEM;
                goto err2;
index 8c3190e2fc6abad6394ba5498762ba0502c34d58..8c02501a530f4b481f9a6f4248957893801af2b2 100644 (file)
@@ -144,7 +144,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 
                        inet_proto_csum_replace2(&tcph->check, skb,
                                                 htons(oldmss), htons(newmss),
-                                                0);
+                                                false);
                        return 0;
                }
        }
@@ -185,18 +185,18 @@ tcpmss_mangle_packet(struct sk_buff *skb,
        memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
 
        inet_proto_csum_replace2(&tcph->check, skb,
-                                htons(len), htons(len + TCPOLEN_MSS), 1);
+                                htons(len), htons(len + TCPOLEN_MSS), true);
        opt[0] = TCPOPT_MSS;
        opt[1] = TCPOLEN_MSS;
        opt[2] = (newmss & 0xff00) >> 8;
        opt[3] = newmss & 0x00ff;
 
-       inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), 0);
+       inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);
 
        oldval = ((__be16 *)tcph)[6];
        tcph->doff += TCPOLEN_MSS/4;
        inet_proto_csum_replace2(&tcph->check, skb,
-                                oldval, ((__be16 *)tcph)[6], 0);
+                                oldval, ((__be16 *)tcph)[6], false);
        return TCPOLEN_MSS;
 }
 
index 625fa1d636a01ccacb43c2fcae902d65a466b9db..eb92bffff11ccb22690ec0cf76ff25076d6a8cb6 100644 (file)
@@ -80,7 +80,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
                                n <<= 8;
                        }
                        inet_proto_csum_replace2(&tcph->check, skb, htons(o),
-                                                htons(n), 0);
+                                                htons(n), false);
                }
                memset(opt + i, TCPOPT_NOP, optl);
        }
index c5d6556dbc5e407cffca198ac5fe66b97a0cb908..49fee6aa2c0aa03b8af6bf9c3358d300d8b9ac7c 100644 (file)
  *     modify it under the terms of the GNU General Public License
  *     version 2 or later, as published by the Free Software Foundation.
  */
-#include <linux/ip.h>
 #include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/route.h>
 #include <linux/skbuff.h>
-#include <linux/notifier.h>
-#include <net/checksum.h>
-#include <net/icmp.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/route.h>
+#include <linux/route.h>
 #include <linux/netfilter/x_tables.h>
+#include <net/route.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
 #include <linux/netfilter/xt_TEE.h>
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-#      define WITH_CONNTRACK 1
-#      include <net/netfilter/nf_conntrack.h>
-#endif
-
 struct xt_tee_priv {
        struct notifier_block   notifier;
        struct xt_tee_tginfo    *tginfo;
@@ -38,161 +27,24 @@ struct xt_tee_priv {
 
 static const union nf_inet_addr tee_zero_address;
 
-static struct net *pick_net(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_NS
-       const struct dst_entry *dst;
-
-       if (skb->dev != NULL)
-               return dev_net(skb->dev);
-       dst = skb_dst(skb);
-       if (dst != NULL && dst->dev != NULL)
-               return dev_net(dst->dev);
-#endif
-       return &init_net;
-}
-
-static bool
-tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
-{
-       const struct iphdr *iph = ip_hdr(skb);
-       struct net *net = pick_net(skb);
-       struct rtable *rt;
-       struct flowi4 fl4;
-
-       memset(&fl4, 0, sizeof(fl4));
-       if (info->priv) {
-               if (info->priv->oif == -1)
-                       return false;
-               fl4.flowi4_oif = info->priv->oif;
-       }
-       fl4.daddr = info->gw.ip;
-       fl4.flowi4_tos = RT_TOS(iph->tos);
-       fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
-       fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
-       rt = ip_route_output_key(net, &fl4);
-       if (IS_ERR(rt))
-               return false;
-
-       skb_dst_drop(skb);
-       skb_dst_set(skb, &rt->dst);
-       skb->dev      = rt->dst.dev;
-       skb->protocol = htons(ETH_P_IP);
-       return true;
-}
-
 static unsigned int
 tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_tee_tginfo *info = par->targinfo;
-       struct iphdr *iph;
 
-       if (__this_cpu_read(nf_skb_duplicated))
-               return XT_CONTINUE;
-       /*
-        * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
-        * the original skb, which should continue on its way as if nothing has
-        * happened. The copy should be independently delivered to the TEE
-        * --gateway.
-        */
-       skb = pskb_copy(skb, GFP_ATOMIC);
-       if (skb == NULL)
-               return XT_CONTINUE;
-
-#ifdef WITH_CONNTRACK
-       /* Avoid counting cloned packets towards the original connection. */
-       nf_conntrack_put(skb->nfct);
-       skb->nfct     = &nf_ct_untracked_get()->ct_general;
-       skb->nfctinfo = IP_CT_NEW;
-       nf_conntrack_get(skb->nfct);
-#endif
-       /*
-        * If we are in PREROUTING/INPUT, the checksum must be recalculated
-        * since the length could have changed as a result of defragmentation.
-        *
-        * We also decrease the TTL to mitigate potential TEE loops
-        * between two hosts.
-        *
-        * Set %IP_DF so that the original source is notified of a potentially
-        * decreased MTU on the clone route. IPv6 does this too.
-        */
-       iph = ip_hdr(skb);
-       iph->frag_off |= htons(IP_DF);
-       if (par->hooknum == NF_INET_PRE_ROUTING ||
-           par->hooknum == NF_INET_LOCAL_IN)
-               --iph->ttl;
-       ip_send_check(iph);
+       nf_dup_ipv4(skb, par->hooknum, &info->gw.in, info->priv->oif);
 
-       if (tee_tg_route4(skb, info)) {
-               __this_cpu_write(nf_skb_duplicated, true);
-               ip_local_out(skb);
-               __this_cpu_write(nf_skb_duplicated, false);
-       } else {
-               kfree_skb(skb);
-       }
        return XT_CONTINUE;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static bool
-tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
-{
-       const struct ipv6hdr *iph = ipv6_hdr(skb);
-       struct net *net = pick_net(skb);
-       struct dst_entry *dst;
-       struct flowi6 fl6;
-
-       memset(&fl6, 0, sizeof(fl6));
-       if (info->priv) {
-               if (info->priv->oif == -1)
-                       return false;
-               fl6.flowi6_oif = info->priv->oif;
-       }
-       fl6.daddr = info->gw.in6;
-       fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
-                          (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
-       fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
-       dst = ip6_route_output(net, NULL, &fl6);
-       if (dst->error) {
-               dst_release(dst);
-               return false;
-       }
-       skb_dst_drop(skb);
-       skb_dst_set(skb, dst);
-       skb->dev      = dst->dev;
-       skb->protocol = htons(ETH_P_IPV6);
-       return true;
-}
-
 static unsigned int
 tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_tee_tginfo *info = par->targinfo;
 
-       if (__this_cpu_read(nf_skb_duplicated))
-               return XT_CONTINUE;
-       skb = pskb_copy(skb, GFP_ATOMIC);
-       if (skb == NULL)
-               return XT_CONTINUE;
+       nf_dup_ipv6(skb, par->hooknum, &info->gw.in6, info->priv->oif);
 
-#ifdef WITH_CONNTRACK
-       nf_conntrack_put(skb->nfct);
-       skb->nfct     = &nf_ct_untracked_get()->ct_general;
-       skb->nfctinfo = IP_CT_NEW;
-       nf_conntrack_get(skb->nfct);
-#endif
-       if (par->hooknum == NF_INET_PRE_ROUTING ||
-           par->hooknum == NF_INET_LOCAL_IN) {
-               struct ipv6hdr *iph = ipv6_hdr(skb);
-               --iph->hop_limit;
-       }
-       if (tee_tg_route6(skb, info)) {
-               __this_cpu_write(nf_skb_duplicated, true);
-               ip6_local_out(skb);
-               __this_cpu_write(nf_skb_duplicated, false);
-       } else {
-               kfree_skb(skb);
-       }
        return XT_CONTINUE;
 }
 #endif
index 29ba6218a820e7cc8e9363db91312cc27c09004e..075d89d94d28f4deb87f473dd787f61395fa2681 100644 (file)
@@ -134,7 +134,7 @@ static bool add_hlist(struct hlist_head *head,
 static unsigned int check_hlist(struct net *net,
                                struct hlist_head *head,
                                const struct nf_conntrack_tuple *tuple,
-                               u16 zone,
+                               const struct nf_conntrack_zone *zone,
                                bool *addit)
 {
        const struct nf_conntrack_tuple_hash *found;
@@ -201,7 +201,7 @@ static unsigned int
 count_tree(struct net *net, struct rb_root *root,
           const struct nf_conntrack_tuple *tuple,
           const union nf_inet_addr *addr, const union nf_inet_addr *mask,
-          u8 family, u16 zone)
+          u8 family, const struct nf_conntrack_zone *zone)
 {
        struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
        struct rb_node **rbnode, *parent;
@@ -290,7 +290,8 @@ static int count_them(struct net *net,
                      const struct nf_conntrack_tuple *tuple,
                      const union nf_inet_addr *addr,
                      const union nf_inet_addr *mask,
-                     u_int8_t family, u16 zone)
+                     u_int8_t family,
+                     const struct nf_conntrack_zone *zone)
 {
        struct rb_root *root;
        int count;
@@ -321,10 +322,10 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
        union nf_inet_addr addr;
        struct nf_conntrack_tuple tuple;
        const struct nf_conntrack_tuple *tuple_ptr = &tuple;
+       const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
        enum ip_conntrack_info ctinfo;
        const struct nf_conn *ct;
        unsigned int connections;
-       u16 zone = NF_CT_DEFAULT_ZONE;
 
        ct = nf_ct_get(skb, &ctinfo);
        if (ct != NULL) {
index 8c646ed9c921bca1fbf507c1aa97c1dca60d8df1..3048a7e3a90a5a27887b7e4ff731d00098f2c928 100644 (file)
@@ -37,7 +37,7 @@ nfacct_mt_checkentry(const struct xt_mtchk_param *par)
        struct xt_nfacct_match_info *info = par->matchinfo;
        struct nf_acct *nfacct;
 
-       nfacct = nfnl_acct_find_get(info->name);
+       nfacct = nfnl_acct_find_get(par->net, info->name);
        if (nfacct == NULL) {
                pr_info("xt_nfacct: accounting object with name `%s' "
                        "does not exists\n", info->name);
index 14da52ddd327dcd35827bea6c9581528cbd96087..4f4200717bef984d02b9232ea764180f9e8203d7 100644 (file)
@@ -284,14 +284,14 @@ static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
-                                                addr, new_addr, 1);
+                                                addr, new_addr, true);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);
 
                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
-                                                        addr, new_addr, 1);
+                                                        addr, new_addr, true);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
@@ -316,14 +316,14 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
        if (l4_proto == NEXTHDR_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
-                                                 addr, new_addr, 1);
+                                                 addr, new_addr, true);
        } else if (l4_proto == NEXTHDR_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);
 
                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
-                                                         addr, new_addr, 1);
+                                                         addr, new_addr, true);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
@@ -331,7 +331,7 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
        } else if (l4_proto == NEXTHDR_ICMP) {
                if (likely(transport_len >= sizeof(struct icmp6hdr)))
                        inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
-                                                 skb, addr, new_addr, 1);
+                                                 skb, addr, new_addr, true);
        }
 }
 
@@ -498,7 +498,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
 {
-       inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+       inet_proto_csum_replace2(check, skb, *port, new_port, false);
        *port = new_port;
 }
 
index a6eb77ab1a6456768338a55290955bb29c69749a..4e7a3f7facc2202742671a7ceddca5e253552e42 100644 (file)
@@ -534,19 +534,19 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
                        tun_flags |= TUNNEL_KEY;
                        break;
                case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
-                       SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
+                       SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
                                        nla_get_in_addr(a), is_mask);
                        break;
                case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
-                       SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
+                       SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
                                        nla_get_in_addr(a), is_mask);
                        break;
                case OVS_TUNNEL_KEY_ATTR_TOS:
-                       SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
+                       SW_FLOW_KEY_PUT(match, tun_key.tos,
                                        nla_get_u8(a), is_mask);
                        break;
                case OVS_TUNNEL_KEY_ATTR_TTL:
-                       SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
+                       SW_FLOW_KEY_PUT(match, tun_key.ttl,
                                        nla_get_u8(a), is_mask);
                        ttl = true;
                        break;
@@ -609,7 +609,7 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
        }
 
        if (!is_mask) {
-               if (!match->key->tun_key.ipv4_dst) {
+               if (!match->key->tun_key.u.ipv4.dst) {
                        OVS_NLERR(log, "IPv4 tunnel dst address is zero");
                        return -EINVAL;
                }
@@ -647,18 +647,18 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
        if (output->tun_flags & TUNNEL_KEY &&
            nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
                return -EMSGSIZE;
-       if (output->ipv4_src &&
+       if (output->u.ipv4.src &&
            nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
-                           output->ipv4_src))
+                           output->u.ipv4.src))
                return -EMSGSIZE;
-       if (output->ipv4_dst &&
+       if (output->u.ipv4.dst &&
            nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
-                           output->ipv4_dst))
+                           output->u.ipv4.dst))
                return -EMSGSIZE;
-       if (output->ipv4_tos &&
-           nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
+       if (output->tos &&
+           nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
                return -EMSGSIZE;
-       if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
+       if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
                return -EMSGSIZE;
        if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
            nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
@@ -1116,7 +1116,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
                        /* The userspace does not send tunnel attributes that
                         * are 0, but we should not wildcard them nonetheless.
                         */
-                       if (match->key->tun_key.ipv4_dst)
+                       if (match->key->tun_key.u.ipv4.dst)
                                SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
                                                         0xff, true);
 
@@ -1287,7 +1287,7 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
        if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
                goto nla_put_failure;
 
-       if ((swkey->tun_key.ipv4_dst || is_mask)) {
+       if ((swkey->tun_key.u.ipv4.dst || is_mask)) {
                const void *opts = NULL;
 
                if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
index 3a9d1dde76ed3457bbe527053d95bd9ec774f100..d22d8e948d0f4b126894a71f8dcc30bc7d6d024d 100644 (file)
@@ -426,7 +426,7 @@ static u32 flow_hash(const struct sw_flow_key *key,
 
 static int flow_key_start(const struct sw_flow_key *key)
 {
-       if (key->tun_key.ipv4_dst)
+       if (key->tun_key.u.ipv4.dst)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
index 1da3a14d10101f78881c392a06b49aa9e53c9a13..d01bd6360970871db16889eea191b80735ff22a2 100644 (file)
@@ -203,8 +203,8 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
        }
 
        err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr,
-                             tun_key->ipv4_dst, tun_key->ipv4_tos,
-                             tun_key->ipv4_ttl, df, sport, dport,
+                             tun_key->u.ipv4.dst, tun_key->tos,
+                             tun_key->ttl, df, sport, dport,
                              tun_key->tun_flags, vni, opts_len, opts,
                              !!(tun_key->tun_flags & TUNNEL_CSUM), false);
        if (err < 0)
index 4b70aaa4a746d8815b08d784dfa7147f7fb75601..a75011505039222c85332b364dec8cd014934f2d 100644 (file)
@@ -57,7 +57,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
        skb_push(skb, ETH_HLEN);
        ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
 
-       ovs_vport_receive(vport, skb, skb_tunnel_info(skb, AF_INET));
+       ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
        return;
 
 error:
index d14f59403c5eb61cdde91cbe557617194756ccaf..d73e5a16e7ca80b20f5e48820cbe0a723d39e1ad 100644 (file)
@@ -603,9 +603,9 @@ int ovs_tunnel_get_egress_info(struct ip_tunnel_info *egress_tun_info,
         * saddr, tp_src and tp_dst
         */
        __ip_tunnel_info_init(egress_tun_info,
-                             fl.saddr, tun_key->ipv4_dst,
-                             tun_key->ipv4_tos,
-                             tun_key->ipv4_ttl,
+                             fl.saddr, tun_key->u.ipv4.dst,
+                             tun_key->tos,
+                             tun_key->ttl,
                              tp_src, tp_dst,
                              tun_key->tun_id,
                              tun_key->tun_flags,
index 1a689c28b5a6356dc1ffe061c80fc537822a4e7b..b88b3ee86f079beb2b63269cbfe8e97067f6d1e3 100644 (file)
@@ -254,9 +254,9 @@ static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
        struct rtable *rt;
 
        memset(fl, 0, sizeof(*fl));
-       fl->daddr = key->ipv4_dst;
-       fl->saddr = key->ipv4_src;
-       fl->flowi4_tos = RT_TOS(key->ipv4_tos);
+       fl->daddr = key->u.ipv4.dst;
+       fl->saddr = key->u.ipv4.src;
+       fl->flowi4_tos = RT_TOS(key->tos);
        fl->flowi4_mark = mark;
        fl->flowi4_proto = protocol;
 
index b5afe538bb88e9b97eec2faa4e93acbb16be4fa0..7b8e39a223879c3cb0a9d2a0bc6ff003118379f9 100644 (file)
@@ -92,6 +92,7 @@
 #ifdef CONFIG_INET
 #include <net/inet_common.h>
 #endif
+#include <linux/bpf.h>
 
 #include "internal.h"
 
@@ -1410,6 +1411,22 @@ static unsigned int fanout_demux_qm(struct packet_fanout *f,
        return skb_get_queue_mapping(skb) % num;
 }
 
+static unsigned int fanout_demux_bpf(struct packet_fanout *f,
+                                    struct sk_buff *skb,
+                                    unsigned int num)
+{
+       struct bpf_prog *prog;
+       unsigned int ret = 0;
+
+       rcu_read_lock();
+       prog = rcu_dereference(f->bpf_prog);
+       if (prog)
+               ret = BPF_PROG_RUN(prog, skb) % num;
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
 {
        return f->flags & (flag >> 8);
@@ -1454,6 +1471,10 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
        case PACKET_FANOUT_ROLLOVER:
                idx = fanout_demux_rollover(f, skb, 0, false, num);
                break;
+       case PACKET_FANOUT_CBPF:
+       case PACKET_FANOUT_EBPF:
+               idx = fanout_demux_bpf(f, skb, num);
+               break;
        }
 
        if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
@@ -1502,6 +1523,103 @@ static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
        return false;
 }
 
+static void fanout_init_data(struct packet_fanout *f)
+{
+       switch (f->type) {
+       case PACKET_FANOUT_LB:
+               atomic_set(&f->rr_cur, 0);
+               break;
+       case PACKET_FANOUT_CBPF:
+       case PACKET_FANOUT_EBPF:
+               RCU_INIT_POINTER(f->bpf_prog, NULL);
+               break;
+       }
+}
+
+static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
+{
+       struct bpf_prog *old;
+
+       spin_lock(&f->lock);
+       old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
+       rcu_assign_pointer(f->bpf_prog, new);
+       spin_unlock(&f->lock);
+
+       if (old) {
+               synchronize_net();
+               bpf_prog_destroy(old);
+       }
+}
+
+static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
+                               unsigned int len)
+{
+       struct bpf_prog *new;
+       struct sock_fprog fprog;
+       int ret;
+
+       if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
+               return -EPERM;
+       if (len != sizeof(fprog))
+               return -EINVAL;
+       if (copy_from_user(&fprog, data, len))
+               return -EFAULT;
+
+       ret = bpf_prog_create_from_user(&new, &fprog, NULL);
+       if (ret)
+               return ret;
+
+       __fanout_set_data_bpf(po->fanout, new);
+       return 0;
+}
+
+static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
+                               unsigned int len)
+{
+       struct bpf_prog *new;
+       u32 fd;
+
+       if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
+               return -EPERM;
+       if (len != sizeof(fd))
+               return -EINVAL;
+       if (copy_from_user(&fd, data, len))
+               return -EFAULT;
+
+       new = bpf_prog_get(fd);
+       if (IS_ERR(new))
+               return PTR_ERR(new);
+       if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) {
+               bpf_prog_put(new);
+               return -EINVAL;
+       }
+
+       __fanout_set_data_bpf(po->fanout, new);
+       return 0;
+}
+
+static int fanout_set_data(struct packet_sock *po, char __user *data,
+                          unsigned int len)
+{
+       switch (po->fanout->type) {
+       case PACKET_FANOUT_CBPF:
+               return fanout_set_data_cbpf(po, data, len);
+       case PACKET_FANOUT_EBPF:
+               return fanout_set_data_ebpf(po, data, len);
+       default:
+               return -EINVAL;
+       };
+}
+
+static void fanout_release_data(struct packet_fanout *f)
+{
+       switch (f->type) {
+       case PACKET_FANOUT_CBPF:
+       case PACKET_FANOUT_EBPF:
+               __fanout_set_data_bpf(f, NULL);
+       };
+}
+
 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 {
        struct packet_sock *po = pkt_sk(sk);
@@ -1519,6 +1637,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
        case PACKET_FANOUT_CPU:
        case PACKET_FANOUT_RND:
        case PACKET_FANOUT_QM:
+       case PACKET_FANOUT_CBPF:
+       case PACKET_FANOUT_EBPF:
                break;
        default:
                return -EINVAL;
@@ -1561,10 +1681,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                match->id = id;
                match->type = type;
                match->flags = flags;
-               atomic_set(&match->rr_cur, 0);
                INIT_LIST_HEAD(&match->list);
                spin_lock_init(&match->lock);
                atomic_set(&match->sk_ref, 0);
+               fanout_init_data(match);
                match->prot_hook.type = po->prot_hook.type;
                match->prot_hook.dev = po->prot_hook.dev;
                match->prot_hook.func = packet_rcv_fanout;
@@ -1610,6 +1730,7 @@ static void fanout_release(struct sock *sk)
        if (atomic_dec_and_test(&f->sk_ref)) {
                list_del(&f->list);
                dev_remove_pack(&f->prot_hook);
+               fanout_release_data(f);
                kfree(f);
        }
        mutex_unlock(&fanout_mutex);
@@ -3529,6 +3650,13 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                return fanout_add(sk, val & 0xffff, val >> 16);
        }
+       case PACKET_FANOUT_DATA:
+       {
+               if (!po->fanout)
+                       return -EINVAL;
+
+               return fanout_set_data(po, optval, optlen);
+       }
        case PACKET_TX_HAS_OFF:
        {
                unsigned int val;
index e20b3e8829b8acac25b74d7bbc23636b089d1ff4..9ee46314b7d76df47d683c252a92ce97398d592b 100644 (file)
@@ -79,7 +79,10 @@ struct packet_fanout {
        u16                     id;
        u8                      type;
        u8                      flags;
-       atomic_t                rr_cur;
+       union {
+               atomic_t                rr_cur;
+               struct bpf_prog __rcu   *bpf_prog;
+       };
        struct list_head        list;
        struct sock             *arr[PACKET_FANOUT_MAX];
        spinlock_t              lock;
index 4c10e7e6c9f6ae53291d3f72d82b6c9b0539b552..598d374f6a35f714db4753efa7ab28d6782419a9 100644 (file)
@@ -36,7 +36,8 @@ config RFKILL_REGULATOR
 
 config RFKILL_GPIO
        tristate "GPIO RFKILL driver"
-       depends on RFKILL && GPIOLIB
+       depends on RFKILL
+       depends on GPIOLIB || COMPILE_TEST
        default n
        help
          If you say yes here you get support of a generic gpio RFKILL
index d5d58d9195524f36b03bdac0125148cb7110901a..93127220cb54ac7ffdc78890a3753031652b8139 100644 (file)
@@ -164,7 +164,6 @@ static int rfkill_gpio_remove(struct platform_device *pdev)
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id rfkill_acpi_match[] = {
        { "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
-       { "BCM2E39", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E40", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E64", RFKILL_TYPE_BLUETOOTH },
index f2b540220ad02f1f8e3b2add9c7477a334081c3d..5019a47b9270e758f65c346631becc99e933b0c6 100644 (file)
@@ -37,6 +37,7 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
        struct nf_conntrack_tuple tuple;
        enum ip_conntrack_info ctinfo;
        struct tcf_connmark_info *ca = a->priv;
+       struct nf_conntrack_zone zone;
        struct nf_conn *c;
        int proto;
 
@@ -70,7 +71,10 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
                               proto, &tuple))
                goto out;
 
-       thash = nf_conntrack_find_get(dev_net(skb->dev), ca->zone, &tuple);
+       zone.id = ca->zone;
+       zone.dir = NF_CT_DEFAULT_ZONE_DIR;
+
+       thash = nf_conntrack_find_get(dev_net(skb->dev), &zone, &tuple);
        if (!thash)
                goto out;
 
index 5be0b3c1c5b0c9f17e3fbd4e1dc1c92c7a8e5aed..b7c4ead8b5a8e863d87f8c1c4c1e37840fe50577 100644 (file)
@@ -162,7 +162,8 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
                        goto drop;
 
                tcph = (void *)(skb_network_header(skb) + ihl);
-               inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
+               inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
+                                        true);
                break;
        }
        case IPPROTO_UDP:
@@ -178,7 +179,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
                udph = (void *)(skb_network_header(skb) + ihl);
                if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                        inet_proto_csum_replace4(&udph->check, skb, addr,
-                                                new_addr, 1);
+                                                new_addr, true);
                        if (!udph->check)
                                udph->check = CSUM_MANGLED_0;
                }
@@ -231,7 +232,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
                        iph->saddr = new_addr;
 
                inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
-                                        0);
+                                        false);
                break;
        }
        default:
index 2e2398cfc694aaf7ed12c2afda377e8ded340cb2..2177eac0a61ed00c6c60655f577e0dd816fd2c08 100644 (file)
@@ -54,7 +54,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
        bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
 
        if (opt == NULL) {
-               u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
+               u32 limit = qdisc_dev(sch)->tx_queue_len;
 
                if (is_bfifo)
                        limit *= psched_mtu(qdisc_dev(sch));
index 6efca30894aad4294824d2ba64f732239a921517..942fea8405a476a3f4e23db9a826b8b5fe5f4ecd 100644 (file)
@@ -735,7 +735,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
 {
        struct Qdisc *qdisc = &noqueue_qdisc;
 
-       if (dev->tx_queue_len) {
+       if (dev->tx_queue_len && !(dev->priv_flags & IFF_NO_QUEUE)) {
                qdisc = qdisc_create_dflt(dev_queue,
                                          default_qdisc_ops, TC_H_ROOT);
                if (!qdisc) {
@@ -755,7 +755,9 @@ static void attach_default_qdiscs(struct net_device *dev)
 
        txq = netdev_get_tx_queue(dev, 0);
 
-       if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
+       if (!netif_is_multiqueue(dev) ||
+           dev->tx_queue_len == 0 ||
+           dev->priv_flags & IFF_NO_QUEUE) {
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
                dev->qdisc = txq->qdisc_sleeping;
                atomic_inc(&dev->qdisc->refcnt);
index abb9f2fec28fbd435ff89a17eb7e1937cb4c19b3..80105109f756315d7a31d6734887941c9f16ea9c 100644 (file)
@@ -512,11 +512,9 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt)
 
        if (tb[TCA_GRED_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
-       else {
-               u32 qlen = qdisc_dev(sch)->tx_queue_len ? : 1;
-
-               sch->limit = qlen * psched_mtu(qdisc_dev(sch));
-       }
+       else
+               sch->limit = qdisc_dev(sch)->tx_queue_len
+                            * psched_mtu(qdisc_dev(sch));
 
        return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
 }
index f1acb0f60dc35b724289f98498b5cf2162583ce9..cf4b0f865d1bc6a87873b54bb89cfe8e28f36ea6 100644 (file)
@@ -1048,11 +1048,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 
        if (tb[TCA_HTB_DIRECT_QLEN])
                q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
-       else {
+       else
                q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
-               if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
-                       q->direct_qlen = 2;
-       }
+
        if ((q->rate2quantum = gopt->rate2quantum) < 1)
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;
index ade9445a55abe468107f2bcb233e48da027afb4b..5abfe44678d4a1ecda495bf9e00879c649c63cdb 100644 (file)
@@ -130,12 +130,8 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt)
        q->unplug_indefinite = false;
 
        if (opt == NULL) {
-               /* We will set a default limit of 100 pkts (~150kB)
-                * in case tx_queue_len is not available. The
-                * default value is completely arbitrary.
-                */
-               u32 pkt_limit = qdisc_dev(sch)->tx_queue_len ? : 100;
-               q->limit = pkt_limit * psched_mtu(qdisc_dev(sch));
+               q->limit = qdisc_dev(sch)->tx_queue_len
+                          * psched_mtu(qdisc_dev(sch));
        } else {
                struct tc_plug_qopt *ctl = nla_data(opt);
 
index 4b815193326c9abae464c05d53609c28c2071b38..dcdff5c769a1c28e6ea476ce822c8fbe74f9fc53 100644 (file)
@@ -502,7 +502,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
 
        limit = ctl->limit;
        if (limit == 0)
-               limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
+               limit = qdisc_dev(sch)->tx_queue_len;
 
        child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
        if (IS_ERR(child))
index f067e5425560fe0d43c184589a397d614c12573b..75db07c78a6900157c0b568491a5d4e8e694e274 100644 (file)
@@ -351,11 +351,11 @@ int tipc_link_fsm_evt(struct tipc_link *l, int evt)
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
+               case LINK_SYNCH_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                        l->state = LINK_SYNCHING;
                        break;
-               case LINK_SYNCH_END_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
@@ -1330,6 +1330,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
        u16 peers_snd_nxt =  msg_next_sent(hdr);
        u16 peers_tol = msg_link_tolerance(hdr);
        u16 peers_prio = msg_linkprio(hdr);
+       u16 rcv_nxt = l->rcv_nxt;
        char *if_name;
        int rc = 0;
 
@@ -1393,7 +1394,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                        break;
 
                /* Send NACK if peer has sent pkts we haven't received yet */
-               if (more(peers_snd_nxt, l->rcv_nxt))
+               if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
                        rcvgap = peers_snd_nxt - l->rcv_nxt;
                if (rcvgap || (msg_probe(hdr)))
                        tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
index 53e0fee800864e4b8c29ed793d60a5823d59c773..1eadc95e113294c159a6e5288db831e142be77dc 100644 (file)
@@ -1114,7 +1114,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
        }
 
        len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
-       if (TLV_GET_LEN(msg.req) && !TLV_OK(msg.req, len)) {
+       if (len && !TLV_OK(msg.req, len)) {
                msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
                err = -EOPNOTSUPP;
                goto send;
index 7c191641b44f64c080745df6615a8eccb237dd38..703875fd6cde204ddeaf630b9a6bd11daec6dbfa 100644 (file)
@@ -423,6 +423,8 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
 
        /* There is still a working link => initiate failover */
        tnl = node_active_link(n, 0);
+       tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+       tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
        n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
        tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
        tipc_link_reset(l);
@@ -565,6 +567,8 @@ void tipc_node_check_dest(struct net *net, u32 onode,
                        goto exit;
                }
                tipc_link_reset(l);
+               if (n->state == NODE_FAILINGOVER)
+                       tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
                le->link = l;
                n->link_cnt++;
                tipc_node_calculate_timer(n, l);
@@ -1075,7 +1079,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
        u16 exp_pkts = msg_msgcnt(hdr);
        u16 rcv_nxt, syncpt, dlv_nxt;
        int state = n->state;
-       struct tipc_link *l, *pl = NULL;
+       struct tipc_link *l, *tnl, *pl = NULL;
        struct tipc_media_addr *maddr;
        int i, pb_id;
 
@@ -1129,7 +1133,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
        }
 
        /* Open parallel link when tunnel link reaches synch point */
-       if ((n->state == NODE_FAILINGOVER) && !tipc_link_is_failingover(l)) {
+       if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
                if (!more(rcv_nxt, n->sync_point))
                        return true;
                tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
@@ -1138,6 +1142,10 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
                return true;
        }
 
+       /* No synching needed if only one link */
+       if (!pl || !tipc_link_is_up(pl))
+               return true;
+
        /* Initiate or update synch mode if applicable */
        if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
                syncpt = iseqno + exp_pkts - 1;
@@ -1156,13 +1164,20 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
 
        /* Open tunnel link when parallel link reaches synch point */
        if ((n->state == NODE_SYNCHING) && tipc_link_is_synching(l)) {
-               if (pl)
-                       dlv_nxt = mod(pl->rcv_nxt - skb_queue_len(pl->inputq));
-               if (!pl || more(dlv_nxt, n->sync_point)) {
-                       tipc_link_fsm_evt(l, LINK_SYNCH_END_EVT);
+               if (tipc_link_is_synching(l)) {
+                       tnl = l;
+               } else {
+                       tnl = pl;
+                       pl = l;
+               }
+               dlv_nxt = pl->rcv_nxt - mod(skb_queue_len(pl->inputq));
+               if (more(dlv_nxt, n->sync_point)) {
+                       tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
                        tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
                        return true;
                }
+               if (l == pl)
+                       return true;
                if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
                        return true;
                if (usr == LINK_PROTOCOL)
index 2a0bbd22854bd97b377139200f9e6b5e0ec2662f..3893409dee95b3ba23c39b369a57c48c6e838d9b 100644 (file)
@@ -407,6 +407,9 @@ use_default_name:
        INIT_LIST_HEAD(&rdev->bss_list);
        INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
        INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results);
+       INIT_LIST_HEAD(&rdev->mlme_unreg);
+       spin_lock_init(&rdev->mlme_unreg_lock);
+       INIT_WORK(&rdev->mlme_unreg_wk, cfg80211_mlme_unreg_wk);
        INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk,
                          cfg80211_dfs_channels_update_work);
 #ifdef CONFIG_CFG80211_WEXT
@@ -802,6 +805,7 @@ void wiphy_unregister(struct wiphy *wiphy)
        cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
        flush_work(&rdev->destroy_work);
        flush_work(&rdev->sched_scan_stop_wk);
+       flush_work(&rdev->mlme_unreg_wk);
 
 #ifdef CONFIG_PM
        if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
@@ -855,6 +859,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
 
        switch (wdev->iftype) {
        case NL80211_IFTYPE_P2P_DEVICE:
+               cfg80211_mlme_purge_registrations(wdev);
                cfg80211_stop_p2p_device(rdev, wdev);
                break;
        default:
index 311eef26bf88b9a0e8125678498583fa19edf688..b9d5bc8c148d32ecb8156d19c2652c49824f41cd 100644 (file)
@@ -59,6 +59,10 @@ struct cfg80211_registered_device {
        struct list_head beacon_registrations;
        spinlock_t beacon_registrations_lock;
 
+       struct list_head mlme_unreg;
+       spinlock_t mlme_unreg_lock;
+       struct work_struct mlme_unreg_wk;
+
        /* protected by RTNL only */
        int num_running_ifaces;
        int num_running_monitor_ifaces;
@@ -348,6 +352,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
 int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
                                u16 frame_type, const u8 *match_data,
                                int match_len);
+void cfg80211_mlme_unreg_wk(struct work_struct *wk);
 void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
 int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
index 7aae329e2b4e4a8e3afa943f670852a6e26e89dd..fb44fa3bf4efa750298163a15534572c43229b2e 100644 (file)
@@ -2,6 +2,7 @@
  * cfg80211 MLME SAP interface
  *
  * Copyright (c) 2009, Jouni Malinen <j@w1.fi>
+ * Copyright (c) 2015          Intel Deutschland GmbH
  */
 
 #include <linux/kernel.h>
@@ -389,6 +390,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
 
 struct cfg80211_mgmt_registration {
        struct list_head list;
+       struct wireless_dev *wdev;
 
        u32 nlportid;
 
@@ -399,6 +401,46 @@ struct cfg80211_mgmt_registration {
        u8 match[];
 };
 
+static void
+cfg80211_process_mlme_unregistrations(struct cfg80211_registered_device *rdev)
+{
+       struct cfg80211_mgmt_registration *reg;
+
+       ASSERT_RTNL();
+
+       spin_lock_bh(&rdev->mlme_unreg_lock);
+       while ((reg = list_first_entry_or_null(&rdev->mlme_unreg,
+                                              struct cfg80211_mgmt_registration,
+                                              list))) {
+               list_del(&reg->list);
+               spin_unlock_bh(&rdev->mlme_unreg_lock);
+
+               if (rdev->ops->mgmt_frame_register) {
+                       u16 frame_type = le16_to_cpu(reg->frame_type);
+
+                       rdev_mgmt_frame_register(rdev, reg->wdev,
+                                                frame_type, false);
+               }
+
+               kfree(reg);
+
+               spin_lock_bh(&rdev->mlme_unreg_lock);
+       }
+       spin_unlock_bh(&rdev->mlme_unreg_lock);
+}
+
+void cfg80211_mlme_unreg_wk(struct work_struct *wk)
+{
+       struct cfg80211_registered_device *rdev;
+
+       rdev = container_of(wk, struct cfg80211_registered_device,
+                           mlme_unreg_wk);
+
+       rtnl_lock();
+       cfg80211_process_mlme_unregistrations(rdev);
+       rtnl_unlock();
+}
+
 int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
                                u16 frame_type, const u8 *match_data,
                                int match_len)
@@ -449,11 +491,18 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
        nreg->match_len = match_len;
        nreg->nlportid = snd_portid;
        nreg->frame_type = cpu_to_le16(frame_type);
+       nreg->wdev = wdev;
        list_add(&nreg->list, &wdev->mgmt_registrations);
+       spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+       /* process all unregistrations to avoid driver confusion */
+       cfg80211_process_mlme_unregistrations(rdev);
 
        if (rdev->ops->mgmt_frame_register)
                rdev_mgmt_frame_register(rdev, wdev, frame_type, true);
 
+       return 0;
+
  out:
        spin_unlock_bh(&wdev->mgmt_registrations_lock);
 
@@ -472,15 +521,12 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
                if (reg->nlportid != nlportid)
                        continue;
 
-               if (rdev->ops->mgmt_frame_register) {
-                       u16 frame_type = le16_to_cpu(reg->frame_type);
-
-                       rdev_mgmt_frame_register(rdev, wdev,
-                                                frame_type, false);
-               }
-
                list_del(&reg->list);
-               kfree(reg);
+               spin_lock(&rdev->mlme_unreg_lock);
+               list_add_tail(&reg->list, &rdev->mlme_unreg);
+               spin_unlock(&rdev->mlme_unreg_lock);
+
+               schedule_work(&rdev->mlme_unreg_wk);
        }
 
        spin_unlock_bh(&wdev->mgmt_registrations_lock);
@@ -496,16 +542,15 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
 
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
 {
-       struct cfg80211_mgmt_registration *reg, *tmp;
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        spin_lock_bh(&wdev->mgmt_registrations_lock);
-
-       list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
-               list_del(&reg->list);
-               kfree(reg);
-       }
-
+       spin_lock(&rdev->mlme_unreg_lock);
+       list_splice_tail_init(&wdev->mgmt_registrations, &rdev->mlme_unreg);
+       spin_unlock(&rdev->mlme_unreg_lock);
        spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+       cfg80211_process_mlme_unregistrations(rdev);
 }
 
 int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
index 76b41578a838e3bed8596c5950ffbd6881d54a64..5d8748b4c8a2d20f76c38a8394888c7fe56924bf 100644 (file)
@@ -2321,6 +2321,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
                        rdev->wiphy.frag_threshold = old_frag_threshold;
                        rdev->wiphy.rts_threshold = old_rts_threshold;
                        rdev->wiphy.coverage_class = old_coverage_class;
+                       return result;
                }
        }
        return 0;
@@ -7390,7 +7391,8 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info)
        int err;
 
        if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
-           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
+           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
                return -EOPNOTSUPP;
 
        if (!rdev->ops->set_mcast_rate)
index c6e83a7468c0c43baea32bbb0291c312017226f5..c23516d0f80794c7277812dd25dac250da4e5f0c 100644 (file)
@@ -733,6 +733,8 @@ static inline void
 rdev_mgmt_frame_register(struct cfg80211_registered_device *rdev,
                         struct wireless_dev *wdev, u16 frame_type, bool reg)
 {
+       might_sleep();
+
        trace_rdev_mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg);
        rdev->ops->mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg);
        trace_rdev_return_void(&rdev->wiphy);
index aa2d75482017e1a258eb0cdb7271b2d615d8782e..b144485946f2e5ce2ec6411cd52462fa278b185e 100644 (file)
@@ -1004,7 +1004,7 @@ static u32 map_regdom_flags(u32 rd_flags)
 
 static const struct ieee80211_reg_rule *
 freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
-                  const struct ieee80211_regdomain *regd)
+                  const struct ieee80211_regdomain *regd, u32 bw)
 {
        int i;
        bool band_rule_found = false;
@@ -1028,7 +1028,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
                if (!band_rule_found)
                        band_rule_found = freq_in_rule_band(fr, center_freq);
 
-               bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
+               bw_fits = reg_does_bw_fit(fr, center_freq, bw);
 
                if (band_rule_found && bw_fits)
                        return rr;
@@ -1040,14 +1040,26 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
        return ERR_PTR(-EINVAL);
 }
 
-const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
-                                              u32 center_freq)
+const struct ieee80211_reg_rule *__freq_reg_info(struct wiphy *wiphy,
+                                                u32 center_freq, u32 min_bw)
 {
-       const struct ieee80211_regdomain *regd;
+       const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
+       const struct ieee80211_reg_rule *reg_rule = NULL;
+       u32 bw;
 
-       regd = reg_get_regdomain(wiphy);
+       for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
+               reg_rule = freq_reg_info_regd(wiphy, center_freq, regd, bw);
+               if (!IS_ERR(reg_rule))
+                       return reg_rule;
+       }
 
-       return freq_reg_info_regd(wiphy, center_freq, regd);
+       return reg_rule;
+}
+
+const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
+                                              u32 center_freq)
+{
+       return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(20));
 }
 EXPORT_SYMBOL(freq_reg_info);
 
@@ -1176,8 +1188,20 @@ static void handle_channel(struct wiphy *wiphy,
        if (reg_rule->flags & NL80211_RRF_AUTO_BW)
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
+       /* If we get a reg_rule we can assume that at least 5Mhz fit */
+       if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+                            MHZ_TO_KHZ(10)))
+               bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+       if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+                            MHZ_TO_KHZ(20)))
+               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+       if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+               bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+       if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-               bw_flags = IEEE80211_CHAN_NO_HT40;
+               bw_flags |= IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1695,9 +1719,15 @@ static void handle_channel_custom(struct wiphy *wiphy,
        const struct ieee80211_power_rule *power_rule = NULL;
        const struct ieee80211_freq_range *freq_range = NULL;
        u32 max_bandwidth_khz;
+       u32 bw;
 
-       reg_rule = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq),
-                                     regd);
+       for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) {
+               reg_rule = freq_reg_info_regd(wiphy,
+                                             MHZ_TO_KHZ(chan->center_freq),
+                                             regd, bw);
+               if (!IS_ERR(reg_rule))
+                       break;
+       }
 
        if (IS_ERR(reg_rule)) {
                REG_DBG_PRINT("Disabling freq %d MHz as custom regd has no rule that fits it\n",
@@ -1721,8 +1751,20 @@ static void handle_channel_custom(struct wiphy *wiphy,
        if (reg_rule->flags & NL80211_RRF_AUTO_BW)
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
+       /* If we get a reg_rule we can assume that at least 5Mhz fit */
+       if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+                            MHZ_TO_KHZ(10)))
+               bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+       if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+                            MHZ_TO_KHZ(20)))
+               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+       if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+               bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+       if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-               bw_flags = IEEE80211_CHAN_NO_HT40;
+               bw_flags |= IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -2079,10 +2121,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
                reg_process_hint_core(reg_request);
                return;
        case NL80211_REGDOM_SET_BY_USER:
-               treatment = reg_process_hint_user(reg_request);
-               if (treatment == REG_REQ_IGNORE ||
-                   treatment == REG_REQ_ALREADY_SET)
-                       return;
+               reg_process_hint_user(reg_request);
                return;
        case NL80211_REGDOM_SET_BY_DRIVER:
                if (!wiphy)
@@ -2099,7 +2138,9 @@ static void reg_process_hint(struct regulatory_request *reg_request)
                goto out_free;
        }
 
-       /* This is required so that the orig_* parameters are saved */
+       /* This is required so that the orig_* parameters are saved.
+        * NOTE: treatment must be set for any case that reaches here!
+        */
        if (treatment == REG_REQ_ALREADY_SET && wiphy &&
            wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
                wiphy_update_regulatory(wiphy, reg_request->initiator);
index 18cead7645be0e75621e1b08a9e02621e75b7a4c..94af3d0657859e98c61d3f9b2babebcab722aec8 100644 (file)
@@ -115,7 +115,8 @@ static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
        rcu_read_unlock();
 }
 
-static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
+static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
+                                                 int tos, int oif,
                                                  const xfrm_address_t *saddr,
                                                  const xfrm_address_t *daddr,
                                                  int family)
@@ -127,14 +128,15 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
        if (unlikely(afinfo == NULL))
                return ERR_PTR(-EAFNOSUPPORT);
 
-       dst = afinfo->dst_lookup(net, tos, saddr, daddr);
+       dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
 
        xfrm_policy_put_afinfo(afinfo);
 
        return dst;
 }
 
-static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
+static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
+                                               int tos, int oif,
                                                xfrm_address_t *prev_saddr,
                                                xfrm_address_t *prev_daddr,
                                                int family)
@@ -153,7 +155,7 @@ static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
                daddr = x->coaddr;
        }
 
-       dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
+       dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
 
        if (!IS_ERR(dst)) {
                if (prev_saddr != saddr)
@@ -1373,15 +1375,15 @@ int __xfrm_sk_clone_policy(struct sock *sk)
 }
 
 static int
-xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
-              unsigned short family)
+xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
+              xfrm_address_t *remote, unsigned short family)
 {
        int err;
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
 
        if (unlikely(afinfo == NULL))
                return -EINVAL;
-       err = afinfo->get_saddr(net, local, remote);
+       err = afinfo->get_saddr(net, oif, local, remote);
        xfrm_policy_put_afinfo(afinfo);
        return err;
 }
@@ -1410,7 +1412,9 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
                        remote = &tmpl->id.daddr;
                        local = &tmpl->saddr;
                        if (xfrm_addr_any(local, tmpl->encap_family)) {
-                               error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
+                               error = xfrm_get_saddr(net, fl->flowi_oif,
+                                                      &tmp, remote,
+                                                      tmpl->encap_family);
                                if (error)
                                        goto fail;
                                local = &tmp;
@@ -1690,8 +1694,8 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 
                if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
                        family = xfrm[i]->props.family;
-                       dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
-                                             family);
+                       dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
+                                             &saddr, &daddr, family);
                        err = PTR_ERR(dst);
                        if (IS_ERR(dst))
                                goto put_states;
index 0cebf1fc37a2743ba096747056fab6c927922b23..a8de9e3002000d7eaa76f6764797e5b231d187ff 100644 (file)
@@ -925,12 +925,10 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
                        return err;
 
                if (attrs[XFRMA_ADDRESS_FILTER]) {
-                       filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+                       filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
+                                        sizeof(*filter), GFP_KERNEL);
                        if (filter == NULL)
                                return -ENOMEM;
-
-                       memcpy(filter, nla_data(attrs[XFRMA_ADDRESS_FILTER]),
-                              sizeof(*filter));
                }
 
                if (attrs[XFRMA_PROTO])
index 0b9847affbeccbc4b1a807b642120a3feaa397d0..374ea53288ca25ff6093467012afd2607192f505 100644 (file)
@@ -5190,6 +5190,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+       SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5291,6 +5292,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
index 2ae9619443d15dbeaf128cab385347db6bf26369..1d651b8a89570404cd306f239b17e939b1d5fa81 100644 (file)
@@ -30,6 +30,9 @@ config SND_SOC_GENERIC_DMAENGINE_PCM
        bool
        select SND_DMAENGINE_PCM
 
+config SND_SOC_TOPOLOGY
+       bool
+
 # All the supported SoCs
 source "sound/soc/adi/Kconfig"
 source "sound/soc/atmel/Kconfig"
index e189903fabf42958eff143e487999f0f013b85d7..669648b41d3027adf29ead27eda5f72a6ed0aaf1 100644 (file)
@@ -1,6 +1,9 @@
 snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
 snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
+
+ifneq ($(CONFIG_SND_SOC_TOPOLOGY),)
 snd-soc-core-objs += soc-topology.o
+endif
 
 ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
 snd-soc-core-objs += soc-generic-dmaengine-pcm.o
index 1fab9778807a0015f2578f0504306ed852eb4932..0450593980fd3525a65feca17dccea0e9940506e 100644 (file)
@@ -638,7 +638,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
        int err = -ENODEV;
 
        down_read(&chip->shutdown_rwsem);
-       if (chip->probing && chip->in_pm)
+       if (chip->probing || chip->in_pm)
                err = 0;
        else if (!chip->shutdown)
                err = usb_autopm_get_interface(chip->pm_intf);
index 094ddaee104c73d7caae22d851d79629c4715cd3..d31fac19c30b2d298ab2cf3a710b9b27c5764144 100644 (file)
@@ -638,7 +638,7 @@ ifndef DESTDIR
 prefix ?= $(HOME)
 endif
 bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(abspath $(prefix)/$(bindir_relative))
 mandir = share/man
 infodir = share/info
 perfexecdir = libexec/perf-core
index 53e8bb7bc8521a09f1347d48de2a0dd1eeb5e0bc..2a5d8d7698aedb8c82bdf8488f1fb62ded5b438a 100644 (file)
@@ -85,7 +85,7 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
        else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
                update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
-               update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+               update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, TRANSACTION_START))
                update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
                                " #   %5.2f%% aborted cycles         ",
                                100.0 * ((total2-avg) / total));
        } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
-                  avg > 0 &&
                   runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
 
-               if (total)
+               if (avg)
                        ratio = total / avg;
 
                fprintf(out, " # %8.0f cycles / transaction   ", ratio);
        } else if (perf_stat_evsel__is(evsel, ELISION_START) &&
-                  avg > 0 &&
                   runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
 
-               if (total)
+               if (avg)
                        ratio = total / avg;
 
                fprintf(out, " # %8.0f cycles / elision       ", ratio);
index 08c2a36ef7a9b429169fa1bd2f19f9b5340348ad..4124593696862fcb370fee15bcf7e34e11cdf9e0 100644 (file)
@@ -19,6 +19,8 @@
  *   - PACKET_FANOUT_LB
  *   - PACKET_FANOUT_CPU
  *   - PACKET_FANOUT_ROLLOVER
+ *   - PACKET_FANOUT_CBPF
+ *   - PACKET_FANOUT_EBPF
  *
  * Todo:
  * - functionality: PACKET_FANOUT_FLAG_DEFRAG
@@ -44,7 +46,9 @@
 #include <arpa/inet.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <linux/unistd.h>      /* for __NR_bpf */
 #include <linux/filter.h>
+#include <linux/bpf.h>
 #include <linux/if_packet.h>
 #include <net/ethernet.h>
 #include <netinet/ip.h>
@@ -91,6 +95,51 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
        return fd;
 }
 
+static void sock_fanout_set_ebpf(int fd)
+{
+       const int len_off = __builtin_offsetof(struct __sk_buff, len);
+       struct bpf_insn prog[] = {
+               { BPF_ALU64 | BPF_MOV | BPF_X,   6, 1, 0, 0 },
+               { BPF_LDX   | BPF_W   | BPF_MEM, 0, 6, len_off, 0 },
+               { BPF_JMP   | BPF_JGE | BPF_K,   0, 0, 1, DATA_LEN },
+               { BPF_JMP   | BPF_JA  | BPF_K,   0, 0, 4, 0 },
+               { BPF_LD    | BPF_B   | BPF_ABS, 0, 0, 0, 0x50 },
+               { BPF_JMP   | BPF_JEQ | BPF_K,   0, 0, 2, DATA_CHAR },
+               { BPF_JMP   | BPF_JEQ | BPF_K,   0, 0, 1, DATA_CHAR_1 },
+               { BPF_ALU   | BPF_MOV | BPF_K,   0, 0, 0, 0 },
+               { BPF_JMP   | BPF_EXIT,          0, 0, 0, 0 }
+       };
+       char log_buf[512];
+       union bpf_attr attr;
+       int pfd;
+
+       memset(&attr, 0, sizeof(attr));
+       attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+       attr.insns = (unsigned long) prog;
+       attr.insn_cnt = sizeof(prog) / sizeof(prog[0]);
+       attr.license = (unsigned long) "GPL";
+       attr.log_buf = (unsigned long) log_buf,
+       attr.log_size = sizeof(log_buf),
+       attr.log_level = 1,
+
+       pfd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+       if (pfd < 0) {
+               perror("bpf");
+               fprintf(stderr, "bpf verifier:\n%s\n", log_buf);
+               exit(1);
+       }
+
+       if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &pfd, sizeof(pfd))) {
+               perror("fanout data ebpf");
+               exit(1);
+       }
+
+       if (close(pfd)) {
+               perror("close ebpf");
+               exit(1);
+       }
+}
+
 static char *sock_fanout_open_ring(int fd)
 {
        struct tpacket_req req = {
@@ -115,8 +164,8 @@ static char *sock_fanout_open_ring(int fd)
 
        ring = mmap(0, req.tp_block_size * req.tp_block_nr,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-       if (!ring) {
-               fprintf(stderr, "packetsock ring mmap\n");
+       if (ring == MAP_FAILED) {
+               perror("packetsock ring mmap");
                exit(1);
        }
 
@@ -209,6 +258,7 @@ static int test_datapath(uint16_t typeflags, int port_off,
 {
        const int expect0[] = { 0, 0 };
        char *rings[2];
+       uint8_t type = typeflags & 0xFF;
        int fds[2], fds_udp[2][2], ret;
 
        fprintf(stderr, "test: datapath 0x%hx\n", typeflags);
@@ -219,6 +269,11 @@ static int test_datapath(uint16_t typeflags, int port_off,
                fprintf(stderr, "ERROR: failed open\n");
                exit(1);
        }
+       if (type == PACKET_FANOUT_CBPF)
+               sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA);
+       else if (type == PACKET_FANOUT_EBPF)
+               sock_fanout_set_ebpf(fds[0]);
+
        rings[0] = sock_fanout_open_ring(fds[0]);
        rings[1] = sock_fanout_open_ring(fds[1]);
        pair_udp_open(fds_udp[0], PORT_BASE);
@@ -227,11 +282,11 @@ static int test_datapath(uint16_t typeflags, int port_off,
 
        /* Send data, but not enough to overflow a queue */
        pair_udp_send(fds_udp[0], 15);
-       pair_udp_send(fds_udp[1], 5);
+       pair_udp_send_char(fds_udp[1], 5, DATA_CHAR_1);
        ret = sock_fanout_read(fds, rings, expect1);
 
        /* Send more data, overflow the queue */
-       pair_udp_send(fds_udp[0], 15);
+       pair_udp_send_char(fds_udp[0], 15, DATA_CHAR_1);
        /* TODO: ensure consistent order between expect1 and expect2 */
        ret |= sock_fanout_read(fds, rings, expect2);
 
@@ -275,6 +330,7 @@ int main(int argc, char **argv)
        const int expect_rb[2][2]       = { { 15, 5 },  { 20, 15 } };
        const int expect_cpu0[2][2]     = { { 20, 0 },  { 20, 0 } };
        const int expect_cpu1[2][2]     = { { 0, 20 },  { 0, 20 } };
+       const int expect_bpf[2][2]      = { { 15, 5 },  { 15, 20 } };
        int port_off = 2, tries = 5, ret;
 
        test_control_single();
@@ -296,6 +352,11 @@ int main(int argc, char **argv)
        ret |= test_datapath(PACKET_FANOUT_ROLLOVER,
                             port_off, expect_rb[0], expect_rb[1]);
 
+       ret |= test_datapath(PACKET_FANOUT_CBPF,
+                            port_off, expect_bpf[0], expect_bpf[1]);
+       ret |= test_datapath(PACKET_FANOUT_EBPF,
+                            port_off, expect_bpf[0], expect_bpf[1]);
+
        set_cpuaffinity(0);
        ret |= test_datapath(PACKET_FANOUT_CPU, port_off,
                             expect_cpu0[0], expect_cpu0[1]);
index 37da54ac85a9583f1d364b8c88ed3fcb7d9f6b84..24bc7ec1be7dab217689fbda39d9c503a1bc6bfc 100644 (file)
@@ -30,6 +30,7 @@
 
 #define DATA_LEN                       100
 #define DATA_CHAR                      'a'
+#define DATA_CHAR_1                    'b'
 
 #define PORT_BASE                      8000
 
 # define __maybe_unused                __attribute__ ((__unused__))
 #endif
 
-static __maybe_unused void pair_udp_setfilter(int fd)
+static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
 {
        struct sock_filter bpf_filter[] = {
                { 0x80, 0, 0, 0x00000000 },  /* LD  pktlen                    */
-               { 0x35, 0, 5, DATA_LEN   },  /* JGE DATA_LEN  [f goto nomatch]*/
+               { 0x35, 0, 4, DATA_LEN   },  /* JGE DATA_LEN  [f goto nomatch]*/
                { 0x30, 0, 0, 0x00000050 },  /* LD  ip[80]                    */
-               { 0x15, 0, 3, DATA_CHAR  },  /* JEQ DATA_CHAR [f goto nomatch]*/
-               { 0x30, 0, 0, 0x00000051 },  /* LD  ip[81]                    */
-               { 0x15, 0, 1, DATA_CHAR  },  /* JEQ DATA_CHAR [f goto nomatch]*/
+               { 0x15, 1, 0, DATA_CHAR  },  /* JEQ DATA_CHAR   [t goto match]*/
+               { 0x15, 0, 1, DATA_CHAR_1},  /* JEQ DATA_CHAR_1 [t goto match]*/
                { 0x06, 0, 0, 0x00000060 },  /* RET match                     */
                { 0x06, 0, 0, 0x00000000 },  /* RET no match                  */
        };
        struct sock_fprog bpf_prog;
 
+       if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
+               bpf_filter[5].code = 0x16;   /* RET A                         */
+
        bpf_prog.filter = bpf_filter;
        bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
-       if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
+       if (setsockopt(fd, lvl, optnum, &bpf_prog,
                       sizeof(bpf_prog))) {
                perror("setsockopt SO_ATTACH_FILTER");
                exit(1);
        }
 }
 
+static __maybe_unused void pair_udp_setfilter(int fd)
+{
+       sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
+}
+
 static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
 {
        struct sockaddr_in saddr, daddr;
@@ -96,11 +104,11 @@ static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
        }
 }
 
-static __maybe_unused void pair_udp_send(int fds[], int num)
+static __maybe_unused void pair_udp_send_char(int fds[], int num, char payload)
 {
        char buf[DATA_LEN], rbuf[DATA_LEN];
 
-       memset(buf, DATA_CHAR, sizeof(buf));
+       memset(buf, payload, sizeof(buf));
        while (num--) {
                /* Should really handle EINTR and EAGAIN */
                if (write(fds[0], buf, sizeof(buf)) != sizeof(buf)) {
@@ -118,6 +126,11 @@ static __maybe_unused void pair_udp_send(int fds[], int num)
        }
 }
 
+static __maybe_unused void pair_udp_send(int fds[], int num)
+{
+       return pair_udp_send_char(fds, num, DATA_CHAR);
+}
+
 static __maybe_unused void pair_udp_close(int fds[])
 {
        close(fds[0]);